hip_filename     stringlengths 5–84
hip_content      stringlengths 79–9.69M
cuda_filename    stringlengths 4–83
cuda_content     stringlengths 19–9.69M
e94586782c25aa39dddb54cb51ed9de793e70b65.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "../headers/convolution.h"

Convolution::Convolution(std::vector<float> IRP) {
    ImpulseResponse = IRP;
    SampleLength = IRP.size();
    Counter = 0;
    Convs.reserve(SampleLength + 1);
    InputVector.reserve(SampleLength + 1);
}

Convolution::~Convolution() {
}

__global__ void gpuCalc(std::vector<float> IR, std::vector<float> IN, int i) {
    int th = threadIdx.x + i; // the id number of our thread
    outputvar += IR[th] * IN[th];
}

float Convolution::getSamp(float CurSampy) {
    float Output = 0;
    outputvar = 0;

    //Makes a new Class instance for each sample that's put in.
    //That sample plays back the Impulse Response and multiplies it with the og sample it got.
    //Once the full Impulse Response has played, the sample gets removed.

    //Remove float from InputVector when it has played
    if (Counter >= SampleLength) {
        InputVector.pop_back();
    }

    //std::cout << "Kaas\n";
    InputVector.insert(InputVector.begin(), CurSampy);
    //InputVector.push_back(CurSampy);

    //Makes a new IR instance
    //Convs.push_back(SampleConv(ImpulseResponse, CurSampy));

    //Adds all Impulse Response values
    // for(size_t i = 0; i < InputVector.size(); ++i) {
    //     Output += InputVector[i] * ImpulseResponse[i];
    // }

    for (int i = 0; i < InputVector.size(); ) {
        if (InputVector.size() - i < 1024) {
            hipLaunchKernelGGL(gpuCalc, dim3(1), dim3(InputVector.size() - i), 0, 0,
                               ImpulseResponse, InputVector, i);
            hipDeviceSynchronize();
            i += InputVector.size() - i;
        } else {
            hipLaunchKernelGGL(gpuCalc, dim3(1), dim3(1024), 0, 0,
                               ImpulseResponse, InputVector, i);
            hipDeviceSynchronize();
            i += 1024;
        }
    }

    ++Counter;
    //std::cout << Output << "\n";
    //Output = Output / InputVector.size() * 50;
    //std::cout << Output << "\n";
    return outputvar;
}
e94586782c25aa39dddb54cb51ed9de793e70b65.cu
#include <iostream>
#include "../headers/convolution.h"

Convolution::Convolution(std::vector<float> IRP) {
    ImpulseResponse = IRP;
    SampleLength = IRP.size();
    Counter = 0;
    Convs.reserve(SampleLength + 1);
    InputVector.reserve(SampleLength + 1);
}

Convolution::~Convolution() {
}

__global__ void gpuCalc(std::vector<float> IR, std::vector<float> IN, int i) {
    int th = threadIdx.x + i; // the id number of our thread
    outputvar += IR[th] * IN[th];
}

float Convolution::getSamp(float CurSampy) {
    float Output = 0;
    outputvar = 0;

    //Makes a new Class instance for each sample that's put in.
    //That sample plays back the Impulse Response and multiplies it with the og sample it got.
    //Once the full Impulse Response has played, the sample gets removed.

    //Remove float from InputVector when it has played
    if (Counter >= SampleLength) {
        InputVector.pop_back();
    }

    //std::cout << "Kaas\n";
    InputVector.insert(InputVector.begin(), CurSampy);
    //InputVector.push_back(CurSampy);

    //Makes a new IR instance
    //Convs.push_back(SampleConv(ImpulseResponse, CurSampy));

    //Adds all Impulse Response values
    // for(size_t i = 0; i < InputVector.size(); ++i) {
    //     Output += InputVector[i] * ImpulseResponse[i];
    // }

    for (int i = 0; i < InputVector.size(); ) {
        if (InputVector.size() - i < 1024) {
            gpuCalc<<<1, InputVector.size() - i>>>(ImpulseResponse, InputVector, i);
            cudaDeviceSynchronize();
            i += InputVector.size() - i;
        } else {
            gpuCalc<<<1, 1024>>>(ImpulseResponse, InputVector, i);
            cudaDeviceSynchronize();
            i += 1024;
        }
    }

    ++Counter;
    //std::cout << Output << "\n";
    //Output = Output / InputVector.size() * 50;
    //std::cout << Output << "\n";
    return outputvar;
}
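Note on this row: even with the loop syntax repaired, both files pass std::vector objects into a __global__ kernel and accumulate into an undeclared outputvar from many threads at once, so neither compiles nor has a defined result. Below is a minimal sketch, not from the repository, of the same per-sample dot product (Output += InputVector[k] * ImpulseResponse[k]) done with raw device buffers and an atomicAdd. The names gpuCalcFixed, dotOnDevice, d_ir, d_in and d_out are illustrative; only the CUDA spelling is shown, the HIP version would differ in the runtime prefix and launch syntax.

#include <algorithm>
#include <vector>
#include <cuda_runtime.h>

// One thread per tap; every product is folded into *out with an atomicAdd,
// which plays the role the undeclared "outputvar" was meant to play.
__global__ void gpuCalcFixed(const float* ir, const float* in, int n, float* out) {
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k < n) {
        atomicAdd(out, ir[k] * in[k]);
    }
}

// Host side: copy both vectors to the device and launch enough 1024-thread
// blocks to cover them, mirroring the 1024-wide chunking in getSamp().
float dotOnDevice(const std::vector<float>& ir, const std::vector<float>& in) {
    int n = static_cast<int>(std::min(ir.size(), in.size()));
    if (n == 0) return 0.0f;
    float *d_ir = nullptr, *d_in = nullptr, *d_out = nullptr, result = 0.0f;
    cudaMalloc(&d_ir, n * sizeof(float));
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_ir, ir.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_in, in.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_out, &result, sizeof(float), cudaMemcpyHostToDevice);
    gpuCalcFixed<<<(n + 1023) / 1024, 1024>>>(d_ir, d_in, n, d_out);
    cudaMemcpy(&result, d_out, sizeof(float), cudaMemcpyDeviceToHost); // implicit sync
    cudaFree(d_ir);
    cudaFree(d_in);
    cudaFree(d_out);
    return result;
}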
e606a071869a453c6c76dce032c6d6eac117e3cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if 0 /* *NCUDA2 * * */ #include "E:\VisualStudio\CUDA\common\book.h" #include <windows.h> #include <stdio.h> #include <stdlib.h> #define N 8 #define MAXSIZE 100 //rowQ[] __device__ int isLegal(int* L, int row, int pos ) { for(int i=0; i<row; i++) { if(L[i] == -1)return 0; if(pos == L[i])return 0; if(L[i] - pos == i - row)return 0;// if(L[i] - pos == row - i)return 0;// } return 1; } //__device__ static int myCount = 0;// __device__ void findQueen(int* L, int row, int* count) { int i; if(row == N) { return; } for(i=0; i<N; i++) { if(isLegal(L, row, i)) { L[row] = i;// if(row == N-1)// { (*count)++; //myCount++; L[row] = -1;//i //copyQueen } findQueen(L, row+1, count);// } } } __device__ void Allocate(int* pa, int* pb, int* count) { int L[N] = {0}; L[0] = *pa; L[1] = *pb; findQueen(L, 2, count); } __global__ void kernel(int* part_sum, int* L)//*sum { int y = blockIdx.x;// int x = threadIdx.x;// int a = y; int b = x; //__shared__ int cache[N];// int tid = x+y*blockDim.x; int temp = 0; //if(tid < N*N) { if(a != b && a-b != 1 && b-a != 1) { Allocate(&a, &b, &temp); } //printf("Temp = %d\n",temp); part_sum[tid] = temp; } //printf("PartSUM = %d\n",part_sum[tid]); //__syncthreads(); // // i = (N)/2; //while(i != 0) //{ // if(x <= i) // cache[x] += cache[x + i]; // __syncthreads(); // i /= 2; //} } int main() { LARGE_INTEGER t1,t2,tc,t3,t4,tq;// QueryPerformanceFrequency(&tc); QueryPerformanceCounter(&t1);//start int j,i = 0; int sum = 0;// int* partial_cnt = new int[N*N];// int* L = new int[N]; for(i=0; i<N; i++) { L[i] = 0; } for(j=0; j<N*N; j++) partial_cnt[j] = 0;// int* dev_cnt; int* dev_L; hipMalloc((void**)&dev_cnt,N*N*sizeof(int)); hipMalloc((void**)&dev_L,N*sizeof(int)); hipMemcpy(dev_cnt, partial_cnt, N*sizeof(int), hipMemcpyHostToDevice); //for(i=0; i<N; i++) //{ // L[0] = i; // for(j=0; j<N; j++) // { // if(j != L[0] && L[0]-j != 1 && j-L[0] != 1) // { // L[1] = j; // HANDLE_ERROR( hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice) ); // kernel<<<1,1>>>(dev_cnt, dev_L);// // hipMemcpy(partial_cnt, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(int k=0; k<N; k++) // sum += partial_cnt[k]; // } // } //} HANDLE_ERROR( hipMemcpy(dev_L, L, N*sizeof(int), hipMemcpyHostToDevice) ); QueryPerformanceFrequency(&tq);// QueryPerformanceCounter(&t3);// hipLaunchKernelGGL(( kernel), dim3(N),dim3(N), 0, 0, dev_cnt, dev_L);// QueryPerformanceCounter(&t4);// hipMemcpy(partial_cnt, dev_cnt, N*N*sizeof(int),hipMemcpyDeviceToHost); for(int k=0; k<N*N; k++) { //printf("Part = %d\n",partial_cnt[k]); sum += partial_cnt[k]; } printf("\n%d\n", sum); QueryPerformanceCounter(&t2);//end printf("Use Time:%f\n",(t2.QuadPart-t1.QuadPart)*1.0/tc.QuadPart);// printf("Kernel Use Time:%f\n",(t4.QuadPart-t3.QuadPart)*1.0/tq.QuadPart);// return 0; } // //int main() //{ // int i=0,sum=0; // int count[N] = {0}; // int* L = new int[N]; // for(i=0; i<N; i++) // L[i] = 0; // int* dev_cnt; // int* dev_L; // hipMalloc((void**)&dev_cnt,N*sizeof(int)); // hipMalloc((void**)&dev_L,N*sizeof(int)); // hipMemcpy(dev_cnt, count, // N*sizeof(int), // hipMemcpyHostToDevice); // // //HANDLE_ERROR( hipMemcpy(dev_L, L, // // N*sizeof(int), // // hipMemcpyHostToDevice) ); // /**/ // LARGE_INTEGER t1,t2,tc;// // QueryPerformanceFrequency(&tc);// // QueryPerformanceCounter(&t1);// // i = 0; // // switch(N) // { ////===================4======================== // case 4: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // 
hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // ////=================5========================== // case 5: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================6========================== // case 6: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // 
//=================7========================== // case 7: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // break; // //=================8========================== // case 8: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += 
count[i]; // // break; // //=================9========================== // case 9: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // break; // //=================10========================== // case 10: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // 
for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================11========================== // case 11: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // 
break; // //=================12========================== // case 12: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // L[0] = 11; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================13========================== // case 13: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) 
// sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 11; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 12; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================14========================== // case 14: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // 
hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 11; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 12; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 13; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================15========================== // case 15: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // hipMemcpy(dev_L, L, // N*sizeof(int), // 
hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 11; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 12; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 13; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 14; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================16========================== // case 16: // L[0] = 0; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // 
kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // // L[0] = 11; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 12; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 13; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 14; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 15; // hipMemcpy(dev_L, L, // N*sizeof(int), // hipMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // hipMemcpy(count, dev_cnt, N*sizeof(int),hipMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // // } // QueryPerformanceCounter(&t2);// // printf("\n%d\n", sum); // // printf("Use Time:%f\n",(t2.QuadPart-t1.QuadPart)*1.0/tc.QuadPart);// // hipFree(dev_cnt); // delete []L; // return 0; //} #endif
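Note on this row: the CUDA source follows as the next cell; the two files differ only in the runtime prefix (cuda* vs hip*) and the kernel-launch spelling, while the kernel bodies are unchanged. Below is a minimal, compilable CUDA sketch of that mapping, with the corresponding hipify output shown in comments. The kernel fill and the buffer dev are made up for illustration and are not part of the dataset.

#include <cstdio>
#include <cuda_runtime.h>

#define N 8

__global__ void fill(int* out) {        // kernel body is identical in both dialects
    out[blockIdx.x * blockDim.x + threadIdx.x] = 1;
}

int main() {
    int host[N * N] = {0};
    int* dev = nullptr;
    cudaMalloc((void**)&dev, N * N * sizeof(int));   // HIP: hipMalloc((void**)&dev, ...)
    cudaMemcpy(dev, host, N * N * sizeof(int),
               cudaMemcpyHostToDevice);              // HIP: hipMemcpy(..., hipMemcpyHostToDevice)
    fill<<<N, N>>>(dev);                             // HIP: hipLaunchKernelGGL(fill, dim3(N), dim3(N), 0, 0, dev)
    cudaMemcpy(host, dev, N * N * sizeof(int),
               cudaMemcpyDeviceToHost);              // HIP: hipMemcpy(..., hipMemcpyDeviceToHost)
    int sum = 0;
    for (int k = 0; k < N * N; ++k) sum += host[k];
    printf("%d\n", sum);
    cudaFree(dev);                                   // HIP: hipFree(dev)
    return 0;
}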
e606a071869a453c6c76dce032c6d6eac117e3cf.cu
#if 0 /* *N皇后,利用CUDA将传统的串行算法——递归回溯,并行化,规划最初2层 * * */ #include "E:\VisualStudio\CUDA\common\book.h" #include <windows.h> #include <stdio.h> #include <stdlib.h> #define N 8 #define MAXSIZE 100 //合法性判断——该row某列与其之前所有行比较,Q[]存储之前行合法列位置 __device__ int isLegal(int* L, int row, int pos ) { for(int i=0; i<row; i++) { if(L[i] == -1)return 0; if(pos == L[i])return 0; if(L[i] - pos == i - row)return 0;//正对角线 if(L[i] - pos == row - i)return 0;//斜对角线 } return 1; } //__device__ static int myCount = 0;//统计解个数 __device__ void findQueen(int* L, int row, int* count) { int i; if(row == N) { return; } for(i=0; i<N; i++) { if(isLegal(L, row, i)) { L[row] = i;//该行找到合法位置 if(row == N-1)//递归到最后一行 { (*count)++; //myCount++; L[row] = -1;//继续查找该行i列以后的位置 //copyQueen } findQueen(L, row+1, count);//递归下一行 } } } __device__ void Allocate(int* pa, int* pb, int* count) { int L[N] = {0}; L[0] = *pa; L[1] = *pb; findQueen(L, 2, count); } __global__ void kernel(int* part_sum, int* L)//*sum每个元素为部分解个数 { int y = blockIdx.x;//块索引号 int x = threadIdx.x;//线程索引号 int a = y; int b = x; //__shared__ int cache[N];//共享内存 int tid = x+y*blockDim.x; int temp = 0; //if(tid < N*N) { if(a != b && a-b != 1 && b-a != 1) { Allocate(&a, &b, &temp); } //printf("Temp = %d\n",temp); part_sum[tid] = temp; } //printf("PartSUM = %d\n",part_sum[tid]); //__syncthreads(); //归约运算 // i = (N)/2; //while(i != 0) //{ // if(x <= i) // cache[x] += cache[x + i]; // __syncthreads(); // i /= 2; //} } int main() { LARGE_INTEGER t1,t2,tc,t3,t4,tq;//定义时间变量 QueryPerformanceFrequency(&tc); QueryPerformanceCounter(&t1);//总执行时间start int j,i = 0; int sum = 0;//记录所有问题解个数 int* partial_cnt = new int[N*N];//记录部分解 int* L = new int[N]; for(i=0; i<N; i++) { L[i] = 0; } for(j=0; j<N*N; j++) partial_cnt[j] = 0;//初始化 int* dev_cnt; int* dev_L; cudaMalloc((void**)&dev_cnt,N*N*sizeof(int)); cudaMalloc((void**)&dev_L,N*sizeof(int)); cudaMemcpy(dev_cnt, partial_cnt, N*sizeof(int), cudaMemcpyHostToDevice); //for(i=0; i<N; i++) //{ // L[0] = i; // for(j=0; j<N; j++) // { // if(j != L[0] && L[0]-j != 1 && j-L[0] != 1) // { // L[1] = j; // HANDLE_ERROR( cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice) ); // kernel<<<1,1>>>(dev_cnt, dev_L);//执行核函数 // cudaMemcpy(partial_cnt, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(int k=0; k<N; k++) // sum += partial_cnt[k]; // } // } //} HANDLE_ERROR( cudaMemcpy(dev_L, L, N*sizeof(int), cudaMemcpyHostToDevice) ); QueryPerformanceFrequency(&tq);// QueryPerformanceCounter(&t3);//核函数开始时间 kernel<<<N,N>>>(dev_cnt, dev_L);//执行核函数 QueryPerformanceCounter(&t4);//核函数结束时间 cudaMemcpy(partial_cnt, dev_cnt, N*N*sizeof(int),cudaMemcpyDeviceToHost); for(int k=0; k<N*N; k++) { //printf("Part = %d\n",partial_cnt[k]); sum += partial_cnt[k]; } printf("\n总共%d个解\n", sum); QueryPerformanceCounter(&t2);//总执行时间end printf("Use Time:%f\n",(t2.QuadPart-t1.QuadPart)*1.0/tc.QuadPart);//打印耗时 printf("Kernel Use Time:%f\n",(t4.QuadPart-t3.QuadPart)*1.0/tq.QuadPart);//打印核函数耗时 return 0; } //首次改进——有误 //int main() //{ // int i=0,sum=0; // int count[N] = {0}; // int* L = new int[N]; // for(i=0; i<N; i++) // L[i] = 0; // int* dev_cnt; // int* dev_L; // cudaMalloc((void**)&dev_cnt,N*sizeof(int)); // cudaMalloc((void**)&dev_L,N*sizeof(int)); // cudaMemcpy(dev_cnt, count, // N*sizeof(int), // cudaMemcpyHostToDevice); // // //HANDLE_ERROR( cudaMemcpy(dev_L, L, // // N*sizeof(int), // // cudaMemcpyHostToDevice) ); // /*时间计时函数*/ // LARGE_INTEGER t1,t2,tc;//定义时间变量 // QueryPerformanceFrequency(&tc);// // QueryPerformanceCounter(&t1);// // i = 0; // // switch(N) // { 
////===================4皇后======================== // case 4: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // ////=================5皇后========================== // case 5: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================6皇后========================== // case 6: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); 
// kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================7皇后========================== // case 7: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // break; // //=================8皇后========================== // case 8: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // 
// L[0] = 7; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================9皇后========================== // case 9: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // break; // //=================10皇后========================== // case 10: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, 
dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================11皇后========================== // case 11: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // 
cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // break; // //=================12皇后========================== // case 12: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // L[0] = 11; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================13皇后========================== // case 13: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, 
L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 11; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 12; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================14皇后========================== // case 14: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // 
cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 11; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 12; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 13; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================15皇后========================== // case 15: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; 
// // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 11; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 12; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 13; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 14; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // //=================16皇后========================== // case 16: // L[0] = 0; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 1; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 2; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 3; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) 
// sum += count[i]; // // L[0] = 4; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 5; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 6; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 7; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 8; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 9; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 10; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // // L[0] = 11; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 12; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 13; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 14; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // L[0] = 15; // cudaMemcpy(dev_L, L, // N*sizeof(int), // cudaMemcpyHostToDevice); // kernel<<<1,N>>>(dev_cnt, dev_L); // cudaMemcpy(count, dev_cnt, N*sizeof(int),cudaMemcpyDeviceToHost); // for(i=0; i<N; i++) // sum += count[i]; // // break; // // } // QueryPerformanceCounter(&t2);//结束时间 // printf("\n总共%d个解\n", sum); // // printf("Use Time:%f\n",(t2.QuadPart-t1.QuadPart)*1.0/tc.QuadPart);//打印耗时 // cudaFree(dev_cnt); // delete []L; // return 0; //} #endif
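// The commented-out tail above repeats one identical copy/launch/reduce block per choice of the
// first queen's column (cases 12 through 16). A hypothetical host-side condensation of that pattern
// is sketched below; it assumes the same kernel(dev_cnt, dev_L) launch shape and the same
// L/dev_L/count/dev_cnt buffers as the original, and illustrates the structure rather than being
// code from the file itself.
#include <cuda_runtime.h>

__global__ void kernel(int *dev_cnt, int *dev_L);  // defined earlier in the original file

// n: number of first-row columns to enumerate (the "case" label above);
// N: per-launch thread count and buffer length, as in the original launches.
long long count_solutions(int n, int N, int *L, int *dev_L, int *count, int *dev_cnt)
{
    long long sum = 0;
    for (int firstCol = 0; firstCol < n; ++firstCol) {
        L[0] = firstCol;
        cudaMemcpy(dev_L, L, N * sizeof(int), cudaMemcpyHostToDevice);
        kernel<<<1, N>>>(dev_cnt, dev_L);
        cudaMemcpy(count, dev_cnt, N * sizeof(int), cudaMemcpyDeviceToHost);
        for (int i = 0; i < N; i++)
            sum += count[i];
    }
    return sum;
}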
3c971626e096b2fc41c5b0097ef57e194b7ad738.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/core/cuda_types.hpp> #include <opencv2/cudev/common.hpp> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <hip/hip_vector_types.h> using namespace cv; using namespace cv::cuda; // __global__ void combine_kernel(const PtrStepSz<uchar4> imageL, const PtrStepSz<uchar4> imageR, const PtrStepSz<float> flowMagL, const PtrStepSz<float> flowMagR, PtrStep<uchar4> dst){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x < imageL.cols && y < imageL.rows) { uchar4 colorL = imageL(y,x); uchar4 colorR = imageR(y,x); unsigned char outAlpha; if(colorL.w > colorR.w) { if(colorL.w / 255.0f > 0.1) outAlpha = 255; else outAlpha = 0; } else { if(colorR.w / 255.0f > 0.1) outAlpha = 255; else outAlpha = 0; } uchar4 colorMixed; if (colorL.w == 0 && colorR.w == 0) { colorMixed = make_uchar4(0, 0, 0, outAlpha); } else if (colorL.w == 0) { colorMixed = make_uchar4(colorR.x, colorR.y, colorR.z, outAlpha); } else if (colorR.w == 0) { colorMixed = make_uchar4(colorL.x, colorL.y, colorL.z, outAlpha); } else { const float magL = flowMagL(y,x) / float(imageL.cols); const float magR = flowMagR(y,x) / float(imageL.cols); float blendL = float(colorL.w); float blendR = float(colorR.w); float norm = blendL + blendR; blendL /= norm; blendR /= norm; const float colorDiff = (abs(colorL.x - colorR.x) + abs(colorL.y - colorR.y) + abs(colorL.z - colorR.z)) / 255.0f; const float kColorDiffCoef = 10.0f; const float kSoftmaxSharpness = 10.0f; const float kFlowMagCoef = 20.0f; // NOTE: this is scaled differently than the test version due to normalizing magL & magR by imageL.cols const float deghostCoef = tanhf(colorDiff * kColorDiffCoef); const double expL = exp(kSoftmaxSharpness * blendL * (1.0 + kFlowMagCoef * magL)); const double expR = exp(kSoftmaxSharpness * blendR * (1.0 + kFlowMagCoef * magR)); const double sumExp = expL + expR + 0.00001; const float softmaxL = float(expL / sumExp); const float softmaxR = float(expR / sumExp); colorMixed = make_uchar4( float(colorL.x)* (blendL * (1-deghostCoef) + softmaxL * deghostCoef) + float(colorR.x)*(blendR * (1-deghostCoef) + softmaxR * deghostCoef), float(colorL.y)* (blendL * (1-deghostCoef) + softmaxL * deghostCoef) + float(colorR.y)*(blendR * (1-deghostCoef) + softmaxR * deghostCoef), float(colorL.z)* (blendL * (1-deghostCoef) + softmaxL * deghostCoef) + float(colorR.z)*(blendR * (1-deghostCoef) + softmaxR * deghostCoef), 255); } dst(y, x) = colorMixed; // uchar4 v = imageL(y,x); // dst(y,x) = make_uchar4(v.x,v.y,v.z,255); } } void combine_caller(const PtrStepSz<uchar4>& imageL, const PtrStepSz<uchar4>& imageR, const PtrStepSz<float>& flowMagL, const PtrStepSz<float>& flowMagR, PtrStep<uchar4> dst,hipStream_t stream){ dim3 block(32,8); dim3 grid((imageL.cols + block.x - 1)/block.x,(imageL.rows + block.y - 1)/block.y); hipLaunchKernelGGL(( combine_kernel), dim3(grid),dim3(block),0,stream, imageL,imageR,flowMagL,flowMagR, dst); if(stream == 0) hipDeviceSynchronize(); } __global__ void shift_kernel(const PtrStepSz<float> shiftMat, PtrStep<uchar4> dst){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x < shiftMat.cols && y < shiftMat.rows) { uchar4 image = dst(y,x); float alpha = shiftMat(y,x); image.w = (int)(image.w * alpha); dst(y,x) = image; } } void alpha_cuda_caller(const PtrStepSz<float>& shiftMat, PtrStep<uchar4> dst, hipStream_t stream){ dim3 block(32,8); dim3 
grid((shiftMat.cols + block.x - 1)/block.x,(shiftMat.rows + block.y - 1)/block.y); hipLaunchKernelGGL(( shift_kernel), dim3(grid),dim3(block),0,stream, shiftMat,dst); if(stream == 0) hipDeviceSynchronize(); } __global__ void feather_alpha_chanel_kernel( const PtrStepSz<uchar4> src, PtrStep<uchar4> dst, const int feathersize) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; const int yFeatherStart = src.rows - 1 - feathersize; if (x < src.cols && y < src.rows) { uchar4 gsrc = src(y, x); const float alpha = 1.0f - float(y - yFeatherStart) / float(feathersize); uchar4 colorMixed; unsigned char temp = gsrc.w <= (unsigned char)(255.0f * alpha) ? gsrc.w : (unsigned char)(255.0f * alpha); if (y >= yFeatherStart) { colorMixed = make_uchar4( gsrc.x, gsrc.y, gsrc.z, temp ); } else { colorMixed = make_uchar4( gsrc.x, gsrc.y, gsrc.z, gsrc.w ); } dst(y, x) = colorMixed; } } void feather_alpha_chanel_caller( const PtrStepSz<uchar4>& src, PtrStep<uchar4> dst, const int feathersize, hipStream_t stream) { dim3 block(32, 8); dim3 grid((src.cols + block.x - 1) / block.x, (src.rows + block.y - 1) / block.y); hipLaunchKernelGGL(( feather_alpha_chanel_kernel) , dim3(grid), dim3(block), 0, stream , src, dst, feathersize); if (stream == 0) hipDeviceSynchronize(); } __global__ void extendimage_kernal(const PtrStepSz<uchar4> gpucroppedSideSpherical, PtrStep<uchar4> gpuextendedSideSpherical, const PtrStepSz<uchar4> gpufisheyeSpherical, PtrStep<uchar4> gpuextendedFisheyeSpherical, const int extendedWidth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; const int width = gpufisheyeSpherical.cols; if (x < extendedWidth && y < gpufisheyeSpherical.rows) { uchar4 g1 = gpucroppedSideSpherical(y, x % width); uchar4 g2 = gpufisheyeSpherical(y, x % width); gpuextendedSideSpherical(y, x) = g1; gpuextendedFisheyeSpherical(y, x) = g2; } } void extendimage_caller(const PtrStepSz<uchar4>& gpucroppedSideSpherical, PtrStep<uchar4> gpuextendedSideSpherical, const PtrStepSz<uchar4>& gpufisheyeSpherical, PtrStep<uchar4> gpuextendedFisheyeSpherical, const int extendedWidth, hipStream_t stream) { dim3 block(32, 8); dim3 grid((extendedWidth + block.x - 1) / block.x, (gpucroppedSideSpherical.rows + block.y - 1) / block.y); extendimage_kernal << <grid, block, 0, stream >> >(gpucroppedSideSpherical, gpuextendedSideSpherical, gpufisheyeSpherical, gpuextendedFisheyeSpherical, extendedWidth); if (stream == 0) hipDeviceSynchronize(); } __global__ void flatten_kernel( const PtrStepSz<uchar4> bottomLayer, const PtrStepSz<uchar4> topLayer, PtrStep<uchar4> dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < bottomLayer.cols && y < bottomLayer.rows && x >= 0 && y >= 0) { uchar4 baseColor = bottomLayer(y, x); uchar4 topColor; if (x < topLayer.cols && y < topLayer.rows) topColor = topLayer(y, x); else topColor = make_uchar4(0.0, 0.0, 0.0, 0.0); const float colorDiff = (abs(baseColor.x - topColor.x) + abs(baseColor.y - topColor.y) + abs(baseColor.z - topColor.z)) / 255.0f; static const float kColorDiffCoef = 5.0f; static const float kSoftmaxSharpness = 5.0f; static const float kBaseLayerBias = 2.0f; const float deghostCoef = tanhf(colorDiff * kColorDiffCoef); const float alphaR = topColor.w / 255.0f; const float alphaL = 1.0f - alphaR; const double expL = exp(kSoftmaxSharpness * alphaL * kBaseLayerBias); const double expR = exp(kSoftmaxSharpness * alphaR); const double sumExp = expL + expR + 0.00001; 
const float softmaxL = float(expL / sumExp); const float softmaxR = 1.0f - softmaxL; unsigned char outAlpha; if (topColor.w >= baseColor.w) outAlpha = topColor.w; else outAlpha = baseColor.w; uchar4 colorMixed; colorMixed = make_uchar4( float(baseColor.x) * (alphaL * (1 - deghostCoef) + softmaxL * deghostCoef) + float(topColor.x) * (alphaR * (1 - deghostCoef) + softmaxR * deghostCoef), float(baseColor.y) * (alphaL * (1 - deghostCoef) + softmaxL * deghostCoef) + float(topColor.y) * (alphaR * (1 - deghostCoef) + softmaxR * deghostCoef), float(baseColor.z) * (alphaL * (1 - deghostCoef) + softmaxL * deghostCoef) + float(topColor.z) * (alphaR * (1 - deghostCoef) + softmaxR * deghostCoef), outAlpha); dst(y, x) = colorMixed; } } void flatten_caller( const PtrStepSz<uchar4>& bottomLayer, const PtrStepSz<uchar4>& topLayer, PtrStep<uchar4> dst, hipStream_t stream) { dim3 block(32, 8); dim3 grid((bottomLayer.cols + block.x - 1) / block.x, (bottomLayer.rows + block.y - 1) / block.y); flatten_kernel << <grid, block, 0, stream >> >(bottomLayer, topLayer, dst); if (stream == 0) hipDeviceSynchronize(); } __global__ void strip_kernel( const PtrStepSz<uchar4> fisheyeSpherical, const PtrStepSz<uchar4> warpedExtendedFisheyeSpherical, const PtrStepSz<uchar4> warpedSphericalForEye, PtrStep<uchar4> imgdst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int maxBlendX = float(fisheyeSpherical.cols) * 0.2; if (y < warpedSphericalForEye.rows && x < maxBlendX) { uchar4 src = warpedSphericalForEye(y, x); uchar4 wrap = warpedExtendedFisheyeSpherical(y, x+fisheyeSpherical.cols); float alpha = 1.0f - max(0.0f, min(1.0f, (x - float(maxBlendX) * 0.333f) / (float(maxBlendX) * 0.667f - float(maxBlendX) * 0.333f))); uchar4 colorMixed = make_uchar4( src.x*alpha + wrap.x*(1-alpha), src.y*alpha + wrap.y*(1-alpha), src.z*alpha + wrap.z*(1-alpha), src.w); imgdst(y, x) = colorMixed; } } void strip_caller( const PtrStepSz<uchar4> fisheyeSpherical, const PtrStepSz<uchar4> warpedExtendedFisheyeSpherical, const PtrStepSz<uchar4> warpedSphericalForEye, PtrStep<uchar4> imgdst, hipStream_t stream) { dim3 block(32, 8); dim3 grid((warpedSphericalForEye.cols + block.x - 1) / block.x, (warpedSphericalForEye.rows + block.y - 1) / block.y); strip_kernel << <grid, block, 0, stream >> >(fisheyeSpherical, warpedExtendedFisheyeSpherical, warpedSphericalForEye, imgdst); if (stream == 0) hipDeviceSynchronize(); }
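// In combine_kernel above, the left/right colours are mixed with two per-pixel weights:
// alpha-proportional weights (blendL/blendR), a softmax that also rewards larger optical-flow
// magnitude, and a tanh "deghost" coefficient that interpolates between the two as the colours
// disagree. The function below restates that weight computation on the host with the same
// constants, for the branch where both alphas are nonzero; the name deghost_weights and the
// host-side form are illustrative only.
#include <cmath>

// alphaL/alphaR: the two 0-255 alpha values; magL/magR: flow magnitudes already divided by
// the image width; colorDiff: summed absolute RGB difference divided by 255.
void deghost_weights(float alphaL, float alphaR, float magL, float magR, float colorDiff,
                     float &weightL, float &weightR)
{
    const float kColorDiffCoef    = 10.0f;
    const float kSoftmaxSharpness = 10.0f;
    const float kFlowMagCoef      = 20.0f;

    float blendL = alphaL, blendR = alphaR;     // weights proportional to alpha
    float norm = blendL + blendR;
    blendL /= norm;
    blendR /= norm;

    // Softmax sharpened by flow magnitude, as in the kernel.
    double expL = std::exp(kSoftmaxSharpness * blendL * (1.0 + kFlowMagCoef * magL));
    double expR = std::exp(kSoftmaxSharpness * blendR * (1.0 + kFlowMagCoef * magR));
    double sumExp = expL + expR + 0.00001;
    float softmaxL = float(expL / sumExp);
    float softmaxR = float(expR / sumExp);

    // The more the two colours differ, the more the softmax dominates the plain alpha blend.
    float deghostCoef = std::tanh(colorDiff * kColorDiffCoef);
    weightL = blendL * (1.0f - deghostCoef) + softmaxL * deghostCoef;
    weightR = blendR * (1.0f - deghostCoef) + softmaxR * deghostCoef;
}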
3c971626e096b2fc41c5b0097ef57e194b7ad738.cu
#include <opencv2/core/cuda_types.hpp> #include <opencv2/cudev/common.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <vector_types.h> using namespace cv; using namespace cv::cuda; //自定义内核函数 __global__ void combine_kernel(const PtrStepSz<uchar4> imageL, const PtrStepSz<uchar4> imageR, const PtrStepSz<float> flowMagL, const PtrStepSz<float> flowMagR, PtrStep<uchar4> dst){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x < imageL.cols && y < imageL.rows) { uchar4 colorL = imageL(y,x); uchar4 colorR = imageR(y,x); unsigned char outAlpha; if(colorL.w > colorR.w) { if(colorL.w / 255.0f > 0.1) outAlpha = 255; else outAlpha = 0; } else { if(colorR.w / 255.0f > 0.1) outAlpha = 255; else outAlpha = 0; } uchar4 colorMixed; if (colorL.w == 0 && colorR.w == 0) { colorMixed = make_uchar4(0, 0, 0, outAlpha); } else if (colorL.w == 0) { colorMixed = make_uchar4(colorR.x, colorR.y, colorR.z, outAlpha); } else if (colorR.w == 0) { colorMixed = make_uchar4(colorL.x, colorL.y, colorL.z, outAlpha); } else { const float magL = flowMagL(y,x) / float(imageL.cols); const float magR = flowMagR(y,x) / float(imageL.cols); float blendL = float(colorL.w); float blendR = float(colorR.w); float norm = blendL + blendR; blendL /= norm; blendR /= norm; const float colorDiff = (abs(colorL.x - colorR.x) + abs(colorL.y - colorR.y) + abs(colorL.z - colorR.z)) / 255.0f; const float kColorDiffCoef = 10.0f; const float kSoftmaxSharpness = 10.0f; const float kFlowMagCoef = 20.0f; // NOTE: this is scaled differently than the test version due to normalizing magL & magR by imageL.cols const float deghostCoef = tanhf(colorDiff * kColorDiffCoef); const double expL = exp(kSoftmaxSharpness * blendL * (1.0 + kFlowMagCoef * magL)); const double expR = exp(kSoftmaxSharpness * blendR * (1.0 + kFlowMagCoef * magR)); const double sumExp = expL + expR + 0.00001; const float softmaxL = float(expL / sumExp); const float softmaxR = float(expR / sumExp); colorMixed = make_uchar4( float(colorL.x)* (blendL * (1-deghostCoef) + softmaxL * deghostCoef) + float(colorR.x)*(blendR * (1-deghostCoef) + softmaxR * deghostCoef), float(colorL.y)* (blendL * (1-deghostCoef) + softmaxL * deghostCoef) + float(colorR.y)*(blendR * (1-deghostCoef) + softmaxR * deghostCoef), float(colorL.z)* (blendL * (1-deghostCoef) + softmaxL * deghostCoef) + float(colorR.z)*(blendR * (1-deghostCoef) + softmaxR * deghostCoef), 255); } dst(y, x) = colorMixed; // uchar4 v = imageL(y,x); // dst(y,x) = make_uchar4(v.x,v.y,v.z,255); } } void combine_caller(const PtrStepSz<uchar4>& imageL, const PtrStepSz<uchar4>& imageR, const PtrStepSz<float>& flowMagL, const PtrStepSz<float>& flowMagR, PtrStep<uchar4> dst,cudaStream_t stream){ dim3 block(32,8); dim3 grid((imageL.cols + block.x - 1)/block.x,(imageL.rows + block.y - 1)/block.y); combine_kernel<<<grid,block,0,stream>>>(imageL,imageR,flowMagL,flowMagR, dst); if(stream == 0) cudaDeviceSynchronize(); } __global__ void shift_kernel(const PtrStepSz<float> shiftMat, PtrStep<uchar4> dst){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x < shiftMat.cols && y < shiftMat.rows) { uchar4 image = dst(y,x); float alpha = shiftMat(y,x); image.w = (int)(image.w * alpha); dst(y,x) = image; } } void alpha_cuda_caller(const PtrStepSz<float>& shiftMat, PtrStep<uchar4> dst, cudaStream_t stream){ dim3 block(32,8); dim3 grid((shiftMat.cols + block.x - 1)/block.x,(shiftMat.rows + block.y - 1)/block.y); 
shift_kernel<<<grid,block,0,stream>>>(shiftMat,dst); if(stream == 0) cudaDeviceSynchronize(); } __global__ void feather_alpha_chanel_kernel( const PtrStepSz<uchar4> src, PtrStep<uchar4> dst, const int feathersize) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; const int yFeatherStart = src.rows - 1 - feathersize; if (x < src.cols && y < src.rows) { uchar4 gsrc = src(y, x); const float alpha = 1.0f - float(y - yFeatherStart) / float(feathersize); uchar4 colorMixed; unsigned char temp = gsrc.w <= (unsigned char)(255.0f * alpha) ? gsrc.w : (unsigned char)(255.0f * alpha); if (y >= yFeatherStart) { colorMixed = make_uchar4( gsrc.x, gsrc.y, gsrc.z, temp ); } else { colorMixed = make_uchar4( gsrc.x, gsrc.y, gsrc.z, gsrc.w ); } dst(y, x) = colorMixed; } } void feather_alpha_chanel_caller( const PtrStepSz<uchar4>& src, PtrStep<uchar4> dst, const int feathersize, cudaStream_t stream) { dim3 block(32, 8); dim3 grid((src.cols + block.x - 1) / block.x, (src.rows + block.y - 1) / block.y); feather_alpha_chanel_kernel <<<grid, block, 0, stream >>>(src, dst, feathersize); if (stream == 0) cudaDeviceSynchronize(); } __global__ void extendimage_kernal(const PtrStepSz<uchar4> gpucroppedSideSpherical, PtrStep<uchar4> gpuextendedSideSpherical, const PtrStepSz<uchar4> gpufisheyeSpherical, PtrStep<uchar4> gpuextendedFisheyeSpherical, const int extendedWidth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; const int width = gpufisheyeSpherical.cols; if (x < extendedWidth && y < gpufisheyeSpherical.rows) { uchar4 g1 = gpucroppedSideSpherical(y, x % width); uchar4 g2 = gpufisheyeSpherical(y, x % width); gpuextendedSideSpherical(y, x) = g1; gpuextendedFisheyeSpherical(y, x) = g2; } } void extendimage_caller(const PtrStepSz<uchar4>& gpucroppedSideSpherical, PtrStep<uchar4> gpuextendedSideSpherical, const PtrStepSz<uchar4>& gpufisheyeSpherical, PtrStep<uchar4> gpuextendedFisheyeSpherical, const int extendedWidth, cudaStream_t stream) { dim3 block(32, 8); dim3 grid((extendedWidth + block.x - 1) / block.x, (gpucroppedSideSpherical.rows + block.y - 1) / block.y); extendimage_kernal << <grid, block, 0, stream >> >(gpucroppedSideSpherical, gpuextendedSideSpherical, gpufisheyeSpherical, gpuextendedFisheyeSpherical, extendedWidth); if (stream == 0) cudaDeviceSynchronize(); } __global__ void flatten_kernel( const PtrStepSz<uchar4> bottomLayer, const PtrStepSz<uchar4> topLayer, PtrStep<uchar4> dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < bottomLayer.cols && y < bottomLayer.rows && x >= 0 && y >= 0) { uchar4 baseColor = bottomLayer(y, x); uchar4 topColor; if (x < topLayer.cols && y < topLayer.rows) topColor = topLayer(y, x); else topColor = make_uchar4(0.0, 0.0, 0.0, 0.0); const float colorDiff = (abs(baseColor.x - topColor.x) + abs(baseColor.y - topColor.y) + abs(baseColor.z - topColor.z)) / 255.0f; static const float kColorDiffCoef = 5.0f; static const float kSoftmaxSharpness = 5.0f; static const float kBaseLayerBias = 2.0f; const float deghostCoef = tanhf(colorDiff * kColorDiffCoef); const float alphaR = topColor.w / 255.0f; const float alphaL = 1.0f - alphaR; const double expL = exp(kSoftmaxSharpness * alphaL * kBaseLayerBias); const double expR = exp(kSoftmaxSharpness * alphaR); const double sumExp = expL + expR + 0.00001; const float softmaxL = float(expL / sumExp); const float softmaxR = 1.0f - softmaxL; unsigned char outAlpha; if (topColor.w >= baseColor.w) 
outAlpha = topColor.w; else outAlpha = baseColor.w; uchar4 colorMixed; colorMixed = make_uchar4( float(baseColor.x) * (alphaL * (1 - deghostCoef) + softmaxL * deghostCoef) + float(topColor.x) * (alphaR * (1 - deghostCoef) + softmaxR * deghostCoef), float(baseColor.y) * (alphaL * (1 - deghostCoef) + softmaxL * deghostCoef) + float(topColor.y) * (alphaR * (1 - deghostCoef) + softmaxR * deghostCoef), float(baseColor.z) * (alphaL * (1 - deghostCoef) + softmaxL * deghostCoef) + float(topColor.z) * (alphaR * (1 - deghostCoef) + softmaxR * deghostCoef), outAlpha); dst(y, x) = colorMixed; } } void flatten_caller( const PtrStepSz<uchar4>& bottomLayer, const PtrStepSz<uchar4>& topLayer, PtrStep<uchar4> dst, cudaStream_t stream) { dim3 block(32, 8); dim3 grid((bottomLayer.cols + block.x - 1) / block.x, (bottomLayer.rows + block.y - 1) / block.y); flatten_kernel << <grid, block, 0, stream >> >(bottomLayer, topLayer, dst); if (stream == 0) cudaDeviceSynchronize(); } __global__ void strip_kernel( const PtrStepSz<uchar4> fisheyeSpherical, const PtrStepSz<uchar4> warpedExtendedFisheyeSpherical, const PtrStepSz<uchar4> warpedSphericalForEye, PtrStep<uchar4> imgdst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int maxBlendX = float(fisheyeSpherical.cols) * 0.2; if (y < warpedSphericalForEye.rows && x < maxBlendX) { uchar4 src = warpedSphericalForEye(y, x); uchar4 wrap = warpedExtendedFisheyeSpherical(y, x+fisheyeSpherical.cols); float alpha = 1.0f - max(0.0f, min(1.0f, (x - float(maxBlendX) * 0.333f) / (float(maxBlendX) * 0.667f - float(maxBlendX) * 0.333f))); uchar4 colorMixed = make_uchar4( src.x*alpha + wrap.x*(1-alpha), src.y*alpha + wrap.y*(1-alpha), src.z*alpha + wrap.z*(1-alpha), src.w); imgdst(y, x) = colorMixed; } } void strip_caller( const PtrStepSz<uchar4> fisheyeSpherical, const PtrStepSz<uchar4> warpedExtendedFisheyeSpherical, const PtrStepSz<uchar4> warpedSphericalForEye, PtrStep<uchar4> imgdst, cudaStream_t stream) { dim3 block(32, 8); dim3 grid((warpedSphericalForEye.cols + block.x - 1) / block.x, (warpedSphericalForEye.rows + block.y - 1) / block.y); strip_kernel << <grid, block, 0, stream >> >(fisheyeSpherical, warpedExtendedFisheyeSpherical, warpedSphericalForEye, imgdst); if (stream == 0) cudaDeviceSynchronize(); }
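// The wrappers in this file take cv::cuda::PtrStepSz / PtrStep views, and cv::cuda::GpuMat
// converts to those implicitly, so they can be driven straight from GpuMat buffers. Below is a
// hypothetical host-side driver for combine_caller (CV_8UC4 images, CV_32FC1 flow magnitudes,
// all the same size, default stream). The file names and the surrounding main() are illustrative;
// this is a usage sketch, not code from the original project.
#include <opencv2/core/cuda.hpp>
#include <opencv2/imgcodecs.hpp>
#include <cuda_runtime.h>

// Prototype of the caller defined above.
void combine_caller(const cv::cuda::PtrStepSz<uchar4>& imageL,
                    const cv::cuda::PtrStepSz<uchar4>& imageR,
                    const cv::cuda::PtrStepSz<float>& flowMagL,
                    const cv::cuda::PtrStepSz<float>& flowMagR,
                    cv::cuda::PtrStep<uchar4> dst, cudaStream_t stream);

int main()
{
    // Hypothetical inputs: two 4-channel (BGRA) images and two flow-magnitude maps.
    cv::Mat imgL = cv::imread("left.png", cv::IMREAD_UNCHANGED);   // expected CV_8UC4
    cv::Mat imgR = cv::imread("right.png", cv::IMREAD_UNCHANGED);  // expected CV_8UC4
    cv::Mat magL(imgL.size(), CV_32FC1, cv::Scalar(0));
    cv::Mat magR(imgL.size(), CV_32FC1, cv::Scalar(0));

    cv::cuda::GpuMat dImgL(imgL), dImgR(imgR), dMagL(magL), dMagR(magR);  // upload to device
    cv::cuda::GpuMat dDst(imgL.size(), CV_8UC4);

    combine_caller(dImgL, dImgR, dMagL, dMagR, dDst, 0);  // stream 0: the caller synchronizes

    cv::Mat result;
    dDst.download(result);
    cv::imwrite("blended.png", result);
    return 0;
}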
888d71a3cb2daefd497f3dde85b5e7bbc32a79b8.hip
// !!! This is a file automatically generated by hipify!!! // smc.cu --- Part of the project OPLib 1.0, a high performance pricing library // based on operator methods, higher level BLAS and multicore architectures // Author: 2009 Claudio Albanese // Maintainer: Claudio Albanese <[email protected]> // Created: April-July 2009 // Version: 1.0.0 // Credits: The CUDA code for SGEMM4, SGEMV4 and SSQMM were inspired by // Vasily Volkov's implementation of SGEMM // We use several variations of the multi-threaded Mersenne Twister algorithm of // period 2203 due to Makoto Matsumoto. // The Monte Carlo routine in SMC includes code by Victor Podlozhnyuk // included in the CUDA SDK. // CPU-side BLAS and random number generators link to primitives in the // Intel Math Kernel Libraries. // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; see the file COPYING. If not, write to // the Free Software Foundation, Inc., 59 Temple Place - Suite 330, // Boston, MA 02111-1307, USA. #include "MersenneTwister.h" #include <hip/hip_runtime.h> #define MAX_NUMBER_OF_FACTORS 100 // must be an even number #define CHOLM_SZ MAX_NUMBER_OF_FACTORS*MAX_NUMBER_OF_FACTORS #ifdef LINUX #define __declspec(x) #endif __constant__ float cholm[CHOLM_SZ]; __device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT]; static mt_struct_stripped h_MT[MT_RNG_COUNT]; #define PI 3.14159265358979f __device__ void BoxMuller(float& u1, float& u2){ float r = sqrtf(-2.0f * logf(u1)); float phi = 2 * PI * u2; u1 = r * __cosf(phi); u2 = r * __sinf(phi); } /////////////////////////////////////////////////////////////////////////////// // Polynomial approximation of cumulative normal distribution function /////////////////////////////////////////////////////////////////////////////// __device__ inline float cndGPU(float d) { const float A1 = 0.31938153f; const float A2 = -0.356563782f; const float A3 = 1.781477937f; const float A4 = -1.821255978f; const float A5 = 1.330274429f; const float RSQRT2PI = 0.39894228040143267793994605993438f; float K = 1.0f / (1.0f + 0.2316419f * fabsf(d)); float cnd = RSQRT2PI * __expf(- 0.5f * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))); if(d > 0) cnd = 1.0f - cnd; return cnd; } //Load twister configurations extern "C" __declspec( dllexport ) int opcuda_mc_load_mt_gpu(char * MT_stream, long sz){ char * h_MT_ptr = (char *) MT_stream; if(sz != sizeof(h_MT)) return 1; for(int i = 0; i<sizeof(h_MT); i++) { h_MT_ptr[i] = MT_stream[i]; } return 0; } extern "C" __declspec( dllexport ) int opcuda_mc_nrng() { return MT_RNG_COUNT; } extern "C" __declspec( dllexport ) int opcuda_mc_status_sz() { return MT_NN*MT_RNG_COUNT; } //////////////////////////////////////////////////////////////////////////////// // Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random. // For coalesced global writes MT_RNG_COUNT should be a multiple of warp size. // Initial states for each generator are the same, since the states are // initialized from the global seed. 
In order to improve distribution properties // on small NPerRng we supply dedicated (local) seeds to each twister. //////////////////////////////////////////////////////////////////////////////// __global__ void mc_setseed_device(unsigned int* mt_rs) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int iRng = tid; const int nRng = blockDim.x * gridDim.x; mt_struct_stripped config = ds_MT[iRng]; //Initialize state variable mt_rs[tid + nRng * 0] = config.seed; for(int iState = 1; iState < MT_NN; iState++) mt_rs[tid + nRng * iState] = (1812433253U * (mt_rs[tid + nRng * (iState - 1)] ^ (mt_rs[tid + nRng *(iState - 1)] >> 30)) + iState) & MT_WMASK; } //Initialize/seed twister for current GPU context extern "C" __declspec( dllexport ) void opcuda_mc_setseed(unsigned long int host_seedptr, unsigned int mtptr){ int i; //Need to be thread-safe mt_struct_stripped *MT = (mt_struct_stripped *)malloc(MT_RNG_COUNT * sizeof(mt_struct_stripped)); unsigned int * seed = (unsigned int *) host_seedptr; for(i = 0; i < MT_RNG_COUNT; i++) { MT[i] = h_MT[i]; MT[i].seed = seed[i]; } hipMemcpyToSymbol(ds_MT, MT, sizeof(h_MT)); hipLaunchKernelGGL(( mc_setseed_device), dim3(64), dim3(64), 0, 0, (unsigned int*) mtptr); free(MT); } __global__ void mc1f_device1024(unsigned int* mt_rs, short y0, short *y_sk, int nscenPerRng, const int nk, const int d, float *ctpker_m_yy, int *m_k){ const int iRng = blockDim.x * blockIdx.x + threadIdx.x; const int nRng = blockDim.x * gridDim.x; int iState, iState1, iStateM; unsigned int mti, mti1, mtiM, x; short yprevious; float rand; unsigned int mt[MT_NN]; unsigned *rmt = &mt_rs[iRng]; //coalesced read of status vector #pragma unroll for(iState = 0; iState < MT_NN; iState++) { mt[iState] = *rmt; rmt += nRng; } __syncthreads(); //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; iState = 0; mti1 = mt[0]; int scen = 0; int k = 0; yprevious = y0; for(; ;){ //Mersenne Twister iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //convert to (0, 1] float rand = ((float)x + 1.0f) / 4294967296.0f; short *ry = y_sk + iRng + nRng * scen + nRng * nscenPerRng * k; int ub, lb; int m = m_k[k]; float *rker = &ctpker_m_yy[m*d*d+yprevious]; ub = d-1; lb = 0; int ymid; short y; #pragma unroll for(int iter = 0; iter<10; iter++) { ymid = (ub + lb)/2; if(rand < rker[d * ymid]) ub = ymid; //this is the bottleneck else lb = ymid; } y = ub; //uncomment the following for debug checks //if(ub > lb +1) y = -1; //this will trigger an exception //if(rker[d * ub] < rand) y = - 1000 - ub; //this will trigger an exception //if(rker[d * lb] > rand && lb>0) y = -2000 - lb; //this will trigger an exception __syncthreads(); *ry = y; ry += nRng * nscenPerRng; yprevious = y; if(++k >= nk) { k = 0; yprevious = y0; if(++scen >= nscenPerRng) break; } } //end for(;;) //save status vector of random number generator rmt = &mt_rs[iRng]; __syncthreads(); #pragma unroll for(iState = 0; iState < MT_NN; iState++) { *rmt = mt[iState]; rmt += nRng; } } __global__ void mc1f_device512(unsigned int* mt_rs, short y0, short *y_sk, int nscenPerRng, const int nk, const int d, float *ctpker_m_yy, int *m_k){ const int iRng = blockDim.x * blockIdx.x + threadIdx.x; const int nRng = blockDim.x * gridDim.x; int iState, iState1, iStateM; unsigned int mti, mti1, mtiM, x; short yprevious; float rand; unsigned int mt[MT_NN]; unsigned *rmt = &mt_rs[iRng]; //coalesced read of status vector #pragma unroll for(iState = 0; iState < MT_NN; iState++) { mt[iState] = *rmt; rmt += nRng; } __syncthreads(); //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; iState = 0; mti1 = mt[0]; int scen = 0; int k = 0; yprevious = y0; for(; ;){ //Mersenne Twister iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //convert to (0, 1] float rand = ((float)x + 1.0f) / 4294967296.0f; short *ry = y_sk + iRng + nRng * scen + nRng * nscenPerRng * k; int ub, lb; int m = m_k[k]; float *rker = &ctpker_m_yy[m*d*d+yprevious]; ub = d-1; lb = 0; int ymid; short y; #pragma unroll for(int iter = 0; iter<9; iter++) { ymid = (ub + lb)/2; if(rand < rker[d * ymid]) ub = ymid; //this read is the bottleneck else lb = ymid; } y = ub; //uncomment the following for debug checks //if(ub > lb +1) y = -1; //this will trigger an exception //if(rker[d * ub] < rand) y = - 1000 - ub; //this will trigger an exception //if(rker[d * lb] > rand && lb>0) y = -2000 - lb; //this will trigger an exception __syncthreads(); *ry = y; ry += nRng * nscenPerRng; yprevious = y; if(++k >= nk) { k = 0; yprevious = y0; if(++scen >= nscenPerRng) break; } } //end for(;;) //save status vector of random number generator rmt = &mt_rs[iRng]; __syncthreads(); #pragma unroll for(iState = 0; iState < MT_NN; iState++) { *rmt = mt[iState]; rmt += nRng; } } __global__ void mc1f_device256(unsigned int* mt_rs, short y0, short *y_sk, int nscenPerRng, const int nk, const int d, float *ctpker_m_yy, int *m_k){ const int iRng = blockDim.x * blockIdx.x + threadIdx.x; const int nRng = blockDim.x * gridDim.x; int iState, iState1, iStateM; unsigned int mti, mti1, mtiM, x; short yprevious; float rand; unsigned int mt[MT_NN]; unsigned *rmt = &mt_rs[iRng]; //coalesced read of status vector #pragma unroll for(iState = 0; iState < MT_NN; iState++) { mt[iState] = *rmt; rmt += nRng; } __syncthreads(); //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; iState = 0; mti1 = mt[0]; int scen = 0; int k = 0; yprevious = y0; for(; ;){ //Mersenne Twister iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //convert to (0, 1] float rand = ((float)x + 1.0f) / 4294967296.0f; short *ry = y_sk + iRng + nRng * scen + nRng * nscenPerRng * k; int ub, lb; int m = m_k[k]; float *rker = &ctpker_m_yy[m*d*d+yprevious]; ub = d-1; lb = 0; int ymid; short y; float ctpker; #pragma unroll for(int iter = 0; iter<8; iter++) { ymid = (ub + lb)/2; ctpker = rker[d * ymid]; //this is the bottleneck if(rand < ctpker) ub = ymid; else lb = ymid; } y = ub; //uncomment the following for debug checks //if(ub > lb +1) y = -1; //this will trigger an exception //if(rker[d * ub] < rand) y = - 1000 - ub; //this will trigger an exception //if(rker[d * lb] > rand && lb>0) y = -2000 - lb; //this will trigger an exception __syncthreads(); *ry = y; ry += nRng * nscenPerRng; yprevious = y; if(++k >= nk) { k = 0; yprevious = y0; if(++scen >= nscenPerRng) break; } } //end for(;;) //save status vector of random number generator rmt = &mt_rs[iRng]; __syncthreads(); #pragma unroll for(iState = 0; iState < MT_NN; iState++) { *rmt = mt[iState]; rmt += nRng; } } __global__ void mc1f_device128(unsigned int* mt_rs, short y0, short *y_sk, int nscenPerRng, const int nk, const int d, float *ctpker_m_yy, int *m_k){ const int iRng = blockDim.x * blockIdx.x + threadIdx.x; const int nRng = blockDim.x * gridDim.x; int iState, iState1, iStateM; unsigned int mti, mti1, mtiM, x; short yprevious; float rand; unsigned int mt[MT_NN]; unsigned *rmt = &mt_rs[iRng]; //coalesced read of status vector #pragma unroll for(iState = 0; iState < MT_NN; iState++) { mt[iState] = *rmt; rmt += nRng; } __syncthreads(); //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; iState = 0; mti1 = mt[0]; int scen = 0; int k = 0; yprevious = y0; for(; ;){ //Mersenne Twister iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //convert to (0, 1] float rand = ((float)x + 1.0f) / 4294967296.0f; short *ry = y_sk + iRng + nRng * scen + nRng * nscenPerRng * k; int ub, lb; int m = m_k[k]; float *rker = &ctpker_m_yy[m*d*d+yprevious]; ub = d-1; lb = 0; int ymid; short y; float ctpker; #pragma unroll for(int iter = 0; iter<7; iter++) { ymid = (ub + lb)/2; ctpker = rker[d * ymid]; //this is the bottleneck if(rand < ctpker) ub = ymid; else lb = ymid; } y = ub; //uncomment the following for debug checks //if(ub > lb +1) y = -1; //this will trigger an exception //if(rker[d * ub] < rand) y = - 1000 - ub; //this will trigger an exception //if(rker[d * lb] > rand && lb>0) y = -2000 - lb; //this will trigger an exception __syncthreads(); *ry = y; ry += nRng * nscenPerRng; yprevious = y; if(++k >= nk) { k = 0; yprevious = y0; if(++scen >= nscenPerRng) break; } } //end for(;;) //save status vector of random number generator rmt = &mt_rs[iRng]; __syncthreads(); #pragma unroll for(iState = 0; iState < MT_NN; iState++) { *rmt = mt[iState]; rmt += nRng; } } extern "C" __declspec( dllexport ) int opcuda_mc1f(unsigned int mtptr, int y0, unsigned int y_sk, int nscen, int nk, int d, const unsigned int ctpker_m_yy, const unsigned int m_k, const unsigned long int yhostptr) { int n_per_rng = nscen/MT_RNG_COUNT; int size = nscen * nk * sizeof(short); int status = 0; if(d>=1024) return 0; if(d<=1024 && d>512) { hipLaunchKernelGGL(( mc1f_device1024), dim3(32), dim3(128), nk*sizeof(int), 0, (unsigned int *) mtptr, y0, (short *) y_sk, n_per_rng, nk, d, (float *) ctpker_m_yy, (int *) m_k); } if(d<=512 && d>256) { hipLaunchKernelGGL(( mc1f_device512), dim3(32), dim3(128), nk*sizeof(int), 0, (unsigned int *) mtptr, y0, (short *) y_sk, n_per_rng, nk, d, (float *) ctpker_m_yy, (int *) m_k); } if(d<=256 && d>128) { hipLaunchKernelGGL(( mc1f_device256), dim3(32), dim3(128), nk*sizeof(int), 0, (unsigned int *) mtptr, y0, (short *) y_sk, n_per_rng, nk, d, (float *) ctpker_m_yy, (int *) m_k); } if(d<=128) { hipLaunchKernelGGL(( mc1f_device128), dim3(32), dim3(128), nk*sizeof(int), 0, (unsigned int *) mtptr, y0, (short *) y_sk, n_per_rng, nk, d, (float *) ctpker_m_yy, (int *) m_k); } status = hipMemcpy((void *) yhostptr, (void *) y_sk, size, hipMemcpyDeviceToHost); return status; } __global__ void mc1f_mt_benchmark(unsigned int* mt_rs, float * unif_s, int nscenPerRng){ const int iRng = blockDim.x * blockIdx.x + threadIdx.x; const int nRng = blockDim.x * gridDim.x; int iState, iState1, iStateM; unsigned int mti, mti1, mtiM, x; unsigned int mt[MT_NN]; unsigned *rmt = &mt_rs[iRng]; //coalesced read of status vector #pragma unroll for(iState = 0; iState < MT_NN; iState++) { mt[iState] = *rmt; rmt += nRng; } __syncthreads(); //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; iState = 0; mti1 = mt[0]; for(int scen = 0; scen < nscenPerRng; scen+= 1){ //Mersenne Twister iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //convert to [0, 1) float float rand = (float)x / 4294967296.0f; __syncthreads(); unif_s[iRng + nRng * scen] = rand; } //end for(;;) //save status vector of random number generator rmt = &mt_rs[iRng]; __syncthreads(); #pragma unroll for(iState = 0; iState < MT_NN; iState++) { *rmt = mt[iState]; rmt += nRng; } } extern "C" __declspec( dllexport ) int opcuda_mt_benchmark(unsigned int mtptr, unsigned int unif_ptr, int nscen) { int n_per_rng = nscen/MT_RNG_COUNT; hipLaunchKernelGGL(( mc1f_mt_benchmark) , dim3(32), dim3(128), 0, 0, (unsigned int *) mtptr, (float *) unif_ptr, n_per_rng); return 0; }
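// Each mc1f_device* variant above draws the next state by inverse-CDF sampling: a uniform draw
// in (0, 1] is binary-searched against one row of the cumulative transition kernel ctpker_m_yy,
// with the search unrolled to log2(d) steps (10, 9, 8 or 7 iterations for d up to 1024, 512,
// 256, 128). The host-side function below restates that step with an ordinary loop; the name
// sample_next_state and the contiguous std::vector row are illustrative only.
#include <vector>

// cumRow: one non-decreasing row of the cumulative transition kernel (last entry ~1.0);
// u: uniform draw in (0, 1]. Returns the smallest state index whose cumulative probability
// exceeds u, matching what the unrolled device search produces.
int sample_next_state(const std::vector<float>& cumRow, float u)
{
    int lb = 0, ub = int(cumRow.size()) - 1;
    while (ub - lb > 1) {
        int mid = (ub + lb) / 2;
        if (u < cumRow[mid]) ub = mid;   // same comparison as the device code
        else                 lb = mid;
    }
    return (u < cumRow[lb]) ? lb : ub;   // lb can only win when lb == 0
}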
888d71a3cb2daefd497f3dde85b5e7bbc32a79b8.cu
// smc.cu --- Part of the project OPLib 1.0, a high performance pricing library // based on operator methods, higher level BLAS and multicore architectures // Author: 2009 Claudio Albanese // Maintainer: Claudio Albanese <[email protected]> // Created: April-July 2009 // Version: 1.0.0 // Credits: The CUDA code for SGEMM4, SGEMV4 and SSQMM were inspired by // Vasily Volkov's implementation of SGEMM // We use several variations of the multi-threaded Mersenne Twister algorithm of // period 2203 due to Makoto Matsumoto. // The Monte Carlo routine in SMC includes code by Victor Podlozhnyuk // included in the CUDA SDK. // CPU-side BLAS and random number generators link to primitives in the // Intel Math Kernel Libraries. // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; see the file COPYING. If not, write to // the Free Software Foundation, Inc., 59 Temple Place - Suite 330, // Boston, MA 02111-1307, USA. #include "MersenneTwister.h" #include <cuda.h> #define MAX_NUMBER_OF_FACTORS 100 // must be an even number #define CHOLM_SZ MAX_NUMBER_OF_FACTORS*MAX_NUMBER_OF_FACTORS #ifdef LINUX #define __declspec(x) #endif __constant__ float cholm[CHOLM_SZ]; __device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT]; static mt_struct_stripped h_MT[MT_RNG_COUNT]; #define PI 3.14159265358979f __device__ void BoxMuller(float& u1, float& u2){ float r = sqrtf(-2.0f * logf(u1)); float phi = 2 * PI * u2; u1 = r * __cosf(phi); u2 = r * __sinf(phi); } /////////////////////////////////////////////////////////////////////////////// // Polynomial approximation of cumulative normal distribution function /////////////////////////////////////////////////////////////////////////////// __device__ inline float cndGPU(float d) { const float A1 = 0.31938153f; const float A2 = -0.356563782f; const float A3 = 1.781477937f; const float A4 = -1.821255978f; const float A5 = 1.330274429f; const float RSQRT2PI = 0.39894228040143267793994605993438f; float K = 1.0f / (1.0f + 0.2316419f * fabsf(d)); float cnd = RSQRT2PI * __expf(- 0.5f * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))); if(d > 0) cnd = 1.0f - cnd; return cnd; } //Load twister configurations extern "C" __declspec( dllexport ) int opcuda_mc_load_mt_gpu(char * MT_stream, long sz){ char * h_MT_ptr = (char *) MT_stream; if(sz != sizeof(h_MT)) return 1; for(int i = 0; i<sizeof(h_MT); i++) { h_MT_ptr[i] = MT_stream[i]; } return 0; } extern "C" __declspec( dllexport ) int opcuda_mc_nrng() { return MT_RNG_COUNT; } extern "C" __declspec( dllexport ) int opcuda_mc_status_sz() { return MT_NN*MT_RNG_COUNT; } //////////////////////////////////////////////////////////////////////////////// // Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random. // For coalesced global writes MT_RNG_COUNT should be a multiple of warp size. // Initial states for each generator are the same, since the states are // initialized from the global seed. 
In order to improve distribution properties // on small NPerRng we supply dedicated (local) seeds to each twister. //////////////////////////////////////////////////////////////////////////////// __global__ void mc_setseed_device(unsigned int* mt_rs) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int iRng = tid; const int nRng = blockDim.x * gridDim.x; mt_struct_stripped config = ds_MT[iRng]; //Initialize state variable mt_rs[tid + nRng * 0] = config.seed; for(int iState = 1; iState < MT_NN; iState++) mt_rs[tid + nRng * iState] = (1812433253U * (mt_rs[tid + nRng * (iState - 1)] ^ (mt_rs[tid + nRng *(iState - 1)] >> 30)) + iState) & MT_WMASK; } //Initialize/seed twister for current GPU context extern "C" __declspec( dllexport ) void opcuda_mc_setseed(unsigned long int host_seedptr, unsigned int mtptr){ int i; //Need to be thread-safe mt_struct_stripped *MT = (mt_struct_stripped *)malloc(MT_RNG_COUNT * sizeof(mt_struct_stripped)); unsigned int * seed = (unsigned int *) host_seedptr; for(i = 0; i < MT_RNG_COUNT; i++) { MT[i] = h_MT[i]; MT[i].seed = seed[i]; } cudaMemcpyToSymbol(ds_MT, MT, sizeof(h_MT)); mc_setseed_device<<<64, 64>>>((unsigned int*) mtptr); free(MT); } __global__ void mc1f_device1024(unsigned int* mt_rs, short y0, short *y_sk, int nscenPerRng, const int nk, const int d, float *ctpker_m_yy, int *m_k){ const int iRng = blockDim.x * blockIdx.x + threadIdx.x; const int nRng = blockDim.x * gridDim.x; int iState, iState1, iStateM; unsigned int mti, mti1, mtiM, x; short yprevious; float rand; unsigned int mt[MT_NN]; unsigned *rmt = &mt_rs[iRng]; //coalesced read of status vector #pragma unroll for(iState = 0; iState < MT_NN; iState++) { mt[iState] = *rmt; rmt += nRng; } __syncthreads(); //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; iState = 0; mti1 = mt[0]; int scen = 0; int k = 0; yprevious = y0; for(; ;){ //Mersenne Twister iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //convert to (0, 1] float rand = ((float)x + 1.0f) / 4294967296.0f; short *ry = y_sk + iRng + nRng * scen + nRng * nscenPerRng * k; int ub, lb; int m = m_k[k]; float *rker = &ctpker_m_yy[m*d*d+yprevious]; ub = d-1; lb = 0; int ymid; short y; #pragma unroll for(int iter = 0; iter<10; iter++) { ymid = (ub + lb)/2; if(rand < rker[d * ymid]) ub = ymid; //this is the bottleneck else lb = ymid; } y = ub; //uncomment the following for debug checks //if(ub > lb +1) y = -1; //this will trigger an exception //if(rker[d * ub] < rand) y = - 1000 - ub; //this will trigger an exception //if(rker[d * lb] > rand && lb>0) y = -2000 - lb; //this will trigger an exception __syncthreads(); *ry = y; ry += nRng * nscenPerRng; yprevious = y; if(++k >= nk) { k = 0; yprevious = y0; if(++scen >= nscenPerRng) break; } } //end for(;;) //save status vector of random number generator rmt = &mt_rs[iRng]; __syncthreads(); #pragma unroll for(iState = 0; iState < MT_NN; iState++) { *rmt = mt[iState]; rmt += nRng; } } __global__ void mc1f_device512(unsigned int* mt_rs, short y0, short *y_sk, int nscenPerRng, const int nk, const int d, float *ctpker_m_yy, int *m_k){ const int iRng = blockDim.x * blockIdx.x + threadIdx.x; const int nRng = blockDim.x * gridDim.x; int iState, iState1, iStateM; unsigned int mti, mti1, mtiM, x; short yprevious; float rand; unsigned int mt[MT_NN]; unsigned *rmt = &mt_rs[iRng]; //coalesced read of status vector #pragma unroll for(iState = 0; iState < MT_NN; iState++) { mt[iState] = *rmt; rmt += nRng; } __syncthreads(); //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; iState = 0; mti1 = mt[0]; int scen = 0; int k = 0; yprevious = y0; for(; ;){ //Mersenne Twister iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //convert to (0, 1] float rand = ((float)x + 1.0f) / 4294967296.0f; short *ry = y_sk + iRng + nRng * scen + nRng * nscenPerRng * k; int ub, lb; int m = m_k[k]; float *rker = &ctpker_m_yy[m*d*d+yprevious]; ub = d-1; lb = 0; int ymid; short y; #pragma unroll for(int iter = 0; iter<9; iter++) { ymid = (ub + lb)/2; if(rand < rker[d * ymid]) ub = ymid; //this read is the bottleneck else lb = ymid; } y = ub; //uncomment the following for debug checks //if(ub > lb +1) y = -1; //this will trigger an exception //if(rker[d * ub] < rand) y = - 1000 - ub; //this will trigger an exception //if(rker[d * lb] > rand && lb>0) y = -2000 - lb; //this will trigger an exception __syncthreads(); *ry = y; ry += nRng * nscenPerRng; yprevious = y; if(++k >= nk) { k = 0; yprevious = y0; if(++scen >= nscenPerRng) break; } } //end for(;;) //save status vector of random number generator rmt = &mt_rs[iRng]; __syncthreads(); #pragma unroll for(iState = 0; iState < MT_NN; iState++) { *rmt = mt[iState]; rmt += nRng; } } __global__ void mc1f_device256(unsigned int* mt_rs, short y0, short *y_sk, int nscenPerRng, const int nk, const int d, float *ctpker_m_yy, int *m_k){ const int iRng = blockDim.x * blockIdx.x + threadIdx.x; const int nRng = blockDim.x * gridDim.x; int iState, iState1, iStateM; unsigned int mti, mti1, mtiM, x; short yprevious; float rand; unsigned int mt[MT_NN]; unsigned *rmt = &mt_rs[iRng]; //coalesced read of status vector #pragma unroll for(iState = 0; iState < MT_NN; iState++) { mt[iState] = *rmt; rmt += nRng; } __syncthreads(); //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; iState = 0; mti1 = mt[0]; int scen = 0; int k = 0; yprevious = y0; for(; ;){ //Mersenne Twister iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //convert to (0, 1] float rand = ((float)x + 1.0f) / 4294967296.0f; short *ry = y_sk + iRng + nRng * scen + nRng * nscenPerRng * k; int ub, lb; int m = m_k[k]; float *rker = &ctpker_m_yy[m*d*d+yprevious]; ub = d-1; lb = 0; int ymid; short y; float ctpker; #pragma unroll for(int iter = 0; iter<8; iter++) { ymid = (ub + lb)/2; ctpker = rker[d * ymid]; //this is the bottleneck if(rand < ctpker) ub = ymid; else lb = ymid; } y = ub; //uncomment the following for debug checks //if(ub > lb +1) y = -1; //this will trigger an exception //if(rker[d * ub] < rand) y = - 1000 - ub; //this will trigger an exception //if(rker[d * lb] > rand && lb>0) y = -2000 - lb; //this will trigger an exception __syncthreads(); *ry = y; ry += nRng * nscenPerRng; yprevious = y; if(++k >= nk) { k = 0; yprevious = y0; if(++scen >= nscenPerRng) break; } } //end for(;;) //save status vector of random number generator rmt = &mt_rs[iRng]; __syncthreads(); #pragma unroll for(iState = 0; iState < MT_NN; iState++) { *rmt = mt[iState]; rmt += nRng; } } __global__ void mc1f_device128(unsigned int* mt_rs, short y0, short *y_sk, int nscenPerRng, const int nk, const int d, float *ctpker_m_yy, int *m_k){ const int iRng = blockDim.x * blockIdx.x + threadIdx.x; const int nRng = blockDim.x * gridDim.x; int iState, iState1, iStateM; unsigned int mti, mti1, mtiM, x; short yprevious; float rand; unsigned int mt[MT_NN]; unsigned *rmt = &mt_rs[iRng]; //coalesced read of status vector #pragma unroll for(iState = 0; iState < MT_NN; iState++) { mt[iState] = *rmt; rmt += nRng; } __syncthreads(); //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; iState = 0; mti1 = mt[0]; int scen = 0; int k = 0; yprevious = y0; for(; ;){ //Mersenne Twister iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //convert to (0, 1] float rand = ((float)x + 1.0f) / 4294967296.0f; short *ry = y_sk + iRng + nRng * scen + nRng * nscenPerRng * k; int ub, lb; int m = m_k[k]; float *rker = &ctpker_m_yy[m*d*d+yprevious]; ub = d-1; lb = 0; int ymid; short y; float ctpker; #pragma unroll for(int iter = 0; iter<7; iter++) { ymid = (ub + lb)/2; ctpker = rker[d * ymid]; //this is the bottleneck if(rand < ctpker) ub = ymid; else lb = ymid; } y = ub; //uncomment the following for debug checks //if(ub > lb +1) y = -1; //this will trigger an exception //if(rker[d * ub] < rand) y = - 1000 - ub; //this will trigger an exception //if(rker[d * lb] > rand && lb>0) y = -2000 - lb; //this will trigger an exception __syncthreads(); *ry = y; ry += nRng * nscenPerRng; yprevious = y; if(++k >= nk) { k = 0; yprevious = y0; if(++scen >= nscenPerRng) break; } } //end for(;;) //save status vector of random number generator rmt = &mt_rs[iRng]; __syncthreads(); #pragma unroll for(iState = 0; iState < MT_NN; iState++) { *rmt = mt[iState]; rmt += nRng; } } extern "C" __declspec( dllexport ) int opcuda_mc1f(unsigned int mtptr, int y0, unsigned int y_sk, int nscen, int nk, int d, const unsigned int ctpker_m_yy, const unsigned int m_k, const unsigned long int yhostptr) { int n_per_rng = nscen/MT_RNG_COUNT; int size = nscen * nk * sizeof(short); int status = 0; if(d>=1024) return 0; if(d<=1024 && d>512) { mc1f_device1024<<<32, 128, nk*sizeof(int)>>> ((unsigned int *) mtptr, y0, (short *) y_sk, n_per_rng, nk, d, (float *) ctpker_m_yy, (int *) m_k); } if(d<=512 && d>256) { mc1f_device512<<<32, 128, nk*sizeof(int)>>> ((unsigned int *) mtptr, y0, (short *) y_sk, n_per_rng, nk, d, (float *) ctpker_m_yy, (int *) m_k); } if(d<=256 && d>128) { mc1f_device256<<<32, 128, nk*sizeof(int)>>> ((unsigned int *) mtptr, y0, (short *) y_sk, n_per_rng, nk, d, (float *) ctpker_m_yy, (int *) m_k); } if(d<=128) { mc1f_device128<<<32, 128, nk*sizeof(int)>>> ((unsigned int *) mtptr, y0, (short *) y_sk, n_per_rng, nk, d, (float *) ctpker_m_yy, (int *) m_k); } status = cudaMemcpy((void *) yhostptr, (void *) y_sk, size, cudaMemcpyDeviceToHost); return status; } __global__ void mc1f_mt_benchmark(unsigned int* mt_rs, float * unif_s, int nscenPerRng){ const int iRng = blockDim.x * blockIdx.x + threadIdx.x; const int nRng = blockDim.x * gridDim.x; int iState, iState1, iStateM; unsigned int mti, mti1, mtiM, x; unsigned int mt[MT_NN]; unsigned *rmt = &mt_rs[iRng]; //coalesced read of status vector #pragma unroll for(iState = 0; iState < MT_NN; iState++) { mt[iState] = *rmt; rmt += nRng; } __syncthreads(); //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; iState = 0; mti1 = mt[0]; for(int scen = 0; scen < nscenPerRng; scen+= 1){ //Mersenne Twister iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? 
config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //convert to [0, 1) float float rand = (float)x / 4294967296.0f; __syncthreads(); unif_s[iRng + nRng * scen] = rand; } //end for(;;) //save status vector of random number generator rmt = &mt_rs[iRng]; __syncthreads(); #pragma unroll for(iState = 0; iState < MT_NN; iState++) { *rmt = mt[iState]; rmt += nRng; } } extern "C" __declspec( dllexport ) int opcuda_mt_benchmark(unsigned int mtptr, unsigned int unif_ptr, int nscen) { int n_per_rng = nscen/MT_RNG_COUNT; mc1f_mt_benchmark <<<32, 128>>> ((unsigned int *) mtptr, (float *) unif_ptr, n_per_rng); return 0; }
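Editor's note: the four mc1f_device* kernels above are identical except for the number of unrolled bisection steps (10, 9, 8 and 7, selected in opcuda_mc1f according to the state dimension d). The sketch below restates that inner step as a plain CPU function so the inversion of the cumulative transition kernel is easier to follow; the function name, the iters parameter and the layout assumption are inferences from the kernel code, not part of the exported opcuda_* interface.

// Illustration only (not part of the opcuda_* API): CPU restatement of the
// bisection performed inside mc1f_device*. Layout assumption: ctpker[d * y + yprev]
// is the cumulative probability of landing in a state <= y given the current
// state yprev, matching the rker[d * ymid] reads in the kernels above.
static int sample_next_state(const float* ctpker, int d, int yprev, float u, int iters)
{
    int lb = 0;
    int ub = d - 1;
    for (int it = 0; it < iters; it++) {      // kernels unroll 10/9/8/7 steps for d up to 1024/512/256/128
        int ymid = (ub + lb) / 2;
        if (u < ctpker[d * ymid + yprev])
            ub = ymid;                        // cumulative value already exceeds u: tighten the upper bracket
        else
            lb = ymid;                        // otherwise raise the lower bracket
    }
    return ub;                                // the kernels store ub as the sampled state
}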
d4aa8f3a931bfa1bdd784eb78fb594dda457509f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define _USE_MATH_DEFINES #include <ATen/native/Activation.h> #include <cmath> #include <thrust/tuple.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/core/Array.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/native/hip/Loops.cuh> #include <c10/hip/HIPMathCompat.h> namespace at { namespace native { // ----------------------------------- // prelu forward // ----------------------------------- template <typename scalar_t> void prelu_cuda_kernel_share_weights( const Tensor& input, Tensor& result, const scalar_t* weight_data) { auto iter = TensorIterator::unary_op(result, input); at::native::gpu_kernel(iter, [weight_data] GPU_LAMBDA (scalar_t input_val) { return (input_val > 0) ? input_val : *weight_data * input_val; }); } template <typename scalar_t> __global__ void prelu_cuda_kernel_multi_weights( scalar_t* result_data, const scalar_t* input_data, const scalar_t* weight_data, int64_t input_stride0, int64_t input_stride1, int64_t input_numel) { int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x; if (linearId >= input_numel) return; // multiply values at each channel with weight[channel_index] int64_t channel = (linearId % input_stride0) / input_stride1; scalar_t input_data_val = input_data[linearId]; result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val; } Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) { TORCH_CHECK(self.is_cuda()); TORCH_CHECK(weight_.is_cuda()); auto input = self.contiguous(); auto weight = weight_.contiguous(); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); int64_t weight_num = weight.numel(); Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto strides = input.strides(); // case1: shared weight for all channels if (weight_num == 1) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] { prelu_cuda_kernel_share_weights<scalar_t>( input, result, weight.data_ptr<scalar_t>()); }); } else { // case2: multiple weights, one for each channel int64_t input_ndim = input.dim(); TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor."); int64_t channel_size = 1; // channel_size default to 1 int64_t input_stride0 = 1, input_stride1 = 1; if (input_ndim > 1) { channel_size = input.size(1); // channel is the 2nd dim of input input_stride0 = strides[0]; input_stride1 = strides[1]; } TORCH_CHECK(channel_size == weight_num, "Mismatch of parameter numbers and input channel size. 
Found parameter numbers = ", weight_num, " and channel size = ", channel_size, "."); // config to run cuda kernel int64_t input_numel = input.numel(); const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel)); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice); TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions"); AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] { hipLaunchKernelGGL(( prelu_cuda_kernel_multi_weights<scalar_t>) , dim3(grid), dim3(block), 0, stream, result.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), weight.data_ptr<scalar_t>(), input_stride0, input_stride1, input_numel); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } return result; } // ----------------------------------- // prelu backward // ----------------------------------- template <typename scalar_t> void prelu_cuda_backward_kernel_share_weights( const Tensor& input, const Tensor& grad_out, Tensor& input_grad, Tensor& weight_grad_collector, const scalar_t* weight_data) { at::TensorIterator iter = TensorIteratorConfig() .add_output(input_grad) .add_output(weight_grad_collector) .add_input(input) .add_input(grad_out) .build(); // N.B. `std::tuple` does not support `::operator=` on device code. gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> { scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out; scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out; return {input_grad, weight_grad_collector}; }); } template <typename scalar_t> __global__ void prelu_cuda_backward_kernel_multi_weights( const scalar_t* input_data, const scalar_t* weight_data, const scalar_t* grad_out_data, scalar_t* input_grad_data, scalar_t* weight_grad_collector, int64_t input_stride0, int64_t input_stride1, int64_t input_numel) { int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x; if (linearId >= input_numel) return; int64_t channel = (linearId % input_stride0) / input_stride1; scalar_t input_data_val = input_data[linearId]; scalar_t grad_out_data_val = grad_out_data[linearId]; input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val; weight_grad_collector[linearId] = (input_data_val > 0) ? 
scalar_t(0) : input_data_val * grad_out_data_val; } std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) { TORCH_CHECK(grad_out_.is_cuda()); TORCH_CHECK(self.is_cuda()); TORCH_CHECK(weight_.is_cuda()); auto input = self.contiguous(); auto grad_out = grad_out_.contiguous(); auto weight = weight_.contiguous(); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); TORCH_CHECK(grad_out.is_contiguous()); int64_t weight_num = weight.numel(); auto strides = input.strides(); auto dims = input.dim(); Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); // case1: shared parameter for all channels if (weight_num == 1) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] { prelu_cuda_backward_kernel_share_weights<scalar_t>( input, grad_out, input_grad, weight_grad_collector, weight.data_ptr<scalar_t>()); }); weight_grad.fill_(weight_grad_collector.sum()); } else { // case2: multiple parameters, one for each channel int64_t input_ndim = input.dim(); TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor."); int64_t channel_size = 1; // channel_size default to 1 int64_t input_stride0 = 1, input_stride1 = 1; if (input_ndim > 1) { channel_size = input.size(1); // channel is the 2nd dim of input input_stride0 = strides[0]; input_stride1 = strides[1]; } TORCH_CHECK(channel_size == weight_num, "Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num, " and channel size = ", channel_size, "."); // config to run cuda kernel int64_t input_numel = input.numel(); const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel)); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice); TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions"); AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] { hipLaunchKernelGGL(( prelu_cuda_backward_kernel_multi_weights<scalar_t>) , dim3(grid), dim3(block), 0, stream, input.data_ptr<scalar_t>(), weight.data_ptr<scalar_t>(), grad_out.data_ptr<scalar_t>(), input_grad.data_ptr<scalar_t>(), weight_grad_collector.data_ptr<scalar_t>(), input_stride0, input_stride1, input_numel); C10_HIP_KERNEL_LAUNCH_CHECK(); }); // update weight_grad std::vector<int64_t> reduce_dims; reduce_dims.push_back(0); if (dims > 2) { for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i); } weight_grad = weight_grad_collector.sum(reduce_dims); } return std::tuple<Tensor, Tensor>{input_grad, weight_grad}; } // ----------------------------------- // hardshrink // ----------------------------------- void hardshrink_kernel(TensorIterator& iter, const Scalar& value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t { return (a >= -lambd && a <= lambd) ? 
scalar_t(0) : a; }); }); } void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0)); }); }); } void shrink_backward_kernel(TensorIterator& iter, const Scalar& value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t { return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val; }); }); } void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() { auto min_val = min.to<scalar_t>(); auto max_val = max.to<scalar_t>(); gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a; }); }); } void softplus_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() { auto beta = beta_.to<scalar_t>(); auto threshold = threshold_.to<scalar_t>(); gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t { return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(::exp(a * beta))) / beta; }); }); } void softplus_backward_kernel(TensorIterator& iter, const Scalar& beta_, const Scalar& threshold_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() { auto beta = beta_.to<scalar_t>(); auto threshold = threshold_.to<scalar_t>(); gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { scalar_t z = ::exp(b * beta); return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.)); }); }); } template <typename scalar_t> void threshold_kernel_impl(TensorIteratorBase& iter, scalar_t threshold, scalar_t value) { gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t { return x <= threshold ? value : other; }); } static void threshold_kernel_cuda(TensorIteratorBase& iter, const Scalar& threshold, const Scalar& value) { AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] { threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>()); }); } void elu_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() { auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>(); auto poscoef = scale.to<scalar_t>(); auto negiptcoef = input_scale.to<scalar_t>(); gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > scalar_t(0) ? 
a * poscoef : (static_cast<scalar_t>(::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef; }); }); } void elu_backward_kernel(TensorIterator& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() { auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>(); auto poscoef = scale.to<scalar_t>(); auto negiptcoef = input_scale.to<scalar_t>(); gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { if (is_result) { return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef; } else { return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(::exp(b * negiptcoef))) : a * poscoef; } }); }); } namespace { void GeluCUDAKernelImpl(TensorIterator& it) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() { using T_ACC = acc_type<scalar_t, true>; gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t { return static_cast<T_ACC>(x) * c10::hip::compat::normcdf(static_cast<T_ACC>(x)); }); }); } void GeluBackwardCUDAKernelImpl(TensorIterator& it) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() { using T_ACC = acc_type<scalar_t, true>; gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5); const T_ACC cdf = c10::hip::compat::normcdf(static_cast<T_ACC>(x)); const T_ACC pdf = c10::hip::compat::exp( T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) * kBeta; return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf); }); }); } void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() { auto negval = negval_.to<scalar_t>(); gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > scalar_t(0) ? a : a * negval; }); }); } void leaky_relu_backward_kernel(TensorIterator& iter, const Scalar& negval_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() { auto negval = negval_.to<scalar_t>(); gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a > scalar_t(0) ? 
b : b * negval; }); }); } void hardswish_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC one_sixth(1.0f / 6.0f); const T_ACC three(3.0f); const T_ACC six(6.0f); gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t { T_ACC x = static_cast<T_ACC>(self_val); return x * ::min(::max(x + three, zero), six) * one_sixth; }); }); } void hardswish_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC three(3.0f); const T_ACC neg_three(-3.0f); const T_ACC one_half(0.5f); gpu_kernel( iter, [zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t { T_ACC grad_val = static_cast<T_ACC>(grad_val_); T_ACC self_val = static_cast<T_ACC>(self_val_); if (self_val < neg_three) { return zero; } else if (self_val <= three) { return grad_val * ((self_val / three) + one_half); } else { return grad_val; } }); }); } void hardsigmoid_kernel(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC one_sixth(1.0f / 6.0f); const T_ACC three(3.0f); const T_ACC six(6.0f); gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t { T_ACC x = static_cast<T_ACC>(self_val); return ::min(::max(x + three, zero), six) * one_sixth; }); }); } void hardsigmoid_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC three(3.0f); const T_ACC neg_three(-3.0f); const T_ACC one_sixth(1.0f / 6.0f); gpu_kernel( iter, [zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t { T_ACC grad_val = static_cast<T_ACC>(grad_val_); T_ACC self_val = static_cast<T_ACC>(self_val_); return (self_val > neg_three && self_val < three) ? 
grad_val * one_sixth : zero; }); }); } void silu_kernel(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "silu_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { using T_ACC = acc_type<scalar_t, true>; const T_ACC x_acc = static_cast<T_ACC>(x); return x_acc / (T_ACC(1) + c10::hip::compat::exp(-x_acc)); }); }); } void silu_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "silu_backward_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { using T_ACC = acc_type<scalar_t, true>; const T_ACC dy_acc = static_cast<T_ACC>(dy); const T_ACC x_acc = static_cast<T_ACC>(x); const T_ACC s_acc = T_ACC(1) / (T_ACC(1) + c10::hip::compat::exp(-x_acc)); return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc)); }); }); } } // namespace Tensor gelu_cuda(const Tensor& self) { Tensor Y = at::native::empty_like( self, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto it = TensorIterator::unary_op(Y, self); GeluCUDAKernelImpl(it); return Y; } Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) { Tensor dX = at::native::empty_like( self, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto it = TensorIterator::binary_op(dX, grad, self); GeluBackwardCUDAKernelImpl(it); return dX; } REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel); REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel); REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel); REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel); REGISTER_DISPATCH(elu_stub, &elu_kernel); REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel); REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel); REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel); REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel); REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel); REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel); REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel); REGISTER_DISPATCH(softplus_stub, &softplus_kernel); REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel); REGISTER_DISPATCH(silu_stub, &silu_kernel); REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel); REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda); } // namespace native } // namespace at
d4aa8f3a931bfa1bdd784eb78fb594dda457509f.cu
#define _USE_MATH_DEFINES #include <ATen/native/Activation.h> #include <cmath> #include <thrust/tuple.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/core/Array.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/Loops.cuh> #include <c10/cuda/CUDAMathCompat.h> namespace at { namespace native { // ----------------------------------- // prelu forward // ----------------------------------- template <typename scalar_t> void prelu_cuda_kernel_share_weights( const Tensor& input, Tensor& result, const scalar_t* weight_data) { auto iter = TensorIterator::unary_op(result, input); at::native::gpu_kernel(iter, [weight_data] GPU_LAMBDA (scalar_t input_val) { return (input_val > 0) ? input_val : *weight_data * input_val; }); } template <typename scalar_t> __global__ void prelu_cuda_kernel_multi_weights( scalar_t* result_data, const scalar_t* input_data, const scalar_t* weight_data, int64_t input_stride0, int64_t input_stride1, int64_t input_numel) { int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x; if (linearId >= input_numel) return; // multiply values at each channel with weight[channel_index] int64_t channel = (linearId % input_stride0) / input_stride1; scalar_t input_data_val = input_data[linearId]; result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val; } Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) { TORCH_CHECK(self.is_cuda()); TORCH_CHECK(weight_.is_cuda()); auto input = self.contiguous(); auto weight = weight_.contiguous(); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); int64_t weight_num = weight.numel(); Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto strides = input.strides(); // case1: shared weight for all channels if (weight_num == 1) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] { prelu_cuda_kernel_share_weights<scalar_t>( input, result, weight.data_ptr<scalar_t>()); }); } else { // case2: multiple weights, one for each channel int64_t input_ndim = input.dim(); TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor."); int64_t channel_size = 1; // channel_size default to 1 int64_t input_stride0 = 1, input_stride1 = 1; if (input_ndim > 1) { channel_size = input.size(1); // channel is the 2nd dim of input input_stride0 = strides[0]; input_stride1 = strides[1]; } TORCH_CHECK(channel_size == weight_num, "Mismatch of parameter numbers and input channel size. 
Found parameter numbers = ", weight_num, " and channel size = ", channel_size, "."); // config to run cuda kernel int64_t input_numel = input.numel(); const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel)); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions"); AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] { prelu_cuda_kernel_multi_weights<scalar_t> <<<grid, block, 0, stream>>>( result.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), weight.data_ptr<scalar_t>(), input_stride0, input_stride1, input_numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } return result; } // ----------------------------------- // prelu backward // ----------------------------------- template <typename scalar_t> void prelu_cuda_backward_kernel_share_weights( const Tensor& input, const Tensor& grad_out, Tensor& input_grad, Tensor& weight_grad_collector, const scalar_t* weight_data) { at::TensorIterator iter = TensorIteratorConfig() .add_output(input_grad) .add_output(weight_grad_collector) .add_input(input) .add_input(grad_out) .build(); // N.B. `std::tuple` does not support `::operator=` on device code. gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> { scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out; scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out; return {input_grad, weight_grad_collector}; }); } template <typename scalar_t> __global__ void prelu_cuda_backward_kernel_multi_weights( const scalar_t* input_data, const scalar_t* weight_data, const scalar_t* grad_out_data, scalar_t* input_grad_data, scalar_t* weight_grad_collector, int64_t input_stride0, int64_t input_stride1, int64_t input_numel) { int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x; if (linearId >= input_numel) return; int64_t channel = (linearId % input_stride0) / input_stride1; scalar_t input_data_val = input_data[linearId]; scalar_t grad_out_data_val = grad_out_data[linearId]; input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val; weight_grad_collector[linearId] = (input_data_val > 0) ? 
scalar_t(0) : input_data_val * grad_out_data_val; } std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) { TORCH_CHECK(grad_out_.is_cuda()); TORCH_CHECK(self.is_cuda()); TORCH_CHECK(weight_.is_cuda()); auto input = self.contiguous(); auto grad_out = grad_out_.contiguous(); auto weight = weight_.contiguous(); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); TORCH_CHECK(grad_out.is_contiguous()); int64_t weight_num = weight.numel(); auto strides = input.strides(); auto dims = input.dim(); Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); // case1: shared parameter for all channels if (weight_num == 1) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] { prelu_cuda_backward_kernel_share_weights<scalar_t>( input, grad_out, input_grad, weight_grad_collector, weight.data_ptr<scalar_t>()); }); weight_grad.fill_(weight_grad_collector.sum()); } else { // case2: multiple parameters, one for each channel int64_t input_ndim = input.dim(); TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor."); int64_t channel_size = 1; // channel_size default to 1 int64_t input_stride0 = 1, input_stride1 = 1; if (input_ndim > 1) { channel_size = input.size(1); // channel is the 2nd dim of input input_stride0 = strides[0]; input_stride1 = strides[1]; } TORCH_CHECK(channel_size == weight_num, "Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num, " and channel size = ", channel_size, "."); // config to run cuda kernel int64_t input_numel = input.numel(); const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel)); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions"); AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] { prelu_cuda_backward_kernel_multi_weights<scalar_t> <<<grid, block, 0, stream>>>( input.data_ptr<scalar_t>(), weight.data_ptr<scalar_t>(), grad_out.data_ptr<scalar_t>(), input_grad.data_ptr<scalar_t>(), weight_grad_collector.data_ptr<scalar_t>(), input_stride0, input_stride1, input_numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); // update weight_grad std::vector<int64_t> reduce_dims; reduce_dims.push_back(0); if (dims > 2) { for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i); } weight_grad = weight_grad_collector.sum(reduce_dims); } return std::tuple<Tensor, Tensor>{input_grad, weight_grad}; } // ----------------------------------- // hardshrink // ----------------------------------- void hardshrink_kernel(TensorIterator& iter, const Scalar& value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t { return (a >= -lambd && a <= lambd) ? 
scalar_t(0) : a; }); }); } void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0)); }); }); } void shrink_backward_kernel(TensorIterator& iter, const Scalar& value) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() { auto lambd = value.to<scalar_t>(); gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t { return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val; }); }); } void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) { AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() { auto min_val = min.to<scalar_t>(); auto max_val = max.to<scalar_t>(); gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a; }); }); } void softplus_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() { auto beta = beta_.to<scalar_t>(); auto threshold = threshold_.to<scalar_t>(); gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t { return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(std::exp(a * beta))) / beta; }); }); } void softplus_backward_kernel(TensorIterator& iter, const Scalar& beta_, const Scalar& threshold_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() { auto beta = beta_.to<scalar_t>(); auto threshold = threshold_.to<scalar_t>(); gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { scalar_t z = std::exp(b * beta); return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.)); }); }); } template <typename scalar_t> void threshold_kernel_impl(TensorIteratorBase& iter, scalar_t threshold, scalar_t value) { gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t { return x <= threshold ? value : other; }); } static void threshold_kernel_cuda(TensorIteratorBase& iter, const Scalar& threshold, const Scalar& value) { AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] { threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>()); }); } void elu_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() { auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>(); auto poscoef = scale.to<scalar_t>(); auto negiptcoef = input_scale.to<scalar_t>(); gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > scalar_t(0) ? 
a * poscoef : (static_cast<scalar_t>(std::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef; }); }); } void elu_backward_kernel(TensorIterator& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() { auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>(); auto poscoef = scale.to<scalar_t>(); auto negiptcoef = input_scale.to<scalar_t>(); gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { if (is_result) { return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef; } else { return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(std::exp(b * negiptcoef))) : a * poscoef; } }); }); } namespace { void GeluCUDAKernelImpl(TensorIterator& it) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() { using T_ACC = acc_type<scalar_t, true>; gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t { return static_cast<T_ACC>(x) * c10::cuda::compat::normcdf(static_cast<T_ACC>(x)); }); }); } void GeluBackwardCUDAKernelImpl(TensorIterator& it) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() { using T_ACC = acc_type<scalar_t, true>; gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5); const T_ACC cdf = c10::cuda::compat::normcdf(static_cast<T_ACC>(x)); const T_ACC pdf = c10::cuda::compat::exp( T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) * kBeta; return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf); }); }); } void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() { auto negval = negval_.to<scalar_t>(); gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t { return a > scalar_t(0) ? a : a * negval; }); }); } void leaky_relu_backward_kernel(TensorIterator& iter, const Scalar& negval_) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() { auto negval = negval_.to<scalar_t>(); gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a > scalar_t(0) ? 
b : b * negval; }); }); } void hardswish_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC one_sixth(1.0f / 6.0f); const T_ACC three(3.0f); const T_ACC six(6.0f); gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t { T_ACC x = static_cast<T_ACC>(self_val); return x * std::min(std::max(x + three, zero), six) * one_sixth; }); }); } void hardswish_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC three(3.0f); const T_ACC neg_three(-3.0f); const T_ACC one_half(0.5f); gpu_kernel( iter, [zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t { T_ACC grad_val = static_cast<T_ACC>(grad_val_); T_ACC self_val = static_cast<T_ACC>(self_val_); if (self_val < neg_three) { return zero; } else if (self_val <= three) { return grad_val * ((self_val / three) + one_half); } else { return grad_val; } }); }); } void hardsigmoid_kernel(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC one_sixth(1.0f / 6.0f); const T_ACC three(3.0f); const T_ACC six(6.0f); gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t { T_ACC x = static_cast<T_ACC>(self_val); return std::min(std::max(x + three, zero), six) * one_sixth; }); }); } void hardsigmoid_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC zero(0.0f); const T_ACC three(3.0f); const T_ACC neg_three(-3.0f); const T_ACC one_sixth(1.0f / 6.0f); gpu_kernel( iter, [zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t { T_ACC grad_val = static_cast<T_ACC>(grad_val_); T_ACC self_val = static_cast<T_ACC>(self_val_); return (self_val > neg_three && self_val < three) ? 
grad_val * one_sixth : zero; }); }); } void silu_kernel(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "silu_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { using T_ACC = acc_type<scalar_t, true>; const T_ACC x_acc = static_cast<T_ACC>(x); return x_acc / (T_ACC(1) + c10::cuda::compat::exp(-x_acc)); }); }); } void silu_backward_kernel(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "silu_backward_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { using T_ACC = acc_type<scalar_t, true>; const T_ACC dy_acc = static_cast<T_ACC>(dy); const T_ACC x_acc = static_cast<T_ACC>(x); const T_ACC s_acc = T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc)); return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc)); }); }); } } // namespace Tensor gelu_cuda(const Tensor& self) { Tensor Y = at::native::empty_like( self, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto it = TensorIterator::unary_op(Y, self); GeluCUDAKernelImpl(it); return Y; } Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) { Tensor dX = at::native::empty_like( self, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto it = TensorIterator::binary_op(dX, grad, self); GeluBackwardCUDAKernelImpl(it); return dX; } REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel); REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel); REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel); REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel); REGISTER_DISPATCH(elu_stub, &elu_kernel); REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel); REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel); REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel); REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel); REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel); REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel); REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel); REGISTER_DISPATCH(softplus_stub, &softplus_kernel); REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel); REGISTER_DISPATCH(silu_stub, &silu_kernel); REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel); REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda); } // namespace native } // namespace at
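Editor's note: this .hip/.cu pair differs only by the mechanical substitutions hipify applies, e.g. cudaGetDevice → hipGetDevice, C10_CUDA_KERNEL_LAUNCH_CHECK → C10_HIP_KERNEL_LAUNCH_CHECK, c10::cuda::compat → c10::hip::compat, std::min/std::exp → ::min/::exp, and the triple-chevron kernel launch rewritten as hipLaunchKernelGGL. The toy kernel below, which is not taken from the PyTorch sources above, shows the launch rewrite in isolation.

// Toy example only: the same launch written in its CUDA form and in the form
// hipify emits. Kernel and variable names here are illustrative.
__global__ void scale_kernel(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

// CUDA source (.cu):
//     scale_kernel<<<grid, block, 0, stream>>>(x, a, n);
//
// hipify output (.hip):
//     hipLaunchKernelGGL(( scale_kernel), dim3(grid), dim3(block), 0, stream, x, a, n);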
ebf2d238b2c0a9772b32658dad90a06b7a840094.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gmv.cuh"
#include "stdio.h"

//Calculates c=alpha*A*x+beta*c, for matrix A (dimensions nxm), vectors c,x and scalars alpha, beta
__global__ void k_gmv_f32(int n, int m, float alpha, float* A, int stride_row_a, int stride_col_a,float* x, int stride_x, float beta, float* c, int stride_c){
    const int TILE_SIZE=64;
    int tx=threadIdx.x;
    int bx=blockIdx.x;
    int col=bx*TILE_SIZE+tx;
    if (col<n){
        int q=m/TILE_SIZE;
        int rem=m%TILE_SIZE;
        float* ptrA=A+col*stride_col_a;
        //float* a_end=ptrA+TILE_SIZE*stride_col_a;
        float* ptrX=x;
        __shared__ float buf[TILE_SIZE];
        float sum=0.0;
        for (int i=0;i<q;i++){
            for (int j=0;j<TILE_SIZE;j++){
                buf[j]=ptrX[j*stride_x];
            }
            #pragma unroll
            for (int j=0;j<TILE_SIZE;j++){
                sum+=buf[j]*ptrA[j*stride_row_a];
            }
            ptrA+=TILE_SIZE*stride_row_a;
            ptrX+=TILE_SIZE*stride_x;
            __syncthreads();
        }
        if (rem>0){
            for (int j=0;j<rem;j++){
                buf[j]=ptrX[j*stride_x];
            }
            for (int j=0;j<rem;j++){
                sum+=buf[j]*ptrA[j*stride_row_a];
            }
        }
        c[col*stride_c]=beta*c[col*stride_c]+alpha*sum;
    }
}

__host__ void gmv_f32_device(int n, int m, float alpha, float* A_d, int stride_row_a, int stride_col_a, float* x, int stride_x, float beta, float* C_d, int stride_c){
    float bsmx=64; //blocksize x
    dim3 threadLayout=dim3(bsmx,1,1);
    dim3 grid=dim3(ceil(n/bsmx),1,1);
    hipLaunchKernelGGL(( k_gmv_f32), dim3(grid),dim3(threadLayout), 0, 0, n,m,alpha,A_d,stride_row_a,stride_col_a,x,stride_x,beta,C_d,stride_c);
}

__host__ void gmv_f32(int n, int m, float alpha, float* A_h, int stride_row_a, int stride_col_a, float* x_h, int stride_x, float beta, float* C_h, int stride_c){
    if ((n==0) || (m==0)){
        return;
    }
    float* A_d;
    float* x_d;
    float* C_d;
    int sizeA=sizeof(float)*n*m;
    int sizeX=sizeof(float)*m;
    hipMalloc((void**)&A_d, sizeA);
    hipMalloc((void**)&x_d,sizeX);
    hipMalloc((void**)&C_d,sizeA);
    hipError_t copy1=hipMemcpy((void*) A_d, (void*) A_h, sizeA,hipMemcpyHostToDevice);
    hipError_t copy2=hipMemcpy((void*) x_d, (void*) x_h, sizeX,hipMemcpyHostToDevice);
    hipError_t copy3=hipMemcpy((void*) C_d, (void*) C_h, sizeA,hipMemcpyHostToDevice);
    if ((copy1==hipSuccess) && (copy2==hipSuccess) && (copy3==hipSuccess)){
        float bsmx=64; //blocksize x
        dim3 threadLayout=dim3(bsmx,1,1);
        dim3 grid=dim3(ceil(n/bsmx),1,1);
        hipLaunchKernelGGL(( k_gmv_f32), dim3(grid),dim3(threadLayout), 0, 0, n,m,alpha,A_d,stride_row_a,stride_col_a,x_d,stride_x,beta,C_d,stride_c);
        hipMemcpy((void*)C_h,(void*)C_d,sizeA,hipMemcpyDeviceToHost);
    }
    else{
        printf("Error copying value to device in gmv_f32\n");
    }
    hipFree(A_d);
    hipFree(x_d);
    hipFree(C_d);
}
ebf2d238b2c0a9772b32658dad90a06b7a840094.cu
#include "gmv.cuh" #include "stdio.h" //Calculates c=alpha*A*x+beta*c, for matrix A (dimensions nxm), vectors c,x and scalars alpha, beta __global__ void k_gmv_f32(int n, int m, float alpha, float* A, int stride_row_a, int stride_col_a,float* x, int stride_x, float beta, float* c, int stride_c){ const int TILE_SIZE=64; int tx=threadIdx.x; int bx=blockIdx.x; int col=bx*TILE_SIZE+tx; if (col<n){ int q=m/TILE_SIZE; int rem=m%TILE_SIZE; float* ptrA=A+col*stride_col_a; //float* a_end=ptrA+TILE_SIZE*stride_col_a; float* ptrX=x; __shared__ float buf[TILE_SIZE]; float sum=0.0; for (int i=0;i<q;i++){ for (int j=0;j<TILE_SIZE;j++){ buf[j]=ptrX[j*stride_x]; } #pragma unroll for (int j=0;j<TILE_SIZE;j++){ sum+=buf[j]*ptrA[j*stride_row_a]; } ptrA+=TILE_SIZE*stride_row_a; ptrX+=TILE_SIZE*stride_x; __syncthreads(); } if (rem>0){ for (int j=0;j<rem;j++){ buf[j]=ptrX[j*stride_x]; } for (int j=0;j<rem;j++){ sum+=buf[j]*ptrA[j*stride_row_a]; } } c[col*stride_c]=beta*c[col*stride_c]+alpha*sum; } } __host__ void gmv_f32_device(int n, int m, float alpha, float* A_d, int stride_row_a, int stride_col_a, float* x, int stride_x, float beta, float* C_d, int stride_c){ float bsmx=64; //blocksize x dim3 threadLayout=dim3(bsmx,1,1); dim3 grid=dim3(ceil(n/bsmx),1,1); k_gmv_f32<<<grid,threadLayout>>>(n,m,alpha,A_d,stride_row_a,stride_col_a,x,stride_x,beta,C_d,stride_c); } __host__ void gmv_f32(int n, int m, float alpha, float* A_h, int stride_row_a, int stride_col_a, float* x_h, int stride_x, float beta, float* C_h, int stride_c){ if ((n==0) || (m==0)){ return; } float* A_d; float* x_d; float* C_d; int sizeA=sizeof(float)*n*m; int sizeX=sizeof(float)*m; cudaMalloc((void**)&A_d, sizeA); cudaMalloc((void**)&x_d,sizeX); cudaMalloc((void**)&C_d,sizeA); cudaError_t copy1=cudaMemcpy((void*) A_d, (void*) A_h, sizeA,cudaMemcpyHostToDevice); cudaError_t copy2=cudaMemcpy((void*) x_d, (void*) x_h, sizeX,cudaMemcpyHostToDevice); cudaError_t copy3=cudaMemcpy((void*) C_d, (void*) C_h, sizeA,cudaMemcpyHostToDevice); if ((copy1==cudaSuccess) && (copy2==cudaSuccess) && (copy3==cudaSuccess)){ float bsmx=64; //blocksize x dim3 threadLayout=dim3(bsmx,1,1); dim3 grid=dim3(ceil(n/bsmx),1,1); k_gmv_f32<<<grid,threadLayout>>>(n,m,alpha,A_d,stride_row_a,stride_col_a,x_d,stride_x,beta,C_d,stride_c); cudaMemcpy((void*)C_h,(void*)C_d,sizeA,cudaMemcpyDeviceToHost); } else{ printf("Error copying value to device in gmv_f32\n"); } cudaFree(A_d); cudaFree(x_d); cudaFree(C_d); }
21658e32b502625714b6108476bdeee6bc6776b6.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "writeOffsetUnroll2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *A = NULL;
            hipMalloc(&A, XSIZE*YSIZE);
            float *B = NULL;
            hipMalloc(&B, XSIZE*YSIZE);
            float *C = NULL;
            hipMalloc(&C, XSIZE*YSIZE);
            const int n = 1;
            int offset = 2;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( writeOffsetUnroll2), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n,offset);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( writeOffsetUnroll2), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n,offset);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( writeOffsetUnroll2), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n,offset);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
21658e32b502625714b6108476bdeee6bc6776b6.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "writeOffsetUnroll2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *A = NULL;
            cudaMalloc(&A, XSIZE*YSIZE);
            float *B = NULL;
            cudaMalloc(&B, XSIZE*YSIZE);
            float *C = NULL;
            cudaMalloc(&C, XSIZE*YSIZE);
            const int n = 1;
            int offset = 2;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            writeOffsetUnroll2<<<gridBlock,threadBlock>>>(A,B,C,n,offset);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                writeOffsetUnroll2<<<gridBlock,threadBlock>>>(A,B,C,n,offset);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                writeOffsetUnroll2<<<gridBlock,threadBlock>>>(A,B,C,n,offset);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
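Editor's note: the timing loop in the benchmark pair above wraps asynchronous kernel launches in std::chrono timestamps without a final cudaDeviceSynchronize, so the reported figure need not include completion of the 1000 timed launches. A cudaEvent-based drop-in for that block, reusing gridBlock, threadBlock, A, B, C, n and offset from the harness, might look like the sketch below (illustrative only, not part of the dataset file).

// Sketch only: event-based timing as a replacement for the steady_clock block
// in main() above; relies on the harness's existing variables and includes.
cudaEvent_t start_ev, stop_ev;
cudaEventCreate(&start_ev);
cudaEventCreate(&stop_ev);

cudaEventRecord(start_ev);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    writeOffsetUnroll2<<<gridBlock,threadBlock>>>(A,B,C,n,offset);
}
cudaEventRecord(stop_ev);
cudaEventSynchronize(stop_ev);                  // wait for the timed launches to finish

float ms = 0.0f;
cudaEventElapsedTime(&ms, start_ev, stop_ev);   // elapsed GPU time in milliseconds
cout <<'['<<ms * 1000.0f<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;

cudaEventDestroy(start_ev);
cudaEventDestroy(stop_ev);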
b8935c21f64e5994d8e149cc6932302f038ad5a3.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file indexing_op.cu * \brief * \author Siyi Li, Chi Zhang */ #include "./indexing_op.h" #include "./util/tensor_util-inl.cuh" namespace mxnet { namespace op { /*! \brief If there are out-of-bound indices, out will be assigned to 1. */ struct is_valid_check { template<typename DType> MSHADOW_XINLINE static void Map(int i, int32_t* out, const DType* data, const DType min, const DType max) { if (data[i] < min || data[i] > max) *out = 1; } }; struct AddTakeGradRspGPUKernel { template<typename DType, typename IType> __device__ __forceinline__ static void Map(int tid, DType* out, const nnvm::dim_t* prefix_sum, const IType* data, const DType* ograd, const nnvm::dim_t row_length) { using nnvm::dim_t; const dim_t data_i = tid / row_length; const dim_t grad_i = tid % row_length; const dim_t irow = static_cast<dim_t>(data[data_i]); const dim_t rsp_row = prefix_sum[irow] - 1; const DType val = ograd[data_i * row_length + grad_i]; atomicAdd(static_cast<DType *>(&(out[rsp_row*row_length+grad_i])), val); } }; template<> void SparseEmbeddingOpForwardRspImpl<gpu>(mshadow::Stream<gpu>* s, const TBlob& data, const NDArray& weight, const OpReqType req, const TBlob& output) { if (req == kNullOp) return; using namespace rowsparse; using namespace mxnet_op; // zeros weight if (req == kWriteTo && !weight.storage_initialized()) { size_t out_size = output.shape_.Size(); MSHADOW_TYPE_SWITCH(output.type_flag_, DType, { Fill<false>(s, TBlob(output.dptr<DType>(), mshadow::Shape1(out_size), gpu::kDevMask), kWriteTo, 0); }) return; } // check out-of-bound indices int32_t is_valid = 0; MSHADOW_TYPE_SWITCH(data.type_flag_, DType, { DType min = 0; DType max = static_cast<DType>(weight.shape()[0] - 1); DType* data_ptr = data.dptr<DType>(); size_t data_size = data.shape_.Size(); int32_t* is_valid_ptr = NULL; CUDA_CALL(hipMalloc(&is_valid_ptr, sizeof(int32_t))); Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr); Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data_ptr, min, max); CUDA_CALL(hipMemcpy(&is_valid, is_valid_ptr, sizeof(int32_t), hipMemcpyDeviceToHost)); }) CHECK_EQ(is_valid, 0) << "SparseEmbedding input contains data out of bound"; // the weight is actually dense if (weight.aux_shape(kIdx)[0] == weight.shape()[0]) { EmbeddingOpForwardDnsImpl<gpu>(s, data, weight.data(), req, output); } else { EmbeddingOpForwardRspImpl<gpu>(s, data, weight, req, output); } } template<> inline void SparseEmbeddingOpBackwardRspImpl<gpu>(const OpContext& ctx, const TBlob& ograd, const TBlob& data, const OpReqType req, const NDArray& output) { using namespace mshadow; using namespace mxnet_op; using namespace mshadow::expr; using namespace rowsparse; using nnvm::dim_t; if (req == 
kNullOp) return; CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support " << "weight gradient calculation with req != write"; // Request temporary storage for marking non-zero rows and prefix sum Stream<gpu> *s = ctx.get_stream<gpu>(); dim_t num_rows = output.shape()[0]; dim_t row_length = output.shape()[1]; dim_t data_size = static_cast<dim_t>(data.shape_.Size()); dim_t num_threads; MSHADOW_TYPE_SWITCH(data.type_flag_, IType, { MSHADOW_SGL_DBL_TYPE_SWITCH(ograd.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(output.aux_type(kIdx), RType, { dim_t* prefix_sum = NULL; void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, prefix_sum, prefix_sum, num_rows, Stream<gpu>::GetStream(s)); Tensor<gpu, 1, char> workspace = ctx.requested[0] .get_space_typed<gpu, 1, char>(Shape1(num_rows * sizeof(dim_t) + temp_storage_bytes), s); prefix_sum = reinterpret_cast<dim_t*>(workspace.dptr_); d_temp_storage = workspace.dptr_ + num_rows*sizeof(dim_t); num_threads = num_rows; Fill<false>(s, TBlob(prefix_sum, Shape1(num_threads), gpu::kDevMask), kWriteTo, 0); Kernel<MarkRowFlgKernel, gpu>::Launch(s, data_size, prefix_sum, data.dptr<IType>()); hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, prefix_sum, prefix_sum, num_rows, mshadow::Stream<gpu>::GetStream(s)); dim_t nnr = 0; CUDA_CALL(hipMemcpy(&nnr, &prefix_sum[num_rows-1], sizeof(dim_t), hipMemcpyDeviceToHost)); if (nnr == 0) { FillZerosRspImpl(s, output); return; } output.CheckAndAlloc({Shape1(nnr)}); RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>(); // fill row_idx array of output matrix, using the row_flg values Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_rows, grad_row_idx, prefix_sum, num_rows); // prefill with zeros DType* grad_data = output.data().dptr<DType>(); Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), kWriteTo, 0); // add the final gradients num_threads = row_length * data_size; Kernel<AddTakeGradRspGPUKernel, gpu>::Launch(s, num_threads, grad_data, prefix_sum, data.dptr<IType>(), ograd.dptr<DType>(), row_length); }); }); }); } NNVM_REGISTER_OP(Embedding) .set_attr<FCompute>("FCompute<gpu>", EmbeddingOpForward<gpu>); NNVM_REGISTER_OP(_contrib_SparseEmbedding) .set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpForwardEx<gpu>); NNVM_REGISTER_OP(_backward_Embedding) .set_attr<FCompute>("FCompute<gpu>", EmbeddingOpBackward<gpu>); NNVM_REGISTER_OP(_backward_SparseEmbedding) .set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpBackwardEx<gpu>); NNVM_REGISTER_OP(take) .set_attr<FCompute>("FCompute<gpu>", TakeOpForward<gpu>); NNVM_REGISTER_OP(_backward_take) .set_attr<FCompute>("FCompute<gpu>", TakeOpBackward<gpu>); NNVM_REGISTER_OP(batch_take) .set_attr<FCompute>("FCompute<gpu>", BatchTakeOpForward<gpu>); NNVM_REGISTER_OP(one_hot) .set_attr<FCompute>("FCompute<gpu>", OneHotOpForward<gpu>); NNVM_REGISTER_OP(gather_nd) .set_attr<FCompute>("FCompute<gpu>", GatherNDForward<gpu>); NNVM_REGISTER_OP(scatter_nd) .set_attr<FCompute>("FCompute<gpu>", ScatterNDForward<gpu>); NNVM_REGISTER_OP(_scatter_set_nd) .set_attr<FCompute>("FCompute<gpu>", ScatterSetNDForward<gpu>); } // namespace op } // namespace mxnet
b8935c21f64e5994d8e149cc6932302f038ad5a3.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file indexing_op.cu * \brief * \author Siyi Li, Chi Zhang */ #include "./indexing_op.h" #include "./util/tensor_util-inl.cuh" namespace mxnet { namespace op { /*! \brief If there are out-of-bound indices, out will be assigned to 1. */ struct is_valid_check { template<typename DType> MSHADOW_XINLINE static void Map(int i, int32_t* out, const DType* data, const DType min, const DType max) { if (data[i] < min || data[i] > max) *out = 1; } }; struct AddTakeGradRspGPUKernel { template<typename DType, typename IType> __device__ __forceinline__ static void Map(int tid, DType* out, const nnvm::dim_t* prefix_sum, const IType* data, const DType* ograd, const nnvm::dim_t row_length) { using nnvm::dim_t; const dim_t data_i = tid / row_length; const dim_t grad_i = tid % row_length; const dim_t irow = static_cast<dim_t>(data[data_i]); const dim_t rsp_row = prefix_sum[irow] - 1; const DType val = ograd[data_i * row_length + grad_i]; atomicAdd(static_cast<DType *>(&(out[rsp_row*row_length+grad_i])), val); } }; template<> void SparseEmbeddingOpForwardRspImpl<gpu>(mshadow::Stream<gpu>* s, const TBlob& data, const NDArray& weight, const OpReqType req, const TBlob& output) { if (req == kNullOp) return; using namespace rowsparse; using namespace mxnet_op; // zeros weight if (req == kWriteTo && !weight.storage_initialized()) { size_t out_size = output.shape_.Size(); MSHADOW_TYPE_SWITCH(output.type_flag_, DType, { Fill<false>(s, TBlob(output.dptr<DType>(), mshadow::Shape1(out_size), gpu::kDevMask), kWriteTo, 0); }) return; } // check out-of-bound indices int32_t is_valid = 0; MSHADOW_TYPE_SWITCH(data.type_flag_, DType, { DType min = 0; DType max = static_cast<DType>(weight.shape()[0] - 1); DType* data_ptr = data.dptr<DType>(); size_t data_size = data.shape_.Size(); int32_t* is_valid_ptr = NULL; CUDA_CALL(cudaMalloc(&is_valid_ptr, sizeof(int32_t))); Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr); Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data_ptr, min, max); CUDA_CALL(cudaMemcpy(&is_valid, is_valid_ptr, sizeof(int32_t), cudaMemcpyDeviceToHost)); }) CHECK_EQ(is_valid, 0) << "SparseEmbedding input contains data out of bound"; // the weight is actually dense if (weight.aux_shape(kIdx)[0] == weight.shape()[0]) { EmbeddingOpForwardDnsImpl<gpu>(s, data, weight.data(), req, output); } else { EmbeddingOpForwardRspImpl<gpu>(s, data, weight, req, output); } } template<> inline void SparseEmbeddingOpBackwardRspImpl<gpu>(const OpContext& ctx, const TBlob& ograd, const TBlob& data, const OpReqType req, const NDArray& output) { using namespace mshadow; using namespace mxnet_op; using namespace mshadow::expr; using namespace rowsparse; using nnvm::dim_t; if (req == kNullOp) return; CHECK_EQ(req, kWriteTo) << "SparseEmbedding 
layer doesn't support " << "weight gradient calculation with req != write"; // Request temporary storage for marking non-zero rows and prefix sum Stream<gpu> *s = ctx.get_stream<gpu>(); dim_t num_rows = output.shape()[0]; dim_t row_length = output.shape()[1]; dim_t data_size = static_cast<dim_t>(data.shape_.Size()); dim_t num_threads; MSHADOW_TYPE_SWITCH(data.type_flag_, IType, { MSHADOW_SGL_DBL_TYPE_SWITCH(ograd.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(output.aux_type(kIdx), RType, { dim_t* prefix_sum = NULL; void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, prefix_sum, prefix_sum, num_rows, Stream<gpu>::GetStream(s)); Tensor<gpu, 1, char> workspace = ctx.requested[0] .get_space_typed<gpu, 1, char>(Shape1(num_rows * sizeof(dim_t) + temp_storage_bytes), s); prefix_sum = reinterpret_cast<dim_t*>(workspace.dptr_); d_temp_storage = workspace.dptr_ + num_rows*sizeof(dim_t); num_threads = num_rows; Fill<false>(s, TBlob(prefix_sum, Shape1(num_threads), gpu::kDevMask), kWriteTo, 0); Kernel<MarkRowFlgKernel, gpu>::Launch(s, data_size, prefix_sum, data.dptr<IType>()); cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, prefix_sum, prefix_sum, num_rows, mshadow::Stream<gpu>::GetStream(s)); dim_t nnr = 0; CUDA_CALL(cudaMemcpy(&nnr, &prefix_sum[num_rows-1], sizeof(dim_t), cudaMemcpyDeviceToHost)); if (nnr == 0) { FillZerosRspImpl(s, output); return; } output.CheckAndAlloc({Shape1(nnr)}); RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>(); // fill row_idx array of output matrix, using the row_flg values Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_rows, grad_row_idx, prefix_sum, num_rows); // prefill with zeros DType* grad_data = output.data().dptr<DType>(); Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), kWriteTo, 0); // add the final gradients num_threads = row_length * data_size; Kernel<AddTakeGradRspGPUKernel, gpu>::Launch(s, num_threads, grad_data, prefix_sum, data.dptr<IType>(), ograd.dptr<DType>(), row_length); }); }); }); } NNVM_REGISTER_OP(Embedding) .set_attr<FCompute>("FCompute<gpu>", EmbeddingOpForward<gpu>); NNVM_REGISTER_OP(_contrib_SparseEmbedding) .set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpForwardEx<gpu>); NNVM_REGISTER_OP(_backward_Embedding) .set_attr<FCompute>("FCompute<gpu>", EmbeddingOpBackward<gpu>); NNVM_REGISTER_OP(_backward_SparseEmbedding) .set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpBackwardEx<gpu>); NNVM_REGISTER_OP(take) .set_attr<FCompute>("FCompute<gpu>", TakeOpForward<gpu>); NNVM_REGISTER_OP(_backward_take) .set_attr<FCompute>("FCompute<gpu>", TakeOpBackward<gpu>); NNVM_REGISTER_OP(batch_take) .set_attr<FCompute>("FCompute<gpu>", BatchTakeOpForward<gpu>); NNVM_REGISTER_OP(one_hot) .set_attr<FCompute>("FCompute<gpu>", OneHotOpForward<gpu>); NNVM_REGISTER_OP(gather_nd) .set_attr<FCompute>("FCompute<gpu>", GatherNDForward<gpu>); NNVM_REGISTER_OP(scatter_nd) .set_attr<FCompute>("FCompute<gpu>", ScatterNDForward<gpu>); NNVM_REGISTER_OP(_scatter_set_nd) .set_attr<FCompute>("FCompute<gpu>", ScatterSetNDForward<gpu>); } // namespace op } // namespace mxnet
d577d2d948a388f0e956d0192301a6394a261f69.hip
// !!! This is a file automatically generated by hipify!!!
//
// Created by smallflyfly on 2021/5/27.
//

#include <stdio.h>
#include <hip/hip_runtime.h>
#include "cuda_code.h"

__global__ void warmUp(int *iData, int *oData, int n) {
    unsigned int tid = threadIdx.x;
    if (tid >= n) return;
    int *data = iData + blockIdx.x * blockDim.x;
    for (int stride = 1; stride < blockDim.x; stride *= 2) {
        if ((tid % (2 * stride)) == 0) {
            data[tid] += data[tid + stride];
        }
        // synchronize within block
        __syncthreads();
    }
    if (tid == 0) {
        oData[blockIdx.x] = data[0];
    }
}

int main(int argc, char **argv) {
    initDevice(0);
    int size = 1 << 24;
    int blockSize = 1024;
    if (argc > 1) {
        blockSize = atoi(argv[1]);
    }
    dim3 block(blockSize, 1);
    dim3 grid((size - 1) / block.x + 1, 1);
    size_t bytes = size * sizeof(int);
    int *iDataH = (int*) malloc(bytes);
    int *oDataH = (int*) malloc(grid.x * sizeof(int));
    int *tmp = (int*) malloc(bytes);
    initDataInt(iDataH, size);
    memcpy(tmp, iDataH, bytes);
    double iStart, iElaps;
    int gupSum = 0;
    int *iDataD = NULL;
    int *oDataD = NULL;
    CHECK(hipMalloc((void**)&iDataD, bytes));
    CHECK(hipMalloc((void**)&oDataD, grid.x * sizeof(int)));

    int cpuSum = 0;
    iStart = cpuSecond();
    for (int i = 0; i < size; i++) {
        cpuSum += tmp[i];
    }
    printf("cpu sum: %d\n", cpuSum);
    iElaps = cpuSecond() - iStart;
    printf("cpu cost time %lf ms\n", iElaps);

    CHECK(hipMemcpyAsync(iDataD, iDataH, bytes, hipMemcpyHostToDevice));
    CHECK(hipDeviceSynchronize());
    iStart = cpuSecond();
    hipLaunchKernelGGL(( warmUp), dim3(grid.x / 2), dim3(block), 0, 0, iDataD, oDataD, size);
    hipDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("gpu warmup elapsed %lf ms\n", iElaps);

    // cpu
    int cpu;

    hipDeviceReset();

    return 0;
}
d577d2d948a388f0e956d0192301a6394a261f69.cu
//
// Created by smallflyfly on 2021/5/27.
//

#include <stdio.h>
#include <cuda_runtime.h>
#include "cuda_code.h"

__global__ void warmUp(int *iData, int *oData, int n) {
    unsigned int tid = threadIdx.x;
    if (tid >= n) return;
    int *data = iData + blockIdx.x * blockDim.x;
    for (int stride = 1; stride < blockDim.x; stride *= 2) {
        if ((tid % (2 * stride)) == 0) {
            data[tid] += data[tid + stride];
        }
        // synchronize within block
        __syncthreads();
    }
    if (tid == 0) {
        oData[blockIdx.x] = data[0];
    }
}

int main(int argc, char **argv) {
    initDevice(0);
    int size = 1 << 24;
    int blockSize = 1024;
    if (argc > 1) {
        blockSize = atoi(argv[1]);
    }
    dim3 block(blockSize, 1);
    dim3 grid((size - 1) / block.x + 1, 1);
    size_t bytes = size * sizeof(int);
    int *iDataH = (int*) malloc(bytes);
    int *oDataH = (int*) malloc(grid.x * sizeof(int));
    int *tmp = (int*) malloc(bytes);
    initDataInt(iDataH, size);
    memcpy(tmp, iDataH, bytes);
    double iStart, iElaps;
    int gupSum = 0;
    int *iDataD = NULL;
    int *oDataD = NULL;
    CHECK(cudaMalloc((void**)&iDataD, bytes));
    CHECK(cudaMalloc((void**)&oDataD, grid.x * sizeof(int)));

    int cpuSum = 0;
    iStart = cpuSecond();
    for (int i = 0; i < size; i++) {
        cpuSum += tmp[i];
    }
    printf("cpu sum: %d\n", cpuSum);
    iElaps = cpuSecond() - iStart;
    printf("cpu cost time %lf ms\n", iElaps);

    CHECK(cudaMemcpyAsync(iDataD, iDataH, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = cpuSecond();
    warmUp<<<grid.x / 2, block>>>(iDataD, oDataD, size);
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("gpu warmup elapsed %lf ms\n", iElaps);

    // cpu
    int cpu;

    cudaDeviceReset();

    return 0;
}
6bb75f6214bf3ef82ca9783cff1a86cd690bc9ef.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 1.6.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2015

       @generated from zlarfx.cu normal z -> s, Fri Jan 30 19:00:08 2015

*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"

// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512

#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16


//==============================================================================

__global__
void magma_slarfx_kernel( int m, float *v, float *tau,
                          float *c, int ldc, float *xnorm,
                          float *T, int it )
{
    if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
        const int tx = threadIdx.x;
        //float *dc = c + (blockIdx.x-it-1) * ldc;
        float *dc = c + (blockIdx.x) * ldc;

        __shared__ float sum[ BLOCK_SIZE ];
        float lsum;

        /* NOTE HERE C is the C at position C(i, 0)
         * if blockIdx.x<it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
         * if blockIdx.x>it it perform  w := v**H * C  */
        lsum = MAGMA_S_ZERO;
        for( int j = tx; j < m; j += BLOCK_SIZE ){
            if (j==0){
                lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
                v[j] = MAGMA_S_ONE;
            }
            else
                lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
        }
        sum[tx] = lsum;
        magma_sum_reduce< BLOCK_SIZE >( tx, sum );

        /*  C := C - v * w  */
        __syncthreads();
        float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
        if (blockIdx.x>it){
            for( int j = m-tx-1; j>=0 ; j -= BLOCK_SIZE )
                dc[j] += z__1 * v[j];
            __syncthreads();

            /* Adjust the rest of the column norms */
            /*
            if (tx==0){
                float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
                temp = (temp + 1.) * (1. - temp);
                xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
            }
            */
        }
        else
        {
            if (blockIdx.x==it)
                *(T+it) = *tau;
            else
                *(T+blockIdx.x) = MAGMA_S_CNJG(z__1);
        }
    }
    else if (blockIdx.x<=it)// in case tau is zero put the corresponding column of T to zero
    {
        *(T+blockIdx.x) = MAGMA_S_ZERO;
    }
}

//==============================================================================

extern "C"
__global__
void magma_strmv_kernel(const float *T, int ldt, float *t)
{
    const int tx = threadIdx.x;
    T += tx;

    __shared__ float tlocal[ BLOCK_SIZE ];
    float res = MAGMA_S_MAKE(0., 0.);

    tlocal[tx] = t[tx];
    __syncthreads();

    #pragma unroll
    for(int j=0; j<blockDim.x; j++)
        res += T[j*ldt]*tlocal[j];

    t[tx] = res;
}

extern "C"
__global__
void magma_strmv_kernel2(const float *T, int ldt, float *t,
                         float *y, float *tau)
{
    const int tx = threadIdx.x;
    T += blockIdx.x;

    __shared__ float sum[ 128 ];

    sum[tx] = T[tx*ldt]*t[tx];
    magma_sum_reduce_n(blockDim.x, tx, sum);

    __syncthreads();

    if (tx==0){
        y[blockIdx.x] = sum[0];
        if (blockIdx.x==0)
            y[gridDim.x] = tau[0];
    }
}

//==============================================================================

extern "C"
__global__
void magma_strmv_tkernel(float *T, int ldt, float *t, float *y)
{
    const int tx = threadIdx.x;
    T += blockIdx.x*ldt;

    __shared__ float sum[ 128 ];

    sum[tx] = MAGMA_S_CNJG(T[tx])*t[tx];
    magma_sum_reduce_n(blockDim.x, tx, sum);

    __syncthreads();

    if (tx==0)
        y[blockIdx.x] = sum[0];
}

//==============================================================================

/*
    Apply a real elementary reflector H to a real M-by-N
    matrix C from the left. H is represented in the form
          H = I - tau * v * v**H
    where tau is a real scalar and v is a real vector.
    If tau = 0, then H is taken to be the unit matrix.

    To apply H**H (the conjugate transpose of H), supply conjg(tau)
    instead tau.

    The norms of v(:, 1:n) are given as input in xnorm(1:n).
    On exit, the norms are adjusted to hold the norms of v(2:m,2:n).
    This is a difference with the LAPACK's slarf routine.
*/
extern "C" void
magma_slarfx_gpu(
    magma_int_t m, magma_int_t n,
    magmaFloat_ptr v, magmaFloat_ptr tau,
    magmaFloat_ptr C, magma_int_t ldc, magmaFloat_ptr xnorm,
    magmaFloat_ptr dT, magma_int_t iter,
    magmaFloat_ptr work )
{
    magma_int_t N = n + iter + 1;

    if (iter==0)
        hipLaunchKernelGGL(( magma_slarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, C, ldc, xnorm, dT+iter*N, iter);
    else
        hipLaunchKernelGGL(( magma_slarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, C, ldc, xnorm, work, iter);

    if (iter > 0){
        //magma_strmv_kernel<<< 1, iter, 0, magma_stream >>>( dT, N, dT+iter*N);
        hipLaunchKernelGGL(( magma_strmv_kernel2), dim3(iter), dim3(iter), 0, magma_stream , dT, N, work, dT+iter*N, tau);
    }
}

//==============================================================================
6bb75f6214bf3ef82ca9783cff1a86cd690bc9ef.cu
/*
    -- MAGMA (version 1.6.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2015

       @generated from zlarfx.cu normal z -> s, Fri Jan 30 19:00:08 2015

*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"

// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512

#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16


//==============================================================================

__global__
void magma_slarfx_kernel( int m, float *v, float *tau,
                          float *c, int ldc, float *xnorm,
                          float *T, int it )
{
    if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
        const int tx = threadIdx.x;
        //float *dc = c + (blockIdx.x-it-1) * ldc;
        float *dc = c + (blockIdx.x) * ldc;

        __shared__ float sum[ BLOCK_SIZE ];
        float lsum;

        /* NOTE HERE C is the C at position C(i, 0)
         * if blockIdx.x<it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
         * if blockIdx.x>it it perform  w := v**H * C  */
        lsum = MAGMA_S_ZERO;
        for( int j = tx; j < m; j += BLOCK_SIZE ){
            if (j==0){
                lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
                v[j] = MAGMA_S_ONE;
            }
            else
                lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
        }
        sum[tx] = lsum;
        magma_sum_reduce< BLOCK_SIZE >( tx, sum );

        /*  C := C - v * w  */
        __syncthreads();
        float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
        if (blockIdx.x>it){
            for( int j = m-tx-1; j>=0 ; j -= BLOCK_SIZE )
                dc[j] += z__1 * v[j];
            __syncthreads();

            /* Adjust the rest of the column norms */
            /*
            if (tx==0){
                float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
                temp = (temp + 1.) * (1. - temp);
                xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
            }
            */
        }
        else
        {
            if (blockIdx.x==it)
                *(T+it) = *tau;
            else
                *(T+blockIdx.x) = MAGMA_S_CNJG(z__1);
        }
    }
    else if (blockIdx.x<=it)// in case tau is zero put the corresponding column of T to zero
    {
        *(T+blockIdx.x) = MAGMA_S_ZERO;
    }
}

//==============================================================================

extern "C"
__global__
void magma_strmv_kernel(const float *T, int ldt, float *t)
{
    const int tx = threadIdx.x;
    T += tx;

    __shared__ float tlocal[ BLOCK_SIZE ];
    float res = MAGMA_S_MAKE(0., 0.);

    tlocal[tx] = t[tx];
    __syncthreads();

    #pragma unroll
    for(int j=0; j<blockDim.x; j++)
        res += T[j*ldt]*tlocal[j];

    t[tx] = res;
}

extern "C"
__global__
void magma_strmv_kernel2(const float *T, int ldt, float *t,
                         float *y, float *tau)
{
    const int tx = threadIdx.x;
    T += blockIdx.x;

    __shared__ float sum[ 128 ];

    sum[tx] = T[tx*ldt]*t[tx];
    magma_sum_reduce_n(blockDim.x, tx, sum);

    __syncthreads();

    if (tx==0){
        y[blockIdx.x] = sum[0];
        if (blockIdx.x==0)
            y[gridDim.x] = tau[0];
    }
}

//==============================================================================

extern "C"
__global__
void magma_strmv_tkernel(float *T, int ldt, float *t, float *y)
{
    const int tx = threadIdx.x;
    T += blockIdx.x*ldt;

    __shared__ float sum[ 128 ];

    sum[tx] = MAGMA_S_CNJG(T[tx])*t[tx];
    magma_sum_reduce_n(blockDim.x, tx, sum);

    __syncthreads();

    if (tx==0)
        y[blockIdx.x] = sum[0];
}

//==============================================================================

/*
    Apply a real elementary reflector H to a real M-by-N
    matrix C from the left. H is represented in the form
          H = I - tau * v * v**H
    where tau is a real scalar and v is a real vector.
    If tau = 0, then H is taken to be the unit matrix.

    To apply H**H (the conjugate transpose of H), supply conjg(tau)
    instead tau.

    The norms of v(:, 1:n) are given as input in xnorm(1:n).
    On exit, the norms are adjusted to hold the norms of v(2:m,2:n).
    This is a difference with the LAPACK's slarf routine.
*/
extern "C" void
magma_slarfx_gpu(
    magma_int_t m, magma_int_t n,
    magmaFloat_ptr v, magmaFloat_ptr tau,
    magmaFloat_ptr C, magma_int_t ldc, magmaFloat_ptr xnorm,
    magmaFloat_ptr dT, magma_int_t iter,
    magmaFloat_ptr work )
{
    magma_int_t N = n + iter + 1;

    if (iter==0)
        magma_slarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, C, ldc, xnorm, dT+iter*N, iter);
    else
        magma_slarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, C, ldc, xnorm, work, iter);

    if (iter > 0){
        //magma_strmv_kernel<<< 1, iter, 0, magma_stream >>>( dT, N, dT+iter*N);
        magma_strmv_kernel2<<< iter, iter, 0, magma_stream >>>( dT, N, work, dT+iter*N, tau);
    }
}

//==============================================================================
bb3d0141ea5d4d98357f633c8ec601f4319044f6.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Context.h> #include <ATen/hip/HIPContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/PinnedMemoryAllocator.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/hip/MiscUtils.h> #include <ATen/native/Resize.h> #include <ATen/native/BatchLinearAlgebra.h> #include <ATen/native/hip/BatchLinearAlgebraLib.h> #include <ATen/native/cpu/zmath.h> #include <THH/THH.h> // for USE_MAGMA #ifdef USE_MAGMA #include <magma_types.h> #include <magma_v2.h> const bool use_magma_ = true; #else const bool use_magma_ = false; #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLu( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info); template<class scalar_t> void magmaLuBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLuNoPiv( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaLuNoPivBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n); template<class scalar_t> void magmaGetri( magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork, magma_int_t lwork, magma_int_t* info); template<class scalar_t> void magmaGetriBatched( magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholeskySolve( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaCholeskySolveBatched( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholesky( magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaCholeskyBatched( magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaTriangularSolveBatched( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t 
magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n); template<class scalar_t> void magmaGeqrf( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2); template<class scalar_t> void magmaOrgqr( magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info); template<class scalar_t, class value_t=scalar_t> void magmaSymeig( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info); template<class scalar_t, class value_t=scalar_t> void magmaEig( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda, scalar_t *w, scalar_t *VL, magma_int_t ldvl, scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork, value_t *rwork, magma_int_t *info); template<class scalar_t, class value_t=scalar_t> void magmaSvd( magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A, magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu, scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork, value_t* rwork, magma_int_t* iwork, magma_int_t* info); template<class scalar_t> void magmaLuSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaLuSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaGels( magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, scalar_t* hwork, magma_int_t lwork, magma_int_t* info); template<> void magmaSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolve<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgesv_gpu(n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolve<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgesv_gpu(n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** 
dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolveBatched<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_zgesv_batched(n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolveBatched<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_cgesv_batched(n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLu<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLu<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLu<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLu<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); 
AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuBatched<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuBatched<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPiv<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPiv<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPiv<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPiv<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPivBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPivBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPivBatched<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPivBatched<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) { return magma_get_dgetri_nb(n); } template<> inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) 
{ return magma_get_sgetri_nb(n); } template <> inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>( magma_int_t n) { return magma_get_zgetri_nb(n); } template <> inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>( magma_int_t n) { return magma_get_cgetri_nb(n); } template<> void magmaGetri<double>( magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGetri<float>( magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); AT_CUDA_CHECK(hipGetLastError()); } template <> void magmaGetri<c10::complex<double>>( magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<double>* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetri_gpu( n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dwork), lwork, info); AT_CUDA_CHECK(hipGetLastError()); } template <> void magmaGetri<c10::complex<float>>( magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<float>* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetri_gpu( n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dwork), lwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGetriBatched<double>( magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGetriBatched<float>( magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template <> void magmaGetriBatched<c10::complex<double>>( magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, c10::complex<double>** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zgetri_outofplace_batched( n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, reinterpret_cast<magmaDoubleComplex**>(dinvA_array), lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template <> void magmaGetriBatched<c10::complex<float>>( magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, c10::complex<float>** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cgetri_outofplace_batched( n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, reinterpret_cast<magmaFloatComplex**>(dinvA_array), lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void 
magmaCholeskySolve<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskySolve<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskySolve<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zpotrs_gpu(uplo, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskySolve<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cpotrs_gpu(uplo, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskySolveBatched<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskySolveBatched<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskySolveBatched<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_zpotrs_batched(uplo, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskySolveBatched<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_cpotrs_batched(uplo, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholesky<double>( magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dpotrf_gpu(uplo, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholesky<float>( magma_uplo_t uplo, magma_int_t n, 
float* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_spotrf_gpu(uplo, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholesky<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholesky<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskyBatched<double>( magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskyBatched<float>( magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskyBatched<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskyBatched<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaTriangularSolveBatched<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaTriangularSolveBatched<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaTriangularSolveBatched<c10::complex<double>>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmaDoubleComplex alpha({1, 0}); magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, 
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaTriangularSolveBatched<c10::complex<float>>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmaFloatComplex alpha({1, 0}); magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) { return magma_get_dgeqrf_nb(m, n); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) { return magma_get_sgeqrf_nb(m, n); } template <> inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>( magma_int_t m, magma_int_t n) { return magma_get_zgeqrf_nb(m, n); } template <> inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>( magma_int_t m, magma_int_t n) { return magma_get_cgeqrf_nb(m, n); } template<> void magmaGeqrf<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info); } AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGeqrf<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info); } AT_CUDA_CHECK(hipGetLastError()); } template <> void magmaGeqrf<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* tau, c10::complex<double>* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_zgeqrf_gpu( m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(tau), reinterpret_cast<magmaDoubleComplex*>(dT), info); } else { magma_zgeqrf2_gpu( m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(tau), info); } AT_CUDA_CHECK(hipGetLastError()); } template <> void magmaGeqrf<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* tau, c10::complex<float>* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_cgeqrf_gpu( m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(tau), reinterpret_cast<magmaFloatComplex*>(dT), info); } else { magma_cgeqrf2_gpu( m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(tau), info); } AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaOrgqr<double>( magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaOrgqr<float>( magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda, float* tau, float* 
dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); AT_CUDA_CHECK(hipGetLastError()); } template <> void magmaOrgqr<c10::complex<double>>( magma_int_t m, magma_int_t n, magma_int_t k, c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* tau, c10::complex<double>* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zungqr_gpu( m, n, k, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(tau), reinterpret_cast<magmaDoubleComplex*>(dT), nb, info); AT_CUDA_CHECK(hipGetLastError()); } template <> void magmaOrgqr<c10::complex<float>>( magma_int_t m, magma_int_t n, magma_int_t k, c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* tau, c10::complex<float>* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cungqr_gpu( m, n, k, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(tau), reinterpret_cast<magmaFloatComplex*>(dT), nb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSymeig<double>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { (void)rwork; // unused (void)lrwork; // unused MagmaStreamSyncGuard guard; magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSymeig<float>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { (void)rwork; // unused (void)lrwork; // unused MagmaStreamSyncGuard guard; magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSymeig<c10::complex<double>, double>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zheevd_gpu( jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA), ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSymeig<c10::complex<float>, float>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cheevd_gpu( jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA), ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaEig<double>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, double *A, magma_int_t lda, double *w, double *VL, magma_int_t ldvl, double *VR, magma_int_t ldvr, double *work, magma_int_t lwork, 
double *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard; // magma [sd]geev wants to separate output arrays: wr and wi for the real // and imaginary parts double *wr = w; double *wi = w + n; (void)rwork; // unused magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaEig<float>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, float *A, magma_int_t lda, float *w, float *VL, magma_int_t ldvl, float *VR, magma_int_t ldvr, float *work, magma_int_t lwork, float *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard; float *wr = w; float *wi = w + n; (void)rwork; // unused magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaEig<c10::complex<double>, double>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, c10::complex<double> *A, magma_int_t lda, c10::complex<double> *w, c10::complex<double> *VL, magma_int_t ldvl, c10::complex<double> *VR, magma_int_t ldvr, c10::complex<double> *work, magma_int_t lwork, double *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard; magma_zgeev(jobvl, jobvr, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, reinterpret_cast<magmaDoubleComplex*>(w), reinterpret_cast<magmaDoubleComplex*>(VL), ldvl, reinterpret_cast<magmaDoubleComplex*>(VR), ldvr, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaEig<c10::complex<float>, float>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, c10::complex<float> *A, magma_int_t lda, c10::complex<float> *w, c10::complex<float> *VL, magma_int_t ldvl, c10::complex<float> *VR, magma_int_t ldvr, c10::complex<float> *work, magma_int_t lwork, float *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard; magma_cgeev(jobvl, jobvr, n, reinterpret_cast<magmaFloatComplex*>(A), lda, reinterpret_cast<magmaFloatComplex*>(w), reinterpret_cast<magmaFloatComplex*>(VL), ldvl, reinterpret_cast<magmaFloatComplex*>(VR), ldvr, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSvd<double>( magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A, magma_int_t lda, double* s, double* U, magma_int_t ldu, double* VT, magma_int_t ldvt, double* work, magma_int_t lwork, double *rwork, magma_int_t* iwork, magma_int_t* info) { (void)rwork; // unused MagmaStreamSyncGuard guard; magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSvd<float>( magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A, magma_int_t lda, float* s, float* U, magma_int_t ldu, float* VT, magma_int_t ldvt, float* work, magma_int_t lwork, float* rwork, magma_int_t* iwork, magma_int_t* info) { (void)rwork; // unused MagmaStreamSyncGuard guard; magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSvd<c10::complex<float>, float>( magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A, magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu, c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork, float *rwork, magma_int_t* iwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s, reinterpret_cast<magmaFloatComplex*>(U), ldu, 
reinterpret_cast<magmaFloatComplex*>(VT), ldvt, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, iwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSvd<c10::complex<double>, double>( magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A, magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu, c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork, double *rwork, magma_int_t* iwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s, reinterpret_cast<magmaDoubleComplex*>(U), ldu, reinterpret_cast<magmaDoubleComplex*>(VT), ldvt, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, iwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolve<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolve<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolveBatched<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, 
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolveBatched<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGels<float>( magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb, float* hwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgels_gpu(trans, m, n, nrhs, dA, ldda, dB, lddb, hwork, lwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGels<double>( magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb, double* hwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgels_gpu(trans, m, n, nrhs, dA, ldda, dB, lddb, hwork, lwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGels<c10::complex<float>>( magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb, c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgels_gpu(trans, m, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(dB), lddb, reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGels<c10::complex<double>>( magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb, c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgels_gpu(trans, m, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info); AT_CUDA_CHECK(hipGetLastError()); } #endif #define ALLOCATE_ARRAY(name, type, size) \ auto storage_##name = pin_memory<type>(size); \ name = static_cast<type*>(storage_##name.data()); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_solve(Tensor& b, Tensor& A, Tensor& infos) { #ifndef USE_MAGMA AT_ERROR("solve: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t lda = ::max(magma_int_t{1}, n); if (b.dim() == 2) { auto ipiv = at::empty({n}, at::kInt); infos = infos.to(at::kCPU); // magmaSolve requires infos tensor to live on CPU magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(), b_data, lda, infos.data_ptr<magma_int_t>()); } else { auto infos_data = infos.data_ptr<magma_int_t>(); auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx]; magma_int_t* info_array_cur = &infos_data[mini_idx]; magmaSolveBatched<scalar_t>( n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaSolveBatched<scalar_t>( n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda, &infos_data[mini_idx], batch_size % batch_limit, magma_queue); } } #endif } std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) { auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); // infos might not get filled for empty inputs therefore at::zeros is used instead of at::empty auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{ apply_solve<scalar_t>(self_working_copy, A_working_copy, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "solve_cuda"); } else { singleCheckErrors(infos.item().toInt(), "solve_cuda"); } return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy); } // This is a type dispatching helper function for 'apply_solve' Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) { // 'result' and 'input' should be in column major order (it should be checked before calling this function) // the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve' // 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations) // 'input' should contain data of origianl 'input' 
tensor (left-hand-side of the linear system) AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cpu", [&]{ apply_solve<scalar_t>(result, input, infos); }); return result; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /* Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'. 'infos' is an int Tensor containing error codes for each matrix in the batched input. 'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors For more information see MAGMA's documentation for GETRI and GETRF routines. */ template <typename scalar_t> static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) { #ifndef USE_MAGMA AT_ERROR("inverse: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); auto infos_lu_data = infos_lu.data_ptr<magma_int_t>(); auto infos_getri_data = infos_getri.data_ptr<magma_int_t>(); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); // MAGMA does not work with batch_size == 0, let's return early in this case if (batch_size == 0) { return; } magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t lda = std::max<magma_int_t>(1, n); magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** self_array; scalar_t** self_inv_array; ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } // magmaLuBatched leaves ipiv_data values unwritten for singular matrices. // Initialize to avoid memory access violations inside magma kernels (gh-51930). 
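// Illustrative example of the chunking arithmetic used by the loop below (hypothetical
// numbers): with batch_size = 150000 and batch_limit = 65535, the main loop runs
// floor(150000 / 65535) = 2 full passes covering 131070 matrices, and the trailing call
// handles the remaining 150000 % 65535 = 18930 matrices.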
std::fill_n(ipiv_data, batch_size * n, 1); MAGMAQueue magma_queue(self.get_device()); magmaLuBatched<scalar_t>( n, n, self_array, lda, ipiv_array, infos_lu_data, batch_size, magma_queue); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** self_array_cur = &self_array[mini_idx]; scalar_t** self_inv_array_cur = &self_inv_array[mini_idx]; magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx]; magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx]; magmaGetriBatched<scalar_t>( n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur, lda, info_array_cur_getri, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaGetriBatched<scalar_t>( n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx], lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue); } #endif } template <typename scalar_t> static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) { #ifndef USE_MAGMA AT_ERROR("inverse: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t lda = std::max<magma_int_t>(1, n); magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n); // magmaLu and magmaGetri requires infos tensor to live on CPU infos_lu = infos_lu.to(at::kCPU); infos_getri = infos_getri.to(at::kCPU); Tensor ipiv = at::empty({lda}, at::kInt); Tensor dwork = at::empty({lwork}, self.options()); magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>()); magmaGetri<scalar_t>( n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>()); #endif } Tensor _inverse_helper_cuda_legacy(const Tensor& self) { auto self_inv_working_copy = cloneBatchedColumnMajor(self); if (self.dim() > 2) { auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt)); auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt)); auto self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse<scalar_t>( self_working_copy, self_inv_working_copy, infos_lu, infos_getri); }); batchCheckErrors(infos_lu, "inverse_cuda"); batchCheckErrors(infos_getri, "inverse_cuda"); } else { // magmaLu and magmaGetri requires infos tensor to live on CPU auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU)); auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri); }); singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda"); singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda"); } return self_inv_working_copy; } Tensor _inverse_helper_cuda(const Tensor& self) { #ifdef USE_CUSOLVER if ((self.dim() == 2) 
|| (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) { return _inverse_helper_cuda_lib(self); // cusolver or cublas } else { return _inverse_helper_cuda_legacy(self); // magma-cuda } #else return _inverse_helper_cuda_legacy(self); // magma-cuda #endif } // This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors' Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) { // assuming result is in column major order and contains the matrices to invert if (result.dim() > 2) { auto input_working_copy = cloneBatchedColumnMajor(result); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_batched_inverse<scalar_t>( input_working_copy, result, infos_lu, infos_getri); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_single_inverse<scalar_t>(result, infos_lu, infos_getri); }); } return result; } // This is a MAGMA/cuSOLVER dispatching helper function Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) { // This function calculates the inverse matrix in-place // result should be in column major order and contain matrices to invert #ifdef USE_CUSOLVER if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) { return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas } else { return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda } #else return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda #endif return result; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("cholesky_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? 
MagmaUpper : MagmaLower; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t lda = std::max<magma_int_t>(1, n); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); int info_tmp = 0; if (b.dim() == 2) { magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda, b_data, lda, &info_tmp); info = info_tmp; } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{ apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info); }); TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info); return self_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("cholesky: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto self_data = self.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); auto lda = std::max<magma_int_t>(1, n); if (self.dim() == 2) { magma_int_t info = 0; magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info); infos[0] = info; } else { auto self_mat_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t* info_array; scalar_t** self_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; } MAGMAQueue magma_queue(self.get_device()); int64_t batch_limit = self.is_complex() ? 
65535 : 262140; // Compute as many batches of 262140 possible // 262140 is the size of the largest batch of matrices that can be run with // violating maximum kernel configuration // For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information) // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** self_array_cur = &self_array[mini_idx]; magma_int_t* info_array_cur = &info_array[mini_idx]; magmaCholeskyBatched<scalar_t>( uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaCholeskyBatched<scalar_t>( uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } } #endif } Tensor _cholesky_helper_cuda_magma(const Tensor& self, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); Tensor result; if (self.dim() > 2) { // MAGMA's batched cholesky operator has an off-by-one error causing IMA // (see https://github.com/pytorch/pytorch/issues/42666). This code is based // on the #cloneBatchedColumnMajor function however it pads the input with // one extra element utilizing the fact that the resize_as_ method preserves // the storage even if it's larger than the new sizes. This way if MAGMA // reads off bounds it will still be valid user memory. const Tensor input = upper ? self : self.transpose(-1, -2); result = at::empty(input.numel() + 1, input.options()); result.resize_as_(input).copy_(input).transpose_(-1, -2); } else { result = cloneBatchedColumnMajor(upper ? self.transpose(-1, -2) : self); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES( self.scalar_type(), "cholesky_cuda", [&] { apply_cholesky<scalar_t>(result, false, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "cholesky_cuda"); } else { singleCheckErrors(infos[0], "cholesky_cuda"); } return upper ? result.transpose_(-1, -2) : result; } // Todo: cusolverDnXpotrfBatched has some numerical issue and is not used // here. Batched cholesky is dispatched to magma. // We will switch to cusolverDnXpotrfBatched after the issue is fixed. // See https://github.com/pytorch/pytorch/issues/53879. Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) { #ifdef USE_CUSOLVER if (batchCount(self) == 1 || !use_magma_) { return _cholesky_helper_cuda_cusolver(self, upper); } else { return _cholesky_helper_cuda_magma(self, upper); } #else return _cholesky_helper_cuda_magma(self, upper); #endif } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /* Computes the inverse of a symmetric (Hermitian) positive-definite matrix n-by-n matrix 'input' using the Cholesky solver This is an in-place routine, content of 'input' is overwritten. 'infos' is an int Tensor containing error codes for each matrix in the batched input. MAGMA requires 'infos' to reside in CPU memory. For more information see MAGMA's documentation for POTRS routine. 
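As an illustrative sketch of the idea (notation only, not literal code): given the
Cholesky factor of A, the routine below overwrites 'input' with the identity and solves
A * X = I through the existing Cholesky solver, so X = A^{-1}. At the Python level this
corresponds roughly to torch.cholesky_solve(torch.eye(n), u, upper), assuming a single
n-by-n factor u.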
*/ template <typename scalar_t> static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) { #ifndef USE_MAGMA TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA."); #else // magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally // it transfers data several times between GPU and CPU and calls lapack routine on CPU // using magmaCholeskySolveBatched is a lot faster // note that magmaCholeskySolve is also slow // 'input' is modified in-place we need to clone it and replace with a diagonal matrix // for apply_cholesky_solve auto input_working_copy = cloneBatchedColumnMajor(input); // 'input' tensor has to be a batch of diagonal matrix input.fill_(0); input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); Tensor result_u, input_u; if (input.dim() == 2) { // unsqueezing here so that the batched version is used result_u = input.unsqueeze(0); input_u = input_working_copy.unsqueeze(0); } else { result_u = input; input_u = input_working_copy; } // magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument // it returns a single 'magma_int_t' // if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value. int64_t info_tmp = 0; apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp); infos.fill_(info_tmp); #endif } // This is a type dispatching helper function for 'apply_cholesky_inverse' Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) { // This function calculates the inverse matrix in-place // result should be in column major order and contain matrices to invert // the content of result is overwritten by 'apply_cholesky_inverse' AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{ apply_cholesky_inverse<scalar_t>(result, infos, upper); }); return result; } REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) { #ifndef USE_MAGMA AT_ERROR("lu: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_int_t k = ::min(m, n); if (self.dim() == 2) { // If `pivots` is defined, then we have to compute them. // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute // the partially-pivoted LU decomposition with / without pivots. // The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots. // The data is later copied back to the appropriate output tensor. 
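// Illustrative sketch of what this branch produces for a single matrix (rough
// Python-level analogue, assuming a square CUDA tensor `a` and right-hand side `b`):
//   lu, piv = torch.lu(a)           # packed LU factors plus 1-based pivot indices
//   x = torch.lu_solve(b, lu, piv)  # consumed later by the lu_solve path in this file
// The MAGMA call below factorizes self_data in-place and returns the pivots through a
// CPU tensor (piv_tmp), which is then copied into the `pivots` output.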
Tensor info_tmp = at::zeros({}, at::kInt); if (get_pivots) { Tensor piv_tmp = at::empty({k}, at::kInt); magmaLu<scalar_t>( m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>()); pivots.copy_(piv_tmp); } else { magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>()); } infos.copy_(info_tmp); } else { auto self_matrix_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); scalar_t** self_array; ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_matrix_stride]; } MAGMAQueue magma_queue(self.get_device()); // Same comment as in the case of single matrix above. if (get_pivots) { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto pivots_matrix_stride = pivots.size(-1); magma_int_t** pivots_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_matrix_stride]; } magmaLuBatched<scalar_t>( m, n, self_array, m, pivots_array, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } else { magmaLuNoPivBatched<scalar_t>( m, n, self_array, m, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } } #endif } std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) { TORCH_CHECK(self.dim() >= 2, "expected tensor with 2 or more dimensions, got size: ", self.sizes(), " instead"); auto m = self.size(-2); auto n = self.size(-1); auto k = ::min(m, n); auto req_size = self.sizes().vec(); req_size.pop_back(); req_size.back() = k; Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous(); req_size.pop_back(); auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt)); Tensor self_working_copy; if (self.numel() == 0) { self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } else { self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{ apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot); }); } if (check_errors) { if (self.dim() == 2) { singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true); } else { batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true); } } return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { #ifndef USE_MAGMA AT_ERROR("triangular_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans; trans = conjugate_transpose ? MagmaConjTrans : trans; magma_diag_t diag = unitriangular ? 
MagmaUnit : MagmaNonUnit; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); // magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched // magmaTriangularSolve is calling cuBLAS and it prints // ** On entry to DTRSM parameter number 9 had an illegal value // so let's use proper lda parameter here magma_int_t lda = std::max<magma_int_t>(1, n); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit; int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0 for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaTriangularSolveBatched<scalar_t>( uplo, trans, diag, n, nrhs, A_array_cur, lda, b_array_cur, lda, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaTriangularSolveBatched<scalar_t>( uplo, trans, diag, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue); } #endif } void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { (void)infos; // unused AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular); }); } void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { // For batches smaller than 8 and matrix sizes larger than 64x64 cuBLAS forloop is faster than batched version if (batchCount(A) <= 8 && A.size(-1) >= 64) { triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular); } else { #ifndef USE_MAGMA triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular); #else // cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster if (A.size(-1) <= 512) { triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular); } else { triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular); } #endif // USE_MAGMA } } REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau, Tensor& infos, int64_t n_columns) { // TODO: It is possible to implement 
efficient batched orgqr for small tau (tau.size(-1) <= 32) // using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA. // See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA // and Windows failure. // For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983 #if defined(USE_CUSOLVER) return orgqr_helper_cuda_lib(result, tau, infos, n_columns); // cusolver #else TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ", "PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support."); #endif } REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns, bool compute_q, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("qr: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)"); magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)"); auto r_data = R.data_ptr<scalar_t>(); auto r_matrix_stride = matrixStride(R); magma_int_t k = m < n ? m : n; magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n); int64_t batch_size = batchCount(R); // magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors. // The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors. Tensor tau = at::empty({k}, Q.options().device(at::kCPU)); Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options()); scalar_t* tau_data = tau.data_ptr<scalar_t>(); scalar_t* work_data = work.data_ptr<scalar_t>(); // This phase computes R (the raw version) // This uses MAGMA's ?geqrf2_gpu function magma_int_t info = 0; for (int64_t i = 0; i < batch_size; i++) { scalar_t* r_working_ptr = &r_data[i * r_matrix_stride]; magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true); infos[i] = info; if (info != 0) { return; } } if (!compute_q) { // this is for mode='r' return; } // This phase computes Q (the raw version) // We require to perform ?geqrf_gpu again due to this bug in MAGMA: // - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly. 
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu // Refer to the below link for more details: // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800 auto q_data = Q.data_ptr<scalar_t>(); auto q_matrix_stride = matrixStride(Q); for (int64_t i = 0; i < batch_size; i++) { scalar_t* q_working_ptr = &q_data[i * q_matrix_stride]; magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false); infos[i] = info; if (info != 0) { return; } magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor,Tensor> _linalg_qr_helper_cuda(const Tensor& self, std::string mode) { bool compute_q, reduced; std::tie(compute_q, reduced) = _parse_qr_mode(mode); std::vector<int64_t> infos(batchCount(self), 0); // Setup input geometry and inputs for apply_qr std::vector<int64_t> q_sizes, q_strides; int64_t n_columns_q; std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced); Tensor q_working_copy, r_working_copy; // If there are no elements, then we simply return a pair of tensors of required dimensions if (self.numel() == 0) { int64_t n = self.size(-1); r_working_copy = at::empty({n_columns_q, n}, self.options()); if (compute_q) { int64_t n_rows_q = q_sizes[self.dim() - 2]; q_working_copy = at::eye(n_rows_q, n_columns_q, self.options()); } else { q_working_copy = at::empty({0}, self.options()); } return std::make_tuple(q_working_copy, r_working_copy); } if (compute_q) { q_working_copy = at::empty_strided(q_sizes, q_strides, self.options()); q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self); } else { q_working_copy = at::empty({0}, self.options()); } r_working_copy = cloneBatchedColumnMajor(self); int64_t m = q_sizes[self.dim() - 2]; int64_t n = r_working_copy.size(-1); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{ apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "qr_cuda"); } else { singleCheckErrors(infos[0], "qr_cuda"); } if (compute_q) { q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q); } r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu(); return std::make_tuple(q_working_copy, r_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("symeig: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto eigvals_data = eigvals.data_ptr<value_t>(); auto self_matrix_stride = matrixStride(self); auto eigvals_stride = eigvals.size(-1); int64_t batch_size = batchCount(self); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec; scalar_t* wA; ALLOCATE_ARRAY(wA, scalar_t, n * n); magma_int_t info; // Run once, first to get the optimum work sizes. 
// Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magma_int_t liwork = -1; magma_int_t iwkopt; magma_int_t lrwork = -1; value_t rwkopt; magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info); scalar_t* work; magma_int_t* iwork; lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size"); liwork = magma_int_cast(iwkopt, "iwork_size"); ALLOCATE_ARRAY(work, scalar_t, lwork); ALLOCATE_ARRAY(iwork, magma_int_t, liwork); value_t* rwork = nullptr; c10::Storage storage_rwork; if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { lrwork = magma_int_cast(rwkopt, "rwork_size"); storage_rwork = pin_memory<value_t>(lrwork); rwork = static_cast<value_t*>(storage_rwork.data()); } for (int64_t i = 0; i < batch_size; i++) { scalar_t* self_working_ptr = &self_data[i * self_matrix_stride]; value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride]; magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr, wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); auto self_sizes = self.sizes().vec(); self_sizes.pop_back(); ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype())); // magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors. // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues. // The data is later moved to the appropriate device. // In the case where self.numel() == 0, we just return an empty tensor of // dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)") auto eigvals_working_copy = self.numel() == 0 ? at::empty(self_sizes, self.options().dtype(dtype)) : at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU)); if (self.numel() == 0) { return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT)); } auto self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{ apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "symeig_cuda"); } else { singleCheckErrors(infos[0], "symeig_cuda"); } if (eigenvectors) { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy); } else { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options())); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU // memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy // the returned values from CPU to GPU. See also magmaSymeig, which uses a // similar approach. template <typename scalar_t> static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs, int64_t *info_ptr) { #ifndef USE_MAGMA TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. 
" "Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA."); #else TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor"); using value_t = typename c10::scalar_value_type<scalar_t>::type; magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec; magma_int_t n = magma_int_cast(self.size(-1), "n"); auto self_data = self.data_ptr<scalar_t>(); auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>(); scalar_t *wr = out_eigvals_data; scalar_t *vr_data = NULL; magma_int_t ldvr = 1; if (jobvr == MagmaVec) { vr_data = out_eigvecs.data_ptr<scalar_t>(); ldvr = n; } value_t *rwork_data = nullptr; if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { ALLOCATE_ARRAY(rwork_data, value_t, n*2); } if (n > 0) { // call magmaEig once to get the optimal size of work_data scalar_t wkopt; magma_int_t info; magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info); magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt)); // call it a 2nd time to to the actual work scalar_t *work_data = nullptr; ALLOCATE_ARRAY(work_data, scalar_t, lwork); magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info); *info_ptr = info; } #endif } /* * Internal helper; like eig_cuda but: * 1. assume that self is a square matrix of side "n" * 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory * by the caller */ std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) { int64_t n = self.size(-1); // copy self to pinned CPU memory auto self_working_copy = at::empty_strided( {n, n}, // square matrix {1, n}, // column-ordered, as magmaEig expects at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true)); self_working_copy.copy_(self); // tensors holding the results. We use empty_strided to make them column-ordered auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor out_eigvals; if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { out_eigvals = at::empty({n}, options); } else { out_eigvals = at::empty_strided({n, 2}, {1, n}, options); } auto out_eigvecs = eigenvectors ? at::empty_strided({n, n}, {1, n}, options) : Tensor(); int64_t info; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{ apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info); }); singleCheckErrors(info, "eig_cuda"); return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs); } REGISTER_DISPATCH(eig_stub, &eig_kernel_impl); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ syevd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // This function computes eigenvalues 'w' and eigenvectors 'v' of the tensor 'self' // compute_eigenvectors controls whether eigenvectors should be computed // uplo controls the portion of input matrix to consider in computations, allowed values are "u", "U", "l", "L" // '_symeig_helper_cuda' prepares correct input for 'apply_symeig' and checks for possible errors using 'infos' // See also CPU implementation in aten/src/ATen/native/BatchLinearAlgebra.cpp std::tuple<Tensor, Tensor> _syevd_helper_cuda(const Tensor& self, bool compute_eigenvectors, std::string uplo_str) { // NumPy allows lowercase input for UPLO argument // It is assumed that uplo_str is either "U" or "L" char uplo = std::toupper(uplo_str[0]); bool upper = uplo == 'U' ? 
true : false; return _symeig_helper_cuda(self, compute_eigenvectors, upper); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<typename scalar_t> static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT, char jobchar, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("svd: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<value_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); auto batchsize = batchCount(self); magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); auto lda = std::max<magma_int_t>(1, m); auto ldvt = std::max<magma_int_t>(1, n); auto mn = ::min(m, n); c10::Storage storage_rwork; value_t* rwork = nullptr; magma_int_t* iwork; ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn); if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { auto lrwork = computeLRWorkDim(jobchar, m, n); storage_rwork = pin_memory<value_t>(lrwork); rwork = static_cast<value_t*>(storage_rwork.data()); } magma_int_t info = 0; // Run once, first to get the optimum work size. // Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info); lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size"); scalar_t* work; ALLOCATE_ARRAY(work, scalar_t, lwork); for (int64_t i = 0; i < batchsize; i++) { scalar_t* self_working_ptr = &self_data[i * self_stride]; value_t* S_working_ptr = &S_data[i * S_stride]; scalar_t* U_working_ptr = &U_data[i * U_stride]; scalar_t* VT_working_ptr = &VT_data[i * VT_stride]; // Compute S, U (optionally), VT (optionally) magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda, S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) { std::vector<int64_t> infos(batchCount(self), 0); int64_t m = self.size(-2), n = self.size(-1); int64_t k = ::min(m, n); char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N'; Tensor U_working_copy, S_working_copy, VT_working_copy; std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv); // The input matrix, U, S and VT have to reside in pinned memory. // Additionally, the input and U have to be in column major format. // _create_U_S_VT takes care of a part of these requirements (for U, S and VT) // For the input matrix, this requirements are being taken care of below. 
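// Worked example of the column-major stride construction below (illustrative shape):
// for a single m x n = 3 x 2 input, defaultStrides gives the row-major strides {2, 1};
// overwriting the last two entries with {1, m} = {1, 3} places element (i, j) at
// offset i + j * m, i.e. Fortran/column-major order as expected by MAGMA's gesdd.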
// Specify strides auto self_col_major_strides = at::detail::defaultStrides(self.sizes()); self_col_major_strides[self.dim() - 2] = 1; self_col_major_strides[self.dim() - 1] = m; // Create strided tensor in pinned memory auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides, at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true)); self_working_copy.copy_(self); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] { apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "svd_cuda"); } else { singleCheckErrors(infos[0], "svd_cuda"); } U_working_copy = same_stride_to(U_working_copy, self.options()); S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device())); VT_working_copy = same_stride_to(VT_working_copy, self.options()); if (!compute_uv) { VT_working_copy.zero_(); U_working_copy.zero_(); } if (some) { VT_working_copy = VT_working_copy.narrow(-2, 0, k); } // so far we have computed VT, but torch.svd returns V instead. Adjust accordingly. // Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V. VT_working_copy = VT_working_copy.conj(); VT_working_copy.transpose_(-2, -1); return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy); } std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) { #ifdef USE_CUSOLVER return _svd_helper_cuda_lib(self, some, compute_uv); #else return _svd_helper_cuda_legacy(self, some, compute_uv); #endif } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("lu_solve: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto b_data = b.data_ptr<scalar_t>(); auto lu_data = lu.data_ptr<scalar_t>(); auto n = lu.size(-2); auto nrhs = b.size(-1); int info_tmp = 0; if (b.dim() == 2) { Tensor pivots_tmp = pivots.cpu(); magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp); info = info_tmp; } else { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto b_stride = matrixStride(b); auto lu_stride = matrixStride(lu); auto pivots_stride = pivots.size(-1); magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount"); magma_int_t** pivots_array; scalar_t** lu_array; scalar_t** b_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_stride]; b_array[i] = &b_data[i * b_stride]; lu_array[i] = &lu_data[i * lu_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** lu_array_cur = &lu_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** pivots_array_cur = &pivots_array[mini_idx]; magmaLuSolveBatched<scalar_t>( n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaLuSolveBatched<scalar_t>( n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data); auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous(); if (self.numel() == 0 || LU_data.numel() == 0) { return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{ apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info); }); TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info); return self_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ std::tuple<Tensor, Tensor, Tensor> _lstsq_helper_cuda( const Tensor& a, const Tensor& b, double cond, c10::optional<std::string> driver_name) { #ifndef USE_MAGMA AT_ERROR("torch.linalg.lstsq: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "torch.linalg.lstsq_cuda", [&] { auto trans = MagmaNoTrans; auto m = magma_int_cast(a.size(-2), "m"); auto n = magma_int_cast(a.size(-1), "n"); auto nrhs = magma_int_cast(b.size(-1), "nrhs"); auto ldda = std::max<magma_int_t>(1, m); auto lddb = std::max<magma_int_t>(1, ::max(m, n)); auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n); auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb; Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type()); auto* hwork_ptr = hwork.data_ptr<scalar_t>(); magma_int_t info; batch_iterator_with_broadcasting<scalar_t>(a, b, [&](scalar_t* a_working_ptr, scalar_t* b_working_ptr, int64_t a_linear_batch_idx) { magmaGels<scalar_t>(trans, m, n, nrhs, a_working_ptr, ldda, b_working_ptr, lddb, hwork_ptr, lwork, &info); singleCheckErrors(static_cast<int64_t>(info), "torch.linalg.lstsq_cuda"); } ); }); Tensor rank, singular_values; return std::make_tuple(b, rank, singular_values); #endif } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ }} // namespace at::native #undef ALLOCATE_ARRAY
bb3d0141ea5d4d98357f633c8ec601f4319044f6.cu
#include <ATen/Context.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/PinnedMemoryAllocator.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/cuda/MiscUtils.h> #include <ATen/native/Resize.h> #include <ATen/native/BatchLinearAlgebra.h> #include <ATen/native/cuda/BatchLinearAlgebraLib.h> #include <ATen/native/cpu/zmath.h> #include <THC/THC.h> // for USE_MAGMA #ifdef USE_MAGMA #include <magma_types.h> #include <magma_v2.h> const bool use_magma_ = true; #else const bool use_magma_ = false; #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLu( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info); template<class scalar_t> void magmaLuBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLuNoPiv( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaLuNoPivBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n); template<class scalar_t> void magmaGetri( magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork, magma_int_t lwork, magma_int_t* info); template<class scalar_t> void magmaGetriBatched( magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholeskySolve( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaCholeskySolveBatched( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholesky( magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaCholeskyBatched( magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaTriangularSolveBatched( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n); template<class scalar_t> 
void magmaGeqrf( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2); template<class scalar_t> void magmaOrgqr( magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info); template<class scalar_t, class value_t=scalar_t> void magmaSymeig( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info); template<class scalar_t, class value_t=scalar_t> void magmaEig( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda, scalar_t *w, scalar_t *VL, magma_int_t ldvl, scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork, value_t *rwork, magma_int_t *info); template<class scalar_t, class value_t=scalar_t> void magmaSvd( magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A, magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu, scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork, value_t* rwork, magma_int_t* iwork, magma_int_t* info); template<class scalar_t> void magmaLuSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaLuSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaGels( magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, scalar_t* hwork, magma_int_t lwork, magma_int_t* info); template<> void magmaSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolve<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgesv_gpu(n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolve<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgesv_gpu(n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, 
const MAGMAQueue& magma_queue) { magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolveBatched<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_zgesv_batched(n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolveBatched<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_cgesv_batched(n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLu<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLu<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLu<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLu<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void 
magmaLuBatched<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuBatched<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPiv<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPiv<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPiv<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPiv<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPivBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPivBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPivBatched<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPivBatched<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) { return magma_get_dgetri_nb(n); } template<> inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) { return magma_get_sgetri_nb(n); } 
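// Note: the *getri_nb block sizes returned by these specializations are used
// to size the GETRI workspace, e.g.
// lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n) in apply_single_inverse below.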
template <> inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>( magma_int_t n) { return magma_get_zgetri_nb(n); } template <> inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>( magma_int_t n) { return magma_get_cgetri_nb(n); } template<> void magmaGetri<double>( magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaGetri<float>( magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template <> void magmaGetri<c10::complex<double>>( magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<double>* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetri_gpu( n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dwork), lwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template <> void magmaGetri<c10::complex<float>>( magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<float>* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetri_gpu( n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dwork), lwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaGetriBatched<double>( magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaGetriBatched<float>( magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template <> void magmaGetriBatched<c10::complex<double>>( magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, c10::complex<double>** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zgetri_outofplace_batched( n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, reinterpret_cast<magmaDoubleComplex**>(dinvA_array), lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template <> void magmaGetriBatched<c10::complex<float>>( magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, c10::complex<float>** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cgetri_outofplace_batched( n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, reinterpret_cast<magmaFloatComplex**>(dinvA_array), lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolve<double>( magma_uplo_t 
uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolve<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolve<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zpotrs_gpu(uplo, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolve<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cpotrs_gpu(uplo, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolveBatched<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolveBatched<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolveBatched<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_zpotrs_batched(uplo, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolveBatched<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_cpotrs_batched(uplo, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholesky<double>( magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dpotrf_gpu(uplo, n, dA, ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholesky<float>( magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, 
magma_int_t* info) { MagmaStreamSyncGuard guard; magma_spotrf_gpu(uplo, n, dA, ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholesky<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholesky<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskyBatched<double>( magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskyBatched<float>( magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskyBatched<c10::complex<double>>( magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskyBatched<c10::complex<float>>( magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaTriangularSolveBatched<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaTriangularSolveBatched<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaTriangularSolveBatched<c10::complex<double>>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmaDoubleComplex alpha({1, 0}); magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, 
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaTriangularSolveBatched<c10::complex<float>>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmaFloatComplex alpha({1, 0}); magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) { return magma_get_dgeqrf_nb(m, n); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) { return magma_get_sgeqrf_nb(m, n); } template <> inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>( magma_int_t m, magma_int_t n) { return magma_get_zgeqrf_nb(m, n); } template <> inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>( magma_int_t m, magma_int_t n) { return magma_get_cgeqrf_nb(m, n); } template<> void magmaGeqrf<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info); } AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaGeqrf<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info); } AT_CUDA_CHECK(cudaGetLastError()); } template <> void magmaGeqrf<c10::complex<double>>( magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* tau, c10::complex<double>* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_zgeqrf_gpu( m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(tau), reinterpret_cast<magmaDoubleComplex*>(dT), info); } else { magma_zgeqrf2_gpu( m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(tau), info); } AT_CUDA_CHECK(cudaGetLastError()); } template <> void magmaGeqrf<c10::complex<float>>( magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* tau, c10::complex<float>* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_cgeqrf_gpu( m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(tau), reinterpret_cast<magmaFloatComplex*>(dT), info); } else { magma_cgeqrf2_gpu( m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(tau), info); } AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaOrgqr<double>( magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaOrgqr<float>( magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda, float* tau, 
float* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); AT_CUDA_CHECK(cudaGetLastError()); } template <> void magmaOrgqr<c10::complex<double>>( magma_int_t m, magma_int_t n, magma_int_t k, c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* tau, c10::complex<double>* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zungqr_gpu( m, n, k, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, reinterpret_cast<magmaDoubleComplex*>(tau), reinterpret_cast<magmaDoubleComplex*>(dT), nb, info); AT_CUDA_CHECK(cudaGetLastError()); } template <> void magmaOrgqr<c10::complex<float>>( magma_int_t m, magma_int_t n, magma_int_t k, c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* tau, c10::complex<float>* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cungqr_gpu( m, n, k, reinterpret_cast<magmaFloatComplex*>(dA), ldda, reinterpret_cast<magmaFloatComplex*>(tau), reinterpret_cast<magmaFloatComplex*>(dT), nb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSymeig<double>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { (void)rwork; // unused (void)lrwork; // unused MagmaStreamSyncGuard guard; magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSymeig<float>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { (void)rwork; // unused (void)lrwork; // unused MagmaStreamSyncGuard guard; magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSymeig<c10::complex<double>, double>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda, double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zheevd_gpu( jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA), ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSymeig<c10::complex<float>, float>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda, float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork, magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cheevd_gpu( jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA), ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaEig<double>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, double *A, magma_int_t lda, double *w, double *VL, magma_int_t ldvl, double *VR, magma_int_t ldvr, double *work, 
magma_int_t lwork, double *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard; // magma [sd]geev wants to separate output arrays: wr and wi for the real // and imaginary parts double *wr = w; double *wi = w + n; (void)rwork; // unused magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaEig<float>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, float *A, magma_int_t lda, float *w, float *VL, magma_int_t ldvl, float *VR, magma_int_t ldvr, float *work, magma_int_t lwork, float *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard; float *wr = w; float *wi = w + n; (void)rwork; // unused magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaEig<c10::complex<double>, double>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, c10::complex<double> *A, magma_int_t lda, c10::complex<double> *w, c10::complex<double> *VL, magma_int_t ldvl, c10::complex<double> *VR, magma_int_t ldvr, c10::complex<double> *work, magma_int_t lwork, double *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard; magma_zgeev(jobvl, jobvr, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, reinterpret_cast<magmaDoubleComplex*>(w), reinterpret_cast<magmaDoubleComplex*>(VL), ldvl, reinterpret_cast<magmaDoubleComplex*>(VR), ldvr, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaEig<c10::complex<float>, float>( magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, c10::complex<float> *A, magma_int_t lda, c10::complex<float> *w, c10::complex<float> *VL, magma_int_t ldvl, c10::complex<float> *VR, magma_int_t ldvr, c10::complex<float> *work, magma_int_t lwork, float *rwork, magma_int_t *info) { MagmaStreamSyncGuard guard; magma_cgeev(jobvl, jobvr, n, reinterpret_cast<magmaFloatComplex*>(A), lda, reinterpret_cast<magmaFloatComplex*>(w), reinterpret_cast<magmaFloatComplex*>(VL), ldvl, reinterpret_cast<magmaFloatComplex*>(VR), ldvr, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSvd<double>( magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A, magma_int_t lda, double* s, double* U, magma_int_t ldu, double* VT, magma_int_t ldvt, double* work, magma_int_t lwork, double *rwork, magma_int_t* iwork, magma_int_t* info) { (void)rwork; // unused MagmaStreamSyncGuard guard; magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSvd<float>( magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A, magma_int_t lda, float* s, float* U, magma_int_t ldu, float* VT, magma_int_t ldvt, float* work, magma_int_t lwork, float* rwork, magma_int_t* iwork, magma_int_t* info) { (void)rwork; // unused MagmaStreamSyncGuard guard; magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSvd<c10::complex<float>, float>( magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A, magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu, c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork, float *rwork, magma_int_t* iwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s, reinterpret_cast<magmaFloatComplex*>(U), ldu, 
reinterpret_cast<magmaFloatComplex*>(VT), ldvt, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, iwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSvd<c10::complex<double>, double>( magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A, magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu, c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork, double *rwork, magma_int_t* iwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s, reinterpret_cast<magmaDoubleComplex*>(U), ldu, reinterpret_cast<magmaDoubleComplex*>(VT), ldvt, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, iwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuSolve<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuSolve<c10::complex<float>>( magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuSolveBatched<c10::complex<double>>( magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, 
      reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
    magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs,
      reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
      reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGels<float>(
    magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
    float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
    float* hwork, magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgels_gpu(trans, m, n, nrhs,
      dA, ldda, dB, lddb,
      hwork, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGels<double>(
    magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
    double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
    double* hwork, magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgels_gpu(trans, m, n, nrhs,
      dA, ldda, dB, lddb,
      hwork, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGels<c10::complex<float>>(
    magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
    c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
    c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgels_gpu(trans, m, n, nrhs,
      reinterpret_cast<magmaFloatComplex*>(dA), ldda,
      reinterpret_cast<magmaFloatComplex*>(dB), lddb,
      reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGels<c10::complex<double>>(
    magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
    c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
    c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgels_gpu(trans, m, n, nrhs,
      reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
      reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
      reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
#endif

#define ALLOCATE_ARRAY(name, type, size) \
  auto storage_##name = pin_memory<type>(size); \
  name = static_cast<type*>(storage_##name.data());

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  magma_int_t lda = std::max(magma_int_t{1}, n);

  if (b.dim() == 2) {
    auto ipiv = at::empty({n}, at::kInt);
    infos = infos.to(at::kCPU);  // magmaSolve requires infos tensor to live on CPU
    magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
                         b_data, lda, infos.data_ptr<magma_int_t>());
  } else {
    auto infos_data = infos.data_ptr<magma_int_t>();
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    magma_int_t* ipiv_data;
    magma_int_t** ipiv_array;
    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
    ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
      ipiv_array[i] = &ipiv_data[i * n];
    }

    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
      magma_int_t* info_array_cur = &infos_data[mini_idx];

      magmaSolveBatched<scalar_t>(
          n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
          info_array_cur, batch_limit, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0) {
      magmaSolveBatched<scalar_t>(
          n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
          &infos_data[mini_idx], batch_size % batch_limit, magma_queue);
    }
  }
#endif
}

std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto A_working_copy = cloneBatchedColumnMajor(A);
  // infos might not get filled for empty inputs therefore at::zeros is used instead of at::empty
  auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
    apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "solve_cuda");
  } else {
    singleCheckErrors(infos.item().toInt(), "solve_cuda");
  }
  return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}

// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
  // 'result' and 'input' should be in column major order (it should be checked before calling this function)
  // the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
  // 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
  // 'input' should contain data of original 'input' tensor (left-hand-side of the linear system)
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cpu", [&]{
    apply_solve<scalar_t>(result, input, infos);
  });
  return result;
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

/*
Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  auto self_mat_stride = matrixStride(self);
  auto self_inv_data = self_inv.data_ptr<scalar_t>();
  auto self_inv_mat_stride = matrixStride(self_inv);

  auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
  auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();

  magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
  // MAGMA does not work with batch_size == 0, let's return early in this case
  if (batch_size == 0) {
    return;
  }

  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  magma_int_t lda = std::max<magma_int_t>(1, n);

  magma_int_t* ipiv_data;
  magma_int_t** ipiv_array;
  scalar_t** self_array;
  scalar_t** self_inv_array;

  ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
  ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
  ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
  ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);

  // Set up the created arrays
  for (int64_t i = 0; i < batch_size; i++) {
    self_array[i] = &self_data[i * self_mat_stride];
    self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
    ipiv_array[i] = &ipiv_data[i * n];
  }
  // magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
  // Initialize to avoid memory access violations inside magma kernels (gh-51930).
  std::fill_n(ipiv_data, batch_size * n, 1);

  MAGMAQueue magma_queue(self.get_device());
  magmaLuBatched<scalar_t>(
    n, n, self_array, lda, ipiv_array, infos_lu_data,
    batch_size, magma_queue);

  constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 possible
  // The number of "mini"-batches are floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
  int64_t mini_batches = batch_size / batch_limit, mini_idx;
  for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
    scalar_t** self_array_cur = &self_array[mini_idx];
    scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
    magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
    magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];

    magmaGetriBatched<scalar_t>(
      n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
      lda, info_array_cur_getri, batch_limit, magma_queue);
  }

  // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
  // which concisely is equal to batch_size % batch_limit
  if (batch_size % batch_limit != 0) {
    magmaGetriBatched<scalar_t>(
      n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
      lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
  }
#endif
}

template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  magma_int_t lda = std::max<magma_int_t>(1, n);
  magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);

  // magmaLu and magmaGetri requires infos tensor to live on CPU
  infos_lu = infos_lu.to(at::kCPU);
  infos_getri = infos_getri.to(at::kCPU);

  Tensor ipiv = at::empty({lda}, at::kInt);
  Tensor dwork = at::empty({lwork}, self.options());
  magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
  magmaGetri<scalar_t>(
    n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}

Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
  auto self_inv_working_copy = cloneBatchedColumnMajor(self);
  if (self.dim() > 2) {
    auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
    auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
    auto self_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_batched_inverse<scalar_t>(
        self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
    });
    batchCheckErrors(infos_lu, "inverse_cuda");
    batchCheckErrors(infos_getri, "inverse_cuda");
  } else {
    // magmaLu and magmaGetri requires infos tensor to live on CPU
    auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
    });
    singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
    singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
  }
  return self_inv_working_copy;
}

Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
  if ((self.dim() == 2)
|| (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) { return _inverse_helper_cuda_lib(self); // cusolver or cublas } else { return _inverse_helper_cuda_legacy(self); // magma-cuda } #else return _inverse_helper_cuda_legacy(self); // magma-cuda #endif } // This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors' Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) { // assuming result is in column major order and contains the matrices to invert if (result.dim() > 2) { auto input_working_copy = cloneBatchedColumnMajor(result); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_batched_inverse<scalar_t>( input_working_copy, result, infos_lu, infos_getri); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_single_inverse<scalar_t>(result, infos_lu, infos_getri); }); } return result; } // This is a MAGMA/cuSOLVER dispatching helper function Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) { // This function calculates the inverse matrix in-place // result should be in column major order and contain matrices to invert #ifdef USE_CUSOLVER if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) { return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas } else { return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda } #else return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda #endif return result; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("cholesky_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? 
MagmaUpper : MagmaLower; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t lda = std::max<magma_int_t>(1, n); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); int info_tmp = 0; if (b.dim() == 2) { magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda, b_data, lda, &info_tmp); info = info_tmp; } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{ apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info); }); TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info); return self_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("cholesky: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto self_data = self.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); auto lda = std::max<magma_int_t>(1, n); if (self.dim() == 2) { magma_int_t info = 0; magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info); infos[0] = info; } else { auto self_mat_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t* info_array; scalar_t** self_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; } MAGMAQueue magma_queue(self.get_device()); int64_t batch_limit = self.is_complex() ? 
65535 : 262140; // Compute as many batches of 262140 possible // 262140 is the size of the largest batch of matrices that can be run with // violating maximum kernel configuration // For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information) // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** self_array_cur = &self_array[mini_idx]; magma_int_t* info_array_cur = &info_array[mini_idx]; magmaCholeskyBatched<scalar_t>( uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaCholeskyBatched<scalar_t>( uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } } #endif } Tensor _cholesky_helper_cuda_magma(const Tensor& self, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); Tensor result; if (self.dim() > 2) { // MAGMA's batched cholesky operator has an off-by-one error causing IMA // (see https://github.com/pytorch/pytorch/issues/42666). This code is based // on the #cloneBatchedColumnMajor function however it pads the input with // one extra element utilizing the fact that the resize_as_ method preserves // the storage even if it's larger than the new sizes. This way if MAGMA // reads off bounds it will still be valid user memory. const Tensor input = upper ? self : self.transpose(-1, -2); result = at::empty(input.numel() + 1, input.options()); result.resize_as_(input).copy_(input).transpose_(-1, -2); } else { result = cloneBatchedColumnMajor(upper ? self.transpose(-1, -2) : self); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES( self.scalar_type(), "cholesky_cuda", [&] { apply_cholesky<scalar_t>(result, false, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "cholesky_cuda"); } else { singleCheckErrors(infos[0], "cholesky_cuda"); } return upper ? result.transpose_(-1, -2) : result; } // Todo: cusolverDnXpotrfBatched has some numerical issue and is not used // here. Batched cholesky is dispatched to magma. // We will switch to cusolverDnXpotrfBatched after the issue is fixed. // See https://github.com/pytorch/pytorch/issues/53879. Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) { #ifdef USE_CUSOLVER if (batchCount(self) == 1 || !use_magma_) { return _cholesky_helper_cuda_cusolver(self, upper); } else { return _cholesky_helper_cuda_magma(self, upper); } #else return _cholesky_helper_cuda_magma(self, upper); #endif } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /* Computes the inverse of a symmetric (Hermitian) positive-definite matrix n-by-n matrix 'input' using the Cholesky solver This is an in-place routine, content of 'input' is overwritten. 'infos' is an int Tensor containing error codes for each matrix in the batched input. MAGMA requires 'infos' to reside in CPU memory. For more information see MAGMA's documentation for POTRS routine. 
*/ template <typename scalar_t> static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) { #ifndef USE_MAGMA TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA."); #else // magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally // it transfers data several times between GPU and CPU and calls lapack routine on CPU // using magmaCholeskySolveBatched is a lot faster // note that magmaCholeskySolve is also slow // 'input' is modified in-place we need to clone it and replace with a diagonal matrix // for apply_cholesky_solve auto input_working_copy = cloneBatchedColumnMajor(input); // 'input' tensor has to be a batch of diagonal matrix input.fill_(0); input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); Tensor result_u, input_u; if (input.dim() == 2) { // unsqueezing here so that the batched version is used result_u = input.unsqueeze(0); input_u = input_working_copy.unsqueeze(0); } else { result_u = input; input_u = input_working_copy; } // magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument // it returns a single 'magma_int_t' // if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value. int64_t info_tmp = 0; apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp); infos.fill_(info_tmp); #endif } // This is a type dispatching helper function for 'apply_cholesky_inverse' Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) { // This function calculates the inverse matrix in-place // result should be in column major order and contain matrices to invert // the content of result is overwritten by 'apply_cholesky_inverse' AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{ apply_cholesky_inverse<scalar_t>(result, infos, upper); }); return result; } REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) { #ifndef USE_MAGMA AT_ERROR("lu: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_int_t k = std::min(m, n); if (self.dim() == 2) { // If `pivots` is defined, then we have to compute them. // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute // the partially-pivoted LU decomposition with / without pivots. // The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots. // The data is later copied back to the appropriate output tensor. 
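      // info_tmp and piv_tmp below are small CPU-side tensors that receive
      // MAGMA's output and are then copied into the caller-provided
      // 'infos' and 'pivots' tensors.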
Tensor info_tmp = at::zeros({}, at::kInt); if (get_pivots) { Tensor piv_tmp = at::empty({k}, at::kInt); magmaLu<scalar_t>( m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>()); pivots.copy_(piv_tmp); } else { magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>()); } infos.copy_(info_tmp); } else { auto self_matrix_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); scalar_t** self_array; ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_matrix_stride]; } MAGMAQueue magma_queue(self.get_device()); // Same comment as in the case of single matrix above. if (get_pivots) { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto pivots_matrix_stride = pivots.size(-1); magma_int_t** pivots_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_matrix_stride]; } magmaLuBatched<scalar_t>( m, n, self_array, m, pivots_array, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } else { magmaLuNoPivBatched<scalar_t>( m, n, self_array, m, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } } #endif } std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) { TORCH_CHECK(self.dim() >= 2, "expected tensor with 2 or more dimensions, got size: ", self.sizes(), " instead"); auto m = self.size(-2); auto n = self.size(-1); auto k = std::min(m, n); auto req_size = self.sizes().vec(); req_size.pop_back(); req_size.back() = k; Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous(); req_size.pop_back(); auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt)); Tensor self_working_copy; if (self.numel() == 0) { self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } else { self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{ apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot); }); } if (check_errors) { if (self.dim() == 2) { singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true); } else { batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true); } } return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { #ifndef USE_MAGMA AT_ERROR("triangular_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans; trans = conjugate_transpose ? MagmaConjTrans : trans; magma_diag_t diag = unitriangular ? 
MagmaUnit : MagmaNonUnit; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); // magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched // magmaTriangularSolve is calling cuBLAS and it prints // ** On entry to DTRSM parameter number 9 had an illegal value // so let's use proper lda parameter here magma_int_t lda = std::max<magma_int_t>(1, n); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit; int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0 for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaTriangularSolveBatched<scalar_t>( uplo, trans, diag, n, nrhs, A_array_cur, lda, b_array_cur, lda, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaTriangularSolveBatched<scalar_t>( uplo, trans, diag, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue); } #endif } void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { (void)infos; // unused AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular); }); } void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { // For batches smaller than 8 and matrix sizes larger than 64x64 cuBLAS forloop is faster than batched version if (batchCount(A) <= 8 && A.size(-1) >= 64) { triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular); } else { #ifndef USE_MAGMA triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular); #else // cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster if (A.size(-1) <= 512) { triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular); } else { triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular); } #endif // USE_MAGMA } } REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau, Tensor& infos, int64_t n_columns) { // TODO: It is possible to implement 
efficient batched orgqr for small tau (tau.size(-1) <= 32) // using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA. // See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA // and Windows failure. // For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983 #if defined(USE_CUSOLVER) return orgqr_helper_cuda_lib(result, tau, infos, n_columns); // cusolver #else TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ", "PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support."); #endif } REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns, bool compute_q, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("qr: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)"); magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)"); auto r_data = R.data_ptr<scalar_t>(); auto r_matrix_stride = matrixStride(R); magma_int_t k = m < n ? m : n; magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n); int64_t batch_size = batchCount(R); // magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors. // The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors. Tensor tau = at::empty({k}, Q.options().device(at::kCPU)); Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options()); scalar_t* tau_data = tau.data_ptr<scalar_t>(); scalar_t* work_data = work.data_ptr<scalar_t>(); // This phase computes R (the raw version) // This uses MAGMA's ?geqrf2_gpu function magma_int_t info = 0; for (int64_t i = 0; i < batch_size; i++) { scalar_t* r_working_ptr = &r_data[i * r_matrix_stride]; magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true); infos[i] = info; if (info != 0) { return; } } if (!compute_q) { // this is for mode='r' return; } // This phase computes Q (the raw version) // We require to perform ?geqrf_gpu again due to this bug in MAGMA: // - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly. 
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu // Refer to the below link for more details: // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800 auto q_data = Q.data_ptr<scalar_t>(); auto q_matrix_stride = matrixStride(Q); for (int64_t i = 0; i < batch_size; i++) { scalar_t* q_working_ptr = &q_data[i * q_matrix_stride]; magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false); infos[i] = info; if (info != 0) { return; } magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor,Tensor> _linalg_qr_helper_cuda(const Tensor& self, std::string mode) { bool compute_q, reduced; std::tie(compute_q, reduced) = _parse_qr_mode(mode); std::vector<int64_t> infos(batchCount(self), 0); // Setup input geometry and inputs for apply_qr std::vector<int64_t> q_sizes, q_strides; int64_t n_columns_q; std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced); Tensor q_working_copy, r_working_copy; // If there are no elements, then we simply return a pair of tensors of required dimensions if (self.numel() == 0) { int64_t n = self.size(-1); r_working_copy = at::empty({n_columns_q, n}, self.options()); if (compute_q) { int64_t n_rows_q = q_sizes[self.dim() - 2]; q_working_copy = at::eye(n_rows_q, n_columns_q, self.options()); } else { q_working_copy = at::empty({0}, self.options()); } return std::make_tuple(q_working_copy, r_working_copy); } if (compute_q) { q_working_copy = at::empty_strided(q_sizes, q_strides, self.options()); q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self); } else { q_working_copy = at::empty({0}, self.options()); } r_working_copy = cloneBatchedColumnMajor(self); int64_t m = q_sizes[self.dim() - 2]; int64_t n = r_working_copy.size(-1); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{ apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "qr_cuda"); } else { singleCheckErrors(infos[0], "qr_cuda"); } if (compute_q) { q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q); } r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu(); return std::make_tuple(q_working_copy, r_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("symeig: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto eigvals_data = eigvals.data_ptr<value_t>(); auto self_matrix_stride = matrixStride(self); auto eigvals_stride = eigvals.size(-1); int64_t batch_size = batchCount(self); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec; scalar_t* wA; ALLOCATE_ARRAY(wA, scalar_t, n * n); magma_int_t info; // Run once, first to get the optimum work sizes. 
// Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magma_int_t liwork = -1; magma_int_t iwkopt; magma_int_t lrwork = -1; value_t rwkopt; magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info); scalar_t* work; magma_int_t* iwork; lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size"); liwork = magma_int_cast(iwkopt, "iwork_size"); ALLOCATE_ARRAY(work, scalar_t, lwork); ALLOCATE_ARRAY(iwork, magma_int_t, liwork); value_t* rwork = nullptr; c10::Storage storage_rwork; if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { lrwork = magma_int_cast(rwkopt, "rwork_size"); storage_rwork = pin_memory<value_t>(lrwork); rwork = static_cast<value_t*>(storage_rwork.data()); } for (int64_t i = 0; i < batch_size; i++) { scalar_t* self_working_ptr = &self_data[i * self_matrix_stride]; value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride]; magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr, wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); auto self_sizes = self.sizes().vec(); self_sizes.pop_back(); ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype())); // magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors. // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues. // The data is later moved to the appropriate device. // In the case where self.numel() == 0, we just return an empty tensor of // dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)") auto eigvals_working_copy = self.numel() == 0 ? at::empty(self_sizes, self.options().dtype(dtype)) : at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU)); if (self.numel() == 0) { return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT)); } auto self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{ apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "symeig_cuda"); } else { singleCheckErrors(infos[0], "symeig_cuda"); } if (eigenvectors) { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy); } else { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options())); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU // memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy // the returned values from CPU to GPU. See also magmaSymeig, which uses a // similar approach. template <typename scalar_t> static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs, int64_t *info_ptr) { #ifndef USE_MAGMA TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. 
" "Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA."); #else TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor"); using value_t = typename c10::scalar_value_type<scalar_t>::type; magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec; magma_int_t n = magma_int_cast(self.size(-1), "n"); auto self_data = self.data_ptr<scalar_t>(); auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>(); scalar_t *wr = out_eigvals_data; scalar_t *vr_data = NULL; magma_int_t ldvr = 1; if (jobvr == MagmaVec) { vr_data = out_eigvecs.data_ptr<scalar_t>(); ldvr = n; } value_t *rwork_data = nullptr; if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { ALLOCATE_ARRAY(rwork_data, value_t, n*2); } if (n > 0) { // call magmaEig once to get the optimal size of work_data scalar_t wkopt; magma_int_t info; magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info); magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt)); // call it a 2nd time to to the actual work scalar_t *work_data = nullptr; ALLOCATE_ARRAY(work_data, scalar_t, lwork); magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info); *info_ptr = info; } #endif } /* * Internal helper; like eig_cuda but: * 1. assume that self is a square matrix of side "n" * 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory * by the caller */ std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) { int64_t n = self.size(-1); // copy self to pinned CPU memory auto self_working_copy = at::empty_strided( {n, n}, // square matrix {1, n}, // column-ordered, as magmaEig expects at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true)); self_working_copy.copy_(self); // tensors holding the results. We use empty_strided to make them column-ordered auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor out_eigvals; if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { out_eigvals = at::empty({n}, options); } else { out_eigvals = at::empty_strided({n, 2}, {1, n}, options); } auto out_eigvecs = eigenvectors ? at::empty_strided({n, n}, {1, n}, options) : Tensor(); int64_t info; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{ apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info); }); singleCheckErrors(info, "eig_cuda"); return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs); } REGISTER_DISPATCH(eig_stub, &eig_kernel_impl); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ syevd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // This function computes eigenvalues 'w' and eigenvectors 'v' of the tensor 'self' // compute_eigenvectors controls whether eigenvectors should be computed // uplo controls the portion of input matrix to consider in computations, allowed values are "u", "U", "l", "L" // '_symeig_helper_cuda' prepares correct input for 'apply_symeig' and checks for possible errors using 'infos' // See also CPU implementation in aten/src/ATen/native/BatchLinearAlgebra.cpp std::tuple<Tensor, Tensor> _syevd_helper_cuda(const Tensor& self, bool compute_eigenvectors, std::string uplo_str) { // NumPy allows lowercase input for UPLO argument // It is assumed that uplo_str is either "U" or "L" char uplo = std::toupper(uplo_str[0]); bool upper = uplo == 'U' ? 
true : false; return _symeig_helper_cuda(self, compute_eigenvectors, upper); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<typename scalar_t> static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT, char jobchar, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("svd: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<value_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); auto batchsize = batchCount(self); magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); auto lda = std::max<magma_int_t>(1, m); auto ldvt = std::max<magma_int_t>(1, n); auto mn = std::min(m, n); c10::Storage storage_rwork; value_t* rwork = nullptr; magma_int_t* iwork; ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn); if (isComplexType(at::typeMetaToScalarType(self.dtype()))) { auto lrwork = computeLRWorkDim(jobchar, m, n); storage_rwork = pin_memory<value_t>(lrwork); rwork = static_cast<value_t*>(storage_rwork.data()); } magma_int_t info = 0; // Run once, first to get the optimum work size. // Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info); lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size"); scalar_t* work; ALLOCATE_ARRAY(work, scalar_t, lwork); for (int64_t i = 0; i < batchsize; i++) { scalar_t* self_working_ptr = &self_data[i * self_stride]; value_t* S_working_ptr = &S_data[i * S_stride]; scalar_t* U_working_ptr = &U_data[i * U_stride]; scalar_t* VT_working_ptr = &VT_data[i * VT_stride]; // Compute S, U (optionally), VT (optionally) magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda, S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) { std::vector<int64_t> infos(batchCount(self), 0); int64_t m = self.size(-2), n = self.size(-1); int64_t k = std::min(m, n); char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N'; Tensor U_working_copy, S_working_copy, VT_working_copy; std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv); // The input matrix, U, S and VT have to reside in pinned memory. // Additionally, the input and U have to be in column major format. // _create_U_S_VT takes care of a part of these requirements (for U, S and VT) // For the input matrix, this requirements are being taken care of below. 
// Specify strides auto self_col_major_strides = at::detail::defaultStrides(self.sizes()); self_col_major_strides[self.dim() - 2] = 1; self_col_major_strides[self.dim() - 1] = m; // Create strided tensor in pinned memory auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides, at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true)); self_working_copy.copy_(self); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] { apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "svd_cuda"); } else { singleCheckErrors(infos[0], "svd_cuda"); } U_working_copy = same_stride_to(U_working_copy, self.options()); S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device())); VT_working_copy = same_stride_to(VT_working_copy, self.options()); if (!compute_uv) { VT_working_copy.zero_(); U_working_copy.zero_(); } if (some) { VT_working_copy = VT_working_copy.narrow(-2, 0, k); } // so far we have computed VT, but torch.svd returns V instead. Adjust accordingly. // Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V. VT_working_copy = VT_working_copy.conj(); VT_working_copy.transpose_(-2, -1); return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy); } std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) { #ifdef USE_CUSOLVER return _svd_helper_cuda_lib(self, some, compute_uv); #else return _svd_helper_cuda_legacy(self, some, compute_uv); #endif } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("lu_solve: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto b_data = b.data_ptr<scalar_t>(); auto lu_data = lu.data_ptr<scalar_t>(); auto n = lu.size(-2); auto nrhs = b.size(-1); int info_tmp = 0; if (b.dim() == 2) { Tensor pivots_tmp = pivots.cpu(); magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp); info = info_tmp; } else { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto b_stride = matrixStride(b); auto lu_stride = matrixStride(lu); auto pivots_stride = pivots.size(-1); magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount"); magma_int_t** pivots_array; scalar_t** lu_array; scalar_t** b_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_stride]; b_array[i] = &b_data[i * b_stride]; lu_array[i] = &lu_data[i * lu_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** lu_array_cur = &lu_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** pivots_array_cur = &pivots_array[mini_idx]; magmaLuSolveBatched<scalar_t>( n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaLuSolveBatched<scalar_t>( n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data); auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous(); if (self.numel() == 0 || LU_data.numel() == 0) { return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{ apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info); }); TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info); return self_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ std::tuple<Tensor, Tensor, Tensor> _lstsq_helper_cuda( const Tensor& a, const Tensor& b, double cond, c10::optional<std::string> driver_name) { #ifndef USE_MAGMA AT_ERROR("torch.linalg.lstsq: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "torch.linalg.lstsq_cuda", [&] { auto trans = MagmaNoTrans; auto m = magma_int_cast(a.size(-2), "m"); auto n = magma_int_cast(a.size(-1), "n"); auto nrhs = magma_int_cast(b.size(-1), "nrhs"); auto ldda = std::max<magma_int_t>(1, m); auto lddb = std::max<magma_int_t>(1, std::max(m, n)); auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n); auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb; Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type()); auto* hwork_ptr = hwork.data_ptr<scalar_t>(); magma_int_t info; batch_iterator_with_broadcasting<scalar_t>(a, b, [&](scalar_t* a_working_ptr, scalar_t* b_working_ptr, int64_t a_linear_batch_idx) { magmaGels<scalar_t>(trans, m, n, nrhs, a_working_ptr, ldda, b_working_ptr, lddb, hwork_ptr, lwork, &info); singleCheckErrors(static_cast<int64_t>(info), "torch.linalg.lstsq_cuda"); } ); }); Tensor rank, singular_values; return std::make_tuple(b, rank, singular_values); #endif } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ }} // namespace at::native #undef ALLOCATE_ARRAY
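The MAGMA paths above repeatedly split a batched call into mini-batches of at most batch_limit matrices (65535 or 262140 depending on routine and dtype) plus a remainder of batch_size % batch_limit. Below is a minimal host-side sketch of that chunking pattern, with a generic process_chunk callback standing in for the MAGMA batched call; the function and callback names are illustrative only, not part of the file above.

// Minimal sketch of the mini-batch splitting used above: process `batch_size`
// matrices in chunks of at most `batch_limit`, then handle the remainder.
// `process_chunk` stands in for a MAGMA batched call (illustrative only).
#include <cstdint>
#include <cstdio>
#include <functional>

void for_each_mini_batch(int64_t batch_size, int64_t batch_limit,
                         const std::function<void(int64_t offset, int64_t count)>& process_chunk) {
  int64_t mini_batches = batch_size / batch_limit;
  int64_t mini_idx = 0;
  for (; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
    process_chunk(mini_idx, batch_limit);                // full chunks of batch_limit
  }
  if (batch_size % batch_limit != 0) {
    process_chunk(mini_idx, batch_size % batch_limit);   // leftover chunk
  }
}

int main() {
  // With batch_size = 150000 and batch_limit = 65535 this prints chunks
  // starting at 0, 65535 and 131070 with counts 65535, 65535 and 18930.
  for_each_mini_batch(150000, 65535, [](int64_t offset, int64_t count) {
    std::printf("chunk at %lld, count %lld\n", (long long)offset, (long long)count);
  });
  return 0;
}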
a44166835402d0250e31abf7e356583468c17588.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */

#include "common/book.h"

int main(void) {
    hipDeviceProp_t prop;
    int dev;

    HANDLE_ERROR(hipGetDevice(&dev));
    printf("ID of current CUDA device: %d\n", dev);

    memset(&prop, 0, sizeof(hipDeviceProp_t));
    prop.major = 1;
    prop.minor = 3;
    HANDLE_ERROR(hipChooseDevice(&dev, &prop));
    printf("ID of CUDA device closest to revision 1.3: %d\n", dev);
    HANDLE_ERROR(hipSetDevice(dev));

    getchar();
}
a44166835402d0250e31abf7e356583468c17588.cu
/*
 * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */

#include "common/book.h"

int main(void) {
    cudaDeviceProp prop;
    int dev;

    HANDLE_ERROR(cudaGetDevice(&dev));
    printf("ID of current CUDA device: %d\n", dev);

    memset(&prop, 0, sizeof(cudaDeviceProp));
    prop.major = 1;
    prop.minor = 3;
    HANDLE_ERROR(cudaChooseDevice(&dev, &prop));
    printf("ID of CUDA device closest to revision 1.3: %d\n", dev);
    HANDLE_ERROR(cudaSetDevice(dev));

    getchar();
}
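The pair above relies on a HANDLE_ERROR macro from common/book.h that is not shown in this document. The following is a hedged sketch of a comparable CUDA error-checking macro; the actual definition in that header may differ, and the helper name here is illustrative.

// Sketch of a CUDA error-checking macro comparable to the HANDLE_ERROR used
// above (the real definition lives in common/book.h and may differ).
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

static void handleError(cudaError_t err, const char* file, int line) {
  if (err != cudaSuccess) {
    std::fprintf(stderr, "%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    std::exit(EXIT_FAILURE);
  }
}
#define HANDLE_ERROR(err) (handleError((err), __FILE__, __LINE__))

int main(void) {
  int dev = 0;
  HANDLE_ERROR(cudaGetDevice(&dev));
  std::printf("current device: %d\n", dev);
  return 0;
}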
c85d9b86fba2fc52b86be6636595c4e98e68e9d4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_gteScalar.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; hipMalloc(&result, XSIZE*YSIZE); double *x = NULL; hipMalloc(&x, XSIZE*YSIZE); double y = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_gteScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_gteScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_gteScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c85d9b86fba2fc52b86be6636595c4e98e68e9d4.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_gteScalar.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); double y = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_gteScalar<<<gridBlock,threadBlock>>>(n,result,x,y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_gteScalar<<<gridBlock,threadBlock>>>(n,result,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_gteScalar<<<gridBlock,threadBlock>>>(n,result,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
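The harness above warms a kernel up with 10 launches and then times 1000 launches with std::chrono::steady_clock. Below is a minimal sketch of the same pattern with a stand-in kernel (dummyKernel is illustrative, not vec_gteScalar); unlike the harness above, it synchronizes before stopping the clock, so the measurement covers kernel execution rather than only asynchronous launch overhead.

// Minimal sketch of the warm-up + std::chrono timing loop used above,
// with a trivial stand-in kernel instead of vec_gteScalar.
#include <chrono>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(int n, double* out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = 2.0 * i;
}

int main() {
  const int n = 1 << 20;
  double* d_out = nullptr;
  cudaMalloc(&d_out, n * sizeof(double));

  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);

  // Warm-up launches so one-time initialization does not pollute the measurement.
  for (int i = 0; i < 10; ++i) dummyKernel<<<grid, block>>>(n, d_out);
  cudaDeviceSynchronize();

  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < 1000; ++i) dummyKernel<<<grid, block>>>(n, d_out);
  cudaDeviceSynchronize();  // wait for all launches before stopping the clock
  auto end = std::chrono::steady_clock::now();

  auto usecs = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
  std::printf("1000 launches: %lld us\n", (long long)usecs.count());

  cudaFree(d_out);
  return 0;
}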
ec12b76e2529389adab68083fef526793067b5c0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "cf_shift_kernel.cuh" int main(int argc, char **argv) { int compute_iters=COMP_ITERATIONS, kernel_calls=KERNEL_CALLS, vector_size=VECTOR_SIZE, tile_dim=TILE_DIM; if (argc > 3 || argc == 2) { printf("\nError: Wrong number of arguments.\n\n"); printf("Usage:\n\t %s [inner_iterations] [kernel_calls]\n\t %s\n", argv[0], argv[0]); return -1; } if (argc == 3) { kernel_calls = atoi(argv[2]); compute_iters = atoi(argv[1]); } printf("Number of kernel launches: %d\n", kernel_calls); printf("Number of compute iterations: %d\n", compute_iters); // execution configuration parameters dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1); // CUDA events hipEvent_t start, stop; size_t mem_size = static_cast<size_t>(sizeof(int) * vector_size); // allocate host memory int *h_iA = (int *) malloc(mem_size); int *h_oC1 = (int *) malloc(mem_size); int *h_oC2 = (int *) malloc(mem_size); int *h_oC3 = (int *) malloc(mem_size); int *h_oC4 = (int *) malloc(mem_size); // initalize host data for (int i = 0; i < vector_size; ++i) { h_iA[i] = (int) i+3; // h_iB[i] = (float) i+3; } // allocate device memory int *d_iA, *d_oC1, *d_oC2, *d_oC3, *d_oC4; hipMalloc((void **) &d_iA, mem_size); hipMalloc((void **) &d_oC1, mem_size); hipMalloc((void **) &d_oC2, mem_size); hipMalloc((void **) &d_oC3, mem_size); hipMalloc((void **) &d_oC4, mem_size); // copy host data to device hipMemcpy(d_iA, h_iA, mem_size, hipMemcpyHostToDevice); // print out common data for all kernels printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x); // initialize events hipEventCreate(&start); hipEventCreate(&stop); // take measurements for loop over kernel launches hipEventRecord(start, 0); for (int i=0; i < kernel_calls; i++) callKernel(threads, grid, d_iA, d_oC1, d_oC2, d_oC3, d_oC4, vector_size, compute_iters, tile_dim); hipEventRecord(stop, 0); hipEventSynchronize(stop); float kernelTime; hipEventElapsedTime(&kernelTime, start, stop); // take measurements for loop inside kernel hipMemcpy(h_oC1, d_oC1, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC2, d_oC2, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC3, d_oC3, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC4, d_oC4, mem_size, hipMemcpyDeviceToHost); printf("teste: %f\n", h_oC1[0]); // report effective bandwidths float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/kernel_calls); printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", kernelBandwidth, kernelTime/kernel_calls, vector_size, 1, tile_dim * 1); free(h_iA); // free(h_iB); free(h_oC1); free(h_oC2); free(h_oC3); free(h_oC4); hipFree(d_iA); hipFree(d_oC1); hipFree(d_oC2); hipFree(d_oC3); hipFree(d_oC4); hipEventDestroy(start); hipEventDestroy(stop); hipDeviceReset(); printf("Test passed\n"); exit(EXIT_SUCCESS); }
ec12b76e2529389adab68083fef526793067b5c0.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include "cf_shift_kernel.cuh" int main(int argc, char **argv) { int compute_iters=COMP_ITERATIONS, kernel_calls=KERNEL_CALLS, vector_size=VECTOR_SIZE, tile_dim=TILE_DIM; if (argc > 3 || argc == 2) { printf("\nError: Wrong number of arguments.\n\n"); printf("Usage:\n\t %s [inner_iterations] [kernel_calls]\n\t %s\n", argv[0], argv[0]); return -1; } if (argc == 3) { kernel_calls = atoi(argv[2]); compute_iters = atoi(argv[1]); } printf("Number of kernel launches: %d\n", kernel_calls); printf("Number of compute iterations: %d\n", compute_iters); // execution configuration parameters dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1); // CUDA events cudaEvent_t start, stop; size_t mem_size = static_cast<size_t>(sizeof(int) * vector_size); // allocate host memory int *h_iA = (int *) malloc(mem_size); int *h_oC1 = (int *) malloc(mem_size); int *h_oC2 = (int *) malloc(mem_size); int *h_oC3 = (int *) malloc(mem_size); int *h_oC4 = (int *) malloc(mem_size); // initalize host data for (int i = 0; i < vector_size; ++i) { h_iA[i] = (int) i+3; // h_iB[i] = (float) i+3; } // allocate device memory int *d_iA, *d_oC1, *d_oC2, *d_oC3, *d_oC4; cudaMalloc((void **) &d_iA, mem_size); cudaMalloc((void **) &d_oC1, mem_size); cudaMalloc((void **) &d_oC2, mem_size); cudaMalloc((void **) &d_oC3, mem_size); cudaMalloc((void **) &d_oC4, mem_size); // copy host data to device cudaMemcpy(d_iA, h_iA, mem_size, cudaMemcpyHostToDevice); // print out common data for all kernels printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x); // initialize events cudaEventCreate(&start); cudaEventCreate(&stop); // take measurements for loop over kernel launches cudaEventRecord(start, 0); for (int i=0; i < kernel_calls; i++) callKernel(threads, grid, d_iA, d_oC1, d_oC2, d_oC3, d_oC4, vector_size, compute_iters, tile_dim); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float kernelTime; cudaEventElapsedTime(&kernelTime, start, stop); // take measurements for loop inside kernel cudaMemcpy(h_oC1, d_oC1, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC2, d_oC2, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC3, d_oC3, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC4, d_oC4, mem_size, cudaMemcpyDeviceToHost); printf("teste: %f\n", h_oC1[0]); // report effective bandwidths float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/kernel_calls); printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", kernelBandwidth, kernelTime/kernel_calls, vector_size, 1, tile_dim * 1); free(h_iA); // free(h_iB); free(h_oC1); free(h_oC2); free(h_oC3); free(h_oC4); cudaFree(d_iA); cudaFree(d_oC1); cudaFree(d_oC2); cudaFree(d_oC3); cudaFree(d_oC4); cudaEventDestroy(start); cudaEventDestroy(stop); cudaDeviceReset(); printf("Test passed\n"); exit(EXIT_SUCCESS); }
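The benchmark above reports effective bandwidth as 2 * mem_size / (1024*1024*1024) / (kernelTime / kernel_calls) * 1000, i.e. one read plus one write of mem_size bytes per call, averaged over all launches. A small sketch of that arithmetic as a standalone helper follows; the function name and the example numbers are illustrative only.

// Sketch of the effective-bandwidth arithmetic used above:
// one read plus one write of mem_size bytes per kernel call, averaged
// over kernel_calls launches that together took total_ms milliseconds.
#include <cstdio>

static float effectiveBandwidthGBs(size_t mem_size_bytes, float total_ms, int kernel_calls) {
  const float ms_per_call = total_ms / kernel_calls;
  const float gib = static_cast<float>(mem_size_bytes) / (1024.0f * 1024.0f * 1024.0f);
  // 2x for read + write, 1000x converts milliseconds to seconds.
  return 2.0f * 1000.0f * gib / ms_per_call;
}

int main() {
  // e.g. 256 MiB moved per call, 100 calls in 500 ms total -> 5 ms/call -> 100 GB/s.
  std::printf("%.4f GB/s\n", effectiveBandwidthGBs(256u << 20, 500.0f, 100));
  return 0;
}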
e729353ee083ddde9e546c38f8f492c7eb1b3221.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudapars.h" #include "paramssteeringtest1.h" ///////////////////////////////////// // standard imports ///////////////////////////////////// #include <stdio.h> #include <math.h> #include "step.h" ///////////////////////////////////// // kernel function (CUDA device) ///////////////////////////////////// #include "gradops_hdbne5.cuh" __global__ void hyperdifbsourcene5_parallel(struct params *p, real *w, real *wnew, real *wmod, real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt) { // compute the global index in the vector from // the number of the current block, blockIdx, // the number of threads per block, blockDim, // and the number of the current thread within the block, threadIdx //int i = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.y * blockDim.y + threadIdx.y; int iindex = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int m,ii1; real fip,fim1,tmpc; int index,k; int ni=p->n[0]; int nj=p->n[1]; //real dt=p->dt; real dy=p->dx[1]; real dx=p->dx[0]; //real g=p->g; // dt=1.0; //dt=0.05; //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3; int ip,jp,ipg,jpg; jp=iindex/(ni/(p->npgp[0])); ip=iindex-(jp*(ni/(p->npgp[0]))); for(ipg=0;ipg<(p->npgp[0]);ipg++) for(jpg=0;jpg<(p->npgp[1]);jpg++) { i=ip*(p->npgp[0])+ipg; j=jp*(p->npgp[1])+jpg; if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1)) { wtemp[fencode_hdbne5(p,i,j,tmp6)]=grad1_hdbne5(wtemp,p,i,j,tmp5,mm); } } __syncthreads(); } ///////////////////////////////////// // error checking routine ///////////////////////////////////// void checkErrors_hdbne5(char *label) { // we need to synchronise first to catch errors due to // asynchroneous operations that would otherwise // potentially go unnoticed hipError_t err; err = hipDeviceSynchronize(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } err = hipGetLastError(); if (err != hipSuccess) { char *e = (char*) hipGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } } int cuhyperdifbsourcene5(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real **d_wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt) { //printf("calling propagate solution\n"); //dim3 dimBlock(blocksize, blocksize); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimBlock(dimblock, 1); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock; //__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod, // real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd) //init_parallel(struct params *p, real *b, real *u, real *v, real *h) hipLaunchKernelGGL(( hyperdifbsourcene5_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb,dt); //prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h); //printf("called prop\n"); hipDeviceSynchronize(); //boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called boundary\n"); //hipDeviceSynchronize(); //update_parallel<<<numBlocks, 
numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called update\n"); // hipDeviceSynchronize(); // hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost); //hipMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost); //hipMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost); //checkErrors("copy data from device"); }
e729353ee083ddde9e546c38f8f492c7eb1b3221.cu
#include "cudapars.h" #include "paramssteeringtest1.h" ///////////////////////////////////// // standard imports ///////////////////////////////////// #include <stdio.h> #include <math.h> #include "step.h" ///////////////////////////////////// // kernel function (CUDA device) ///////////////////////////////////// #include "gradops_hdbne5.cuh" __global__ void hyperdifbsourcene5_parallel(struct params *p, real *w, real *wnew, real *wmod, real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt) { // compute the global index in the vector from // the number of the current block, blockIdx, // the number of threads per block, blockDim, // and the number of the current thread within the block, threadIdx //int i = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.y * blockDim.y + threadIdx.y; int iindex = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int m,ii1; real fip,fim1,tmpc; int index,k; int ni=p->n[0]; int nj=p->n[1]; //real dt=p->dt; real dy=p->dx[1]; real dx=p->dx[0]; //real g=p->g; // dt=1.0; //dt=0.05; //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3; int ip,jp,ipg,jpg; jp=iindex/(ni/(p->npgp[0])); ip=iindex-(jp*(ni/(p->npgp[0]))); for(ipg=0;ipg<(p->npgp[0]);ipg++) for(jpg=0;jpg<(p->npgp[1]);jpg++) { i=ip*(p->npgp[0])+ipg; j=jp*(p->npgp[1])+jpg; if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1)) { wtemp[fencode_hdbne5(p,i,j,tmp6)]=grad1_hdbne5(wtemp,p,i,j,tmp5,mm); } } __syncthreads(); } ///////////////////////////////////// // error checking routine ///////////////////////////////////// void checkErrors_hdbne5(char *label) { // we need to synchronise first to catch errors due to // asynchroneous operations that would otherwise // potentially go unnoticed cudaError_t err; err = cudaThreadSynchronize(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } err = cudaGetLastError(); if (err != cudaSuccess) { char *e = (char*) cudaGetErrorString(err); fprintf(stderr, "CUDA Error: %s (at %s)", e, label); } } int cuhyperdifbsourcene5(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real **d_wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt) { //printf("calling propagate solution\n"); //dim3 dimBlock(blocksize, blocksize); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimBlock(dimblock, 1); //dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y); int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock; //__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod, // real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd) //init_parallel(struct params *p, real *b, real *u, real *v, real *h) hyperdifbsourcene5_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb,dt); //prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h); //printf("called prop\n"); cudaThreadSynchronize(); //boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called boundary\n"); //cudaThreadSynchronize(); //update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew); //printf("called update\n"); // cudaThreadSynchronize(); // cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* 
((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost); //cudaMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost); //cudaMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost); //checkErrors("copy data from device"); }
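hyperdifbsourcene5_parallel above maps a flat thread index to a tile of npgp[0] x npgp[1] grid points via jp = iindex / (ni / npgp[0]) and ip = iindex - jp * (ni / npgp[0]). Below is a minimal sketch of that index decomposition with illustrative sizes; the kernel and buffer names are not taken from the solver.

// Minimal sketch of the 1D-thread-index -> 2D-point-tile mapping used by
// hyperdifbsourcene5_parallel: each thread owns an npgx-by-npgy tile of an
// ni-by-nj grid. Sizes below are illustrative, not taken from the solver.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void tileMapKernel(int ni, int nj, int npgx, int npgy, int* visits) {
  int iindex = blockIdx.x * blockDim.x + threadIdx.x;
  int ntiles = (ni / npgx) * (nj / npgy);
  if (iindex >= ntiles) return;
  int jp = iindex / (ni / npgx);        // tile row
  int ip = iindex - jp * (ni / npgx);   // tile column
  for (int ipg = 0; ipg < npgx; ipg++)
    for (int jpg = 0; jpg < npgy; jpg++) {
      int i = ip * npgx + ipg;
      int j = jp * npgy + jpg;
      if (i < ni && j < nj) atomicAdd(&visits[i + j * ni], 1);
    }
}

int main() {
  const int ni = 64, nj = 64, npgx = 2, npgy = 2;
  const int threads = (ni / npgx) * (nj / npgy);
  int* d_visits = nullptr;
  cudaMalloc(&d_visits, ni * nj * sizeof(int));
  cudaMemset(d_visits, 0, ni * nj * sizeof(int));

  tileMapKernel<<<(threads + 255) / 256, 256>>>(ni, nj, npgx, npgy, d_visits);
  cudaDeviceSynchronize();

  int h_first = 0;
  cudaMemcpy(&h_first, d_visits, sizeof(int), cudaMemcpyDeviceToHost);
  std::printf("visits[0] = %d (expect 1: every point covered exactly once)\n", h_first);

  cudaFree(d_visits);
  return 0;
}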
84e0d6d555d2a601170a878494770074be994bd7.hip
// !!! This is a file automatically generated by hipify!!! // CUDA runtime #include <hip/hip_runtime.h> #include <typedef.hpp> #include <cuda_macro.hpp> #include <thrust/device_ptr.h> #include <thrust/tuple.h> #include <thrust/transform.h> #include <thrust/for_each.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/system/hip/execution_policy.h> #include <vector> #include <iostream> #include "DynSysSchemeData_cuda.hpp" #include <Core/UVec.hpp> #include <Core/CudaStream.hpp> #if defined(WITH_GPU) #if defined(USER_DEFINED_GPU_DYNSYS_FUNC) struct GetHamValue3_dim0 { template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { FLOAT_TYPE b = thrust::get<0>(v); FLOAT_TYPE c = thrust::get<1>(v); return b * c; } }; struct GetHamValue3_dimNot0 { template<typename Tuple> __host__ __device__ void operator()(Tuple v) const { FLOAT_TYPE b = thrust::get<1>(v); FLOAT_TYPE c = thrust::get<2>(v); thrust::get<0>(v) += b * c; } }; struct GetHamValue3_dimNot0Neg { template<typename Tuple> __host__ __device__ void operator()(Tuple v) const { FLOAT_TYPE b = thrust::get<1>(v); FLOAT_TYPE c = thrust::get<2>(v); const FLOAT_TYPE r = thrust::get<0>(v) + b * c; thrust::get<0>(v) = -r; } }; struct GetHamValue2 { const FLOAT_TYPE b; GetHamValue2(const FLOAT_TYPE b) : b(b) {} template<typename Tuple> __host__ __device__ void operator()(Tuple v) const { FLOAT_TYPE c = thrust::get<1>(v); thrust::get<0>(v) += b * c; } }; struct GetHamValue2Neg { const FLOAT_TYPE b; GetHamValue2Neg(const FLOAT_TYPE b) : b(b) {} template<typename Tuple> __host__ __device__ void operator()(Tuple v) const { const FLOAT_TYPE c = thrust::get<1>(v); const FLOAT_TYPE r = thrust::get<0>(v) + b * c; thrust::get<0>(v) = -r; } }; struct GetHamValue_1dim { GetHamValue_1dim() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE dx0 = thrust::get<1>(v); FLOAT_TYPE r = deriv0 * dx0; return r; } }; struct GetHamValue_1dimNeg { GetHamValue_1dimNeg() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE dx0 = thrust::get<1>(v); FLOAT_TYPE r = deriv0 * dx0; return -r; } }; struct GetHamValue_2dim { GetHamValue_2dim() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE dx0 = thrust::get<2>(v); const FLOAT_TYPE dx1 = thrust::get<3>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; return r; } }; struct GetHamValue_2dimNeg { GetHamValue_2dimNeg() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE dx0 = thrust::get<2>(v); const FLOAT_TYPE dx1 = thrust::get<3>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; return -r; } }; struct GetHamValue_3dim { GetHamValue_3dim() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE dx0 = thrust::get<3>(v); const FLOAT_TYPE dx1 = thrust::get<4>(v); const FLOAT_TYPE dx2 = thrust::get<5>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; return r; } }; struct GetHamValue_3dimNeg { 
GetHamValue_3dimNeg() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE dx0 = thrust::get<3>(v); const FLOAT_TYPE dx1 = thrust::get<4>(v); const FLOAT_TYPE dx2 = thrust::get<5>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; return -r; } }; struct GetHamValue_4dim { GetHamValue_4dim() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE deriv3 = thrust::get<3>(v); const FLOAT_TYPE dx0 = thrust::get<4>(v); const FLOAT_TYPE dx1 = thrust::get<5>(v); const FLOAT_TYPE dx2 = thrust::get<6>(v); const FLOAT_TYPE dx3 = thrust::get<7>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += deriv3 * dx3; return r; } }; struct GetHamValue_4dimNeg { GetHamValue_4dimNeg() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE deriv3 = thrust::get<3>(v); const FLOAT_TYPE dx0 = thrust::get<4>(v); const FLOAT_TYPE dx1 = thrust::get<5>(v); const FLOAT_TYPE dx2 = thrust::get<6>(v); const FLOAT_TYPE dx3 = thrust::get<7>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += deriv3 * dx3; return -r; } }; struct GetHamValue_5dim { GetHamValue_5dim() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE deriv3 = thrust::get<3>(v); const FLOAT_TYPE deriv4 = thrust::get<4>(v); const FLOAT_TYPE dx0 = thrust::get<5>(v); const FLOAT_TYPE dx1 = thrust::get<6>(v); const FLOAT_TYPE dx2 = thrust::get<7>(v); const FLOAT_TYPE dx3 = thrust::get<8>(v); const FLOAT_TYPE dx4 = thrust::get<9>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += deriv3 * dx3; r += deriv4 * dx4; return r; } }; struct GetHamValue_5dimNeg { GetHamValue_5dimNeg() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE deriv3 = thrust::get<3>(v); const FLOAT_TYPE deriv4 = thrust::get<4>(v); const FLOAT_TYPE dx0 = thrust::get<5>(v); const FLOAT_TYPE dx1 = thrust::get<6>(v); const FLOAT_TYPE dx2 = thrust::get<7>(v); const FLOAT_TYPE dx3 = thrust::get<8>(v); const FLOAT_TYPE dx4 = thrust::get<9>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += deriv3 * dx3; r += deriv4 * dx4; return -r; } }; struct GetHamValueTIdx_1dim { const FLOAT_TYPE TIderiv; GetHamValueTIdx_1dim(const FLOAT_TYPE TIderiv) : TIderiv(TIderiv) {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE dx0 = thrust::get<1>(v); const FLOAT_TYPE TIdx0 = thrust::get<2>(v); FLOAT_TYPE r = deriv0 * dx0; r += TIderiv * TIdx0; return r; } }; struct GetHamValueTIdx_1dimNeg { const FLOAT_TYPE TIderiv; GetHamValueTIdx_1dimNeg(const FLOAT_TYPE TIderiv) 
: TIderiv(TIderiv) {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE dx0 = thrust::get<1>(v); const FLOAT_TYPE TIdx0 = thrust::get<2>(v); FLOAT_TYPE r = deriv0 * dx0; r += TIderiv * TIdx0; return -r; } }; struct GetHamValueTIdx_2dim { const FLOAT_TYPE TIderiv; GetHamValueTIdx_2dim(const FLOAT_TYPE TIderiv) : TIderiv(TIderiv) {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE dx0 = thrust::get<2>(v); const FLOAT_TYPE dx1 = thrust::get<3>(v); const FLOAT_TYPE TIdx0 = thrust::get<4>(v); const FLOAT_TYPE TIdx1 = thrust::get<5>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += TIderiv * TIdx0; r += TIderiv * TIdx1; return r; } }; struct GetHamValueTIdx_2dimNeg { const FLOAT_TYPE TIderiv; GetHamValueTIdx_2dimNeg(const FLOAT_TYPE TIderiv) : TIderiv(TIderiv) {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE dx0 = thrust::get<2>(v); const FLOAT_TYPE dx1 = thrust::get<3>(v); const FLOAT_TYPE TIdx0 = thrust::get<4>(v); const FLOAT_TYPE TIdx1 = thrust::get<5>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += TIderiv * TIdx0; r += TIderiv * TIdx1; return -r; } }; struct GetHamValueTIdx_3dim { const FLOAT_TYPE TIderiv; GetHamValueTIdx_3dim(const FLOAT_TYPE TIderiv) : TIderiv(TIderiv) {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE dx0 = thrust::get<3>(v); const FLOAT_TYPE dx1 = thrust::get<4>(v); const FLOAT_TYPE dx2 = thrust::get<5>(v); const FLOAT_TYPE TIdx0 = thrust::get<6>(v); const FLOAT_TYPE TIdx1 = thrust::get<7>(v); const FLOAT_TYPE TIdx2 = thrust::get<8>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += TIderiv * TIdx0; r += TIderiv * TIdx1; r += TIderiv * TIdx2; return r; } }; struct GetHamValueTIdx_3dimNeg { const FLOAT_TYPE TIderiv; GetHamValueTIdx_3dimNeg(const FLOAT_TYPE TIderiv) : TIderiv(TIderiv) {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE dx0 = thrust::get<3>(v); const FLOAT_TYPE dx1 = thrust::get<4>(v); const FLOAT_TYPE dx2 = thrust::get<5>(v); const FLOAT_TYPE TIdx0 = thrust::get<6>(v); const FLOAT_TYPE TIdx1 = thrust::get<7>(v); const FLOAT_TYPE TIdx2 = thrust::get<8>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += TIderiv * TIdx0; r += TIderiv * TIdx1; r += TIderiv * TIdx2; return -r; } }; bool hamFunc_exec_cuda( beacls::UVec& hamValue_uvec, const std::vector<beacls::UVec>& deriv_uvecs, const std::vector<beacls::UVec>& dx_uvecs, const std::vector<beacls::UVec>& TIdx_uvecs, const FLOAT_TYPE TIderiv, const bool TIdim, const bool negate ) { if (dx_uvecs.empty()) return false; beacls::reallocateAsSrc(hamValue_uvec, dx_uvecs[0]); FLOAT_TYPE* hamValue = beacls::UVec_<FLOAT_TYPE>(hamValue_uvec).ptr(); const size_t num_of_dimensions = deriv_uvecs.size(); if (beacls::is_cuda(hamValue_uvec)) { thrust::device_ptr<FLOAT_TYPE> 
hamValue_dev_ptr = thrust::device_pointer_cast(hamValue); hipStream_t ham_stream = beacls::get_stream(hamValue_uvec); if (!TIdim) { switch(num_of_dimensions) { case 1: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, dx0_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_1dimNeg()); } else { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_1dim()); } } break; case 2: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, dx0_dev_ptr, dx1_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_2dimNeg()); } else { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_2dim()); } } break; case 3: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr(); const FLOAT_TYPE* dx2_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[2]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr); thrust::device_ptr<const FLOAT_TYPE> dx2_dev_ptr = thrust::device_pointer_cast(dx2_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr, dx0_dev_ptr, dx1_dev_ptr, dx2_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_3dimNeg()); } else { 
thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_3dim()); } } break; case 4: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr(); const FLOAT_TYPE* dx2_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[2]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr); thrust::device_ptr<const FLOAT_TYPE> dx2_dev_ptr = thrust::device_pointer_cast(dx2_ptr); const FLOAT_TYPE* deriv3_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[3]).ptr(); const FLOAT_TYPE* dx3_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[3]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv3_dev_ptr = thrust::device_pointer_cast(deriv3_ptr); thrust::device_ptr<const FLOAT_TYPE> dx3_dev_ptr = thrust::device_pointer_cast(dx3_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr, deriv3_dev_ptr, dx0_dev_ptr, dx1_dev_ptr, dx2_dev_ptr, dx3_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_4dimNeg()); } else { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_4dim()); } } break; case 5: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr(); const FLOAT_TYPE* dx2_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[2]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr); thrust::device_ptr<const FLOAT_TYPE> dx2_dev_ptr = thrust::device_pointer_cast(dx2_ptr); const FLOAT_TYPE* deriv3_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[3]).ptr(); const FLOAT_TYPE* dx3_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[3]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv3_dev_ptr = thrust::device_pointer_cast(deriv3_ptr); thrust::device_ptr<const FLOAT_TYPE> dx3_dev_ptr = thrust::device_pointer_cast(dx3_ptr); const FLOAT_TYPE* deriv4_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[4]).ptr(); const FLOAT_TYPE* dx4_ptr = 
beacls::UVec_<FLOAT_TYPE>(dx_uvecs[4]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv4_dev_ptr = thrust::device_pointer_cast(deriv4_ptr); thrust::device_ptr<const FLOAT_TYPE> dx4_dev_ptr = thrust::device_pointer_cast(dx4_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr, deriv3_dev_ptr, deriv4_dev_ptr, dx0_dev_ptr, dx1_dev_ptr, dx2_dev_ptr, dx3_dev_ptr, dx4_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_5dimNeg()); } else { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_5dim()); } } break; default: beacls::synchronizeUVec(dx_uvecs[0]); for (size_t dimension = 0; dimension < num_of_dimensions; ++dimension) { const FLOAT_TYPE* deriv_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[dimension]).ptr(); const FLOAT_TYPE* dx_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[dimension]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv_dev_ptr = thrust::device_pointer_cast(deriv_ptr); thrust::device_ptr<const FLOAT_TYPE> dx_dev_ptr = thrust::device_pointer_cast(dx_ptr); if (dimension==0){ auto src_Tuple = thrust::make_tuple(deriv_dev_ptr, dx_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[dimension].size(), hamValue_dev_ptr, GetHamValue3_dim0()); } else { auto src_dst_Tuple = thrust::make_tuple(hamValue_dev_ptr, deriv_dev_ptr, dx_dev_ptr); auto src_dst_ite = thrust::make_zip_iterator(src_dst_Tuple); thrust::for_each(thrust::hip::par.on(ham_stream), src_dst_ite, src_dst_ite + dx_uvecs[dimension].size(), GetHamValue3_dimNot0()); } } break; } } else { switch(num_of_dimensions) { case 1: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); const FLOAT_TYPE* TIdx0_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx0_dev_ptr = thrust::device_pointer_cast(TIdx0_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, dx0_dev_ptr, TIdx0_dev_ptr ); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_1dimNeg(TIderiv)); } else { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_1dim(TIderiv)); } } break; case 2: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); beacls::synchronizeUVec(TIdx_uvecs[0]); const FLOAT_TYPE* TIdx0_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx0_dev_ptr = thrust::device_pointer_cast(TIdx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = 
beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); const FLOAT_TYPE* TIdx1_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx1_dev_ptr = thrust::device_pointer_cast(TIdx1_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, dx0_dev_ptr, dx1_dev_ptr, TIdx0_dev_ptr, TIdx1_dev_ptr ); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_2dimNeg(TIderiv)); } else { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_2dim(TIderiv)); } } break; case 3: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); beacls::synchronizeUVec(TIdx_uvecs[0]); const FLOAT_TYPE* TIdx0_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx0_dev_ptr = thrust::device_pointer_cast(TIdx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); const FLOAT_TYPE* TIdx1_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx1_dev_ptr = thrust::device_pointer_cast(TIdx1_ptr); const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr(); const FLOAT_TYPE* dx2_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[2]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr); thrust::device_ptr<const FLOAT_TYPE> dx2_dev_ptr = thrust::device_pointer_cast(dx2_ptr); const FLOAT_TYPE* TIdx2_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[2]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx2_dev_ptr = thrust::device_pointer_cast(TIdx2_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr, dx0_dev_ptr, dx1_dev_ptr, dx2_dev_ptr, TIdx0_dev_ptr, TIdx1_dev_ptr, TIdx2_dev_ptr ); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_3dimNeg(TIderiv)); } else { thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_3dim(TIderiv)); } } break; default: beacls::synchronizeUVec(dx_uvecs[0]); for (size_t dimension = 0; dimension < num_of_dimensions; ++dimension) { const FLOAT_TYPE* deriv_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[dimension]).ptr(); const FLOAT_TYPE* dx_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[dimension]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv_dev_ptr = thrust::device_pointer_cast(deriv_ptr); thrust::device_ptr<const FLOAT_TYPE> dx_dev_ptr = thrust::device_pointer_cast(dx_ptr); if (dimension==0){ auto src_Tuple 
= thrust::make_tuple(deriv_dev_ptr, dx_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); thrust::transform(thrust::hip::par.on(ham_stream), src_ite, src_ite + dx_uvecs[dimension].size(), hamValue_dev_ptr, GetHamValue3_dim0()); } else if (dimension==num_of_dimensions-1 && negate) { auto src_dst_Tuple = thrust::make_tuple(hamValue_dev_ptr, deriv_dev_ptr, dx_dev_ptr); auto src_dst_ite = thrust::make_zip_iterator(src_dst_Tuple); thrust::for_each(thrust::hip::par.on(ham_stream), src_dst_ite, src_dst_ite + dx_uvecs[dimension].size(), GetHamValue3_dimNot0Neg()); }else { auto src_dst_Tuple = thrust::make_tuple(hamValue_dev_ptr, deriv_dev_ptr, dx_dev_ptr); auto src_dst_ite = thrust::make_zip_iterator(src_dst_Tuple); thrust::for_each(thrust::hip::par.on(ham_stream), src_dst_ite, src_dst_ite + dx_uvecs[dimension].size(), GetHamValue3_dimNot0()); } } beacls::synchronizeUVec(TIdx_uvecs[0]); for (size_t dimension = 0; dimension < num_of_dimensions; ++dimension) { const FLOAT_TYPE* TIdx_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[dimension]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx_dev_ptr = thrust::device_pointer_cast(TIdx_ptr); auto src_dst_Tuple = thrust::make_tuple(hamValue_dev_ptr, TIdx_dev_ptr); auto src_dst_ite = thrust::make_zip_iterator(src_dst_Tuple); if (dimension==num_of_dimensions-1 && negate) { thrust::for_each(thrust::hip::par.on(ham_stream), src_dst_ite, src_dst_ite + TIdx_uvecs[dimension].size(), GetHamValue2Neg(TIderiv)); } else { thrust::for_each(thrust::hip::par.on(ham_stream), src_dst_ite, src_dst_ite + TIdx_uvecs[dimension].size(), GetHamValue2(TIderiv)); } } } } } else { std::cerr << __FILE__ << ":" << __LINE__ << ":" << __FUNCTION__ << " Invalid data size" << std::endl; return false; } return true; } struct GetMax4 { template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE uu = thrust::get<0>(v); const FLOAT_TYPE ul = thrust::get<1>(v); const FLOAT_TYPE ll = thrust::get<2>(v); const FLOAT_TYPE lu = thrust::get<3>(v); const FLOAT_TYPE maxUUUL = max_float_type<FLOAT_TYPE>(abs_float_type<FLOAT_TYPE>(uu), abs_float_type<FLOAT_TYPE>(ul)); const FLOAT_TYPE maxLULL = max_float_type<FLOAT_TYPE>(abs_float_type<FLOAT_TYPE>(lu), abs_float_type<FLOAT_TYPE>(ll)); const FLOAT_TYPE maxUUULLULL = max_float_type<FLOAT_TYPE>(maxUUUL, maxLULL); return maxUUULLULL; } }; bool partialFunc_exec_cuda( beacls::UVec& alphas_uvec, const beacls::UVec& dxLL_dim, const beacls::UVec& dxLU_dim, const beacls::UVec& dxUL_dim, const beacls::UVec& dxUU_dim ) { const size_t length = dxUU_dim.size(); if (alphas_uvec.type() != dxUU_dim.type()) alphas_uvec = beacls::UVec(dxUU_dim.depth(), dxUU_dim.type(), length); else alphas_uvec.resize(length); const FLOAT_TYPE* dxUU_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dxUU_dim).ptr(); const FLOAT_TYPE* dxUL_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dxUL_dim).ptr(); const FLOAT_TYPE* dxLL_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dxLL_dim).ptr(); const FLOAT_TYPE* dxLU_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dxLU_dim).ptr(); FLOAT_TYPE* alphas = beacls::UVec_<FLOAT_TYPE>(alphas_uvec).ptr(); if ((dxUU_dim.type() == beacls::UVecType_Cuda) && (dxUL_dim.type() == beacls::UVecType_Cuda) && (dxLU_dim.type() == beacls::UVecType_Cuda) && (dxLL_dim.type() == beacls::UVecType_Cuda) ){ hipStream_t alpha_stream = beacls::get_stream(alphas_uvec); thrust::device_ptr<FLOAT_TYPE> alphas_dev_ptr = thrust::device_pointer_cast(alphas); beacls::synchronizeUVec(dxUU_dim); beacls::synchronizeUVec(dxUL_dim); beacls::synchronizeUVec(dxLU_dim); 
beacls::synchronizeUVec(dxLL_dim); thrust::device_ptr<const FLOAT_TYPE> dxUU_dim_dev_ptr = thrust::device_pointer_cast(dxUU_dim_ptr); thrust::device_ptr<const FLOAT_TYPE> dxUL_dim_dev_ptr = thrust::device_pointer_cast(dxUL_dim_ptr); thrust::device_ptr<const FLOAT_TYPE> dxLL_dim_dev_ptr = thrust::device_pointer_cast(dxLL_dim_ptr); thrust::device_ptr<const FLOAT_TYPE> dxLU_dim_dev_ptr = thrust::device_pointer_cast(dxLU_dim_ptr); auto float_type4Tuple = thrust::make_tuple( dxUU_dim_dev_ptr, dxUL_dim_dev_ptr, dxLL_dim_dev_ptr, dxLU_dim_dev_ptr); auto float_type4Iterator = thrust::make_zip_iterator(float_type4Tuple); thrust::transform(thrust::hip::par.on(alpha_stream), float_type4Iterator, float_type4Iterator + length, alphas_dev_ptr, GetMax4()); } else { for (size_t i = 0; i < length; ++i) { const FLOAT_TYPE max0 = max_float_type<FLOAT_TYPE>( abs_float_type<FLOAT_TYPE>(dxUU_dim_ptr[i]), abs_float_type<FLOAT_TYPE>(dxUL_dim_ptr[i])); const FLOAT_TYPE max1 = max_float_type<FLOAT_TYPE>( abs_float_type<FLOAT_TYPE>(dxLL_dim_ptr[i]), abs_float_type<FLOAT_TYPE>(dxLU_dim_ptr[i])); alphas[i] = max_float_type<FLOAT_TYPE>(max0, max1); } } return true; } #endif /* defined(USER_DEFINED_GPU_DYNSYS_FUNC) */ #endif /* defined(WITH_GPU) */
84e0d6d555d2a601170a878494770074be994bd7.cu
// CUDA runtime #include <cuda_runtime.h> #include <typedef.hpp> #include <cuda_macro.hpp> #include <thrust/device_ptr.h> #include <thrust/tuple.h> #include <thrust/transform.h> #include <thrust/for_each.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/system/cuda/execution_policy.h> #include <vector> #include <iostream> #include "DynSysSchemeData_cuda.hpp" #include <Core/UVec.hpp> #include <Core/CudaStream.hpp> #if defined(WITH_GPU) #if defined(USER_DEFINED_GPU_DYNSYS_FUNC) struct GetHamValue3_dim0 { template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { FLOAT_TYPE b = thrust::get<0>(v); FLOAT_TYPE c = thrust::get<1>(v); return b * c; } }; struct GetHamValue3_dimNot0 { template<typename Tuple> __host__ __device__ void operator()(Tuple v) const { FLOAT_TYPE b = thrust::get<1>(v); FLOAT_TYPE c = thrust::get<2>(v); thrust::get<0>(v) += b * c; } }; struct GetHamValue3_dimNot0Neg { template<typename Tuple> __host__ __device__ void operator()(Tuple v) const { FLOAT_TYPE b = thrust::get<1>(v); FLOAT_TYPE c = thrust::get<2>(v); const FLOAT_TYPE r = thrust::get<0>(v) + b * c; thrust::get<0>(v) = -r; } }; struct GetHamValue2 { const FLOAT_TYPE b; GetHamValue2(const FLOAT_TYPE b) : b(b) {} template<typename Tuple> __host__ __device__ void operator()(Tuple v) const { FLOAT_TYPE c = thrust::get<1>(v); thrust::get<0>(v) += b * c; } }; struct GetHamValue2Neg { const FLOAT_TYPE b; GetHamValue2Neg(const FLOAT_TYPE b) : b(b) {} template<typename Tuple> __host__ __device__ void operator()(Tuple v) const { const FLOAT_TYPE c = thrust::get<1>(v); const FLOAT_TYPE r = thrust::get<0>(v) + b * c; thrust::get<0>(v) = -r; } }; struct GetHamValue_1dim { GetHamValue_1dim() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE dx0 = thrust::get<1>(v); FLOAT_TYPE r = deriv0 * dx0; return r; } }; struct GetHamValue_1dimNeg { GetHamValue_1dimNeg() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE dx0 = thrust::get<1>(v); FLOAT_TYPE r = deriv0 * dx0; return -r; } }; struct GetHamValue_2dim { GetHamValue_2dim() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE dx0 = thrust::get<2>(v); const FLOAT_TYPE dx1 = thrust::get<3>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; return r; } }; struct GetHamValue_2dimNeg { GetHamValue_2dimNeg() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE dx0 = thrust::get<2>(v); const FLOAT_TYPE dx1 = thrust::get<3>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; return -r; } }; struct GetHamValue_3dim { GetHamValue_3dim() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE dx0 = thrust::get<3>(v); const FLOAT_TYPE dx1 = thrust::get<4>(v); const FLOAT_TYPE dx2 = thrust::get<5>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; return r; } }; struct GetHamValue_3dimNeg { GetHamValue_3dimNeg() {} template<typename Tuple> __host__ 
__device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE dx0 = thrust::get<3>(v); const FLOAT_TYPE dx1 = thrust::get<4>(v); const FLOAT_TYPE dx2 = thrust::get<5>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; return -r; } }; struct GetHamValue_4dim { GetHamValue_4dim() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE deriv3 = thrust::get<3>(v); const FLOAT_TYPE dx0 = thrust::get<4>(v); const FLOAT_TYPE dx1 = thrust::get<5>(v); const FLOAT_TYPE dx2 = thrust::get<6>(v); const FLOAT_TYPE dx3 = thrust::get<7>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += deriv3 * dx3; return r; } }; struct GetHamValue_4dimNeg { GetHamValue_4dimNeg() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE deriv3 = thrust::get<3>(v); const FLOAT_TYPE dx0 = thrust::get<4>(v); const FLOAT_TYPE dx1 = thrust::get<5>(v); const FLOAT_TYPE dx2 = thrust::get<6>(v); const FLOAT_TYPE dx3 = thrust::get<7>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += deriv3 * dx3; return -r; } }; struct GetHamValue_5dim { GetHamValue_5dim() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE deriv3 = thrust::get<3>(v); const FLOAT_TYPE deriv4 = thrust::get<4>(v); const FLOAT_TYPE dx0 = thrust::get<5>(v); const FLOAT_TYPE dx1 = thrust::get<6>(v); const FLOAT_TYPE dx2 = thrust::get<7>(v); const FLOAT_TYPE dx3 = thrust::get<8>(v); const FLOAT_TYPE dx4 = thrust::get<9>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += deriv3 * dx3; r += deriv4 * dx4; return r; } }; struct GetHamValue_5dimNeg { GetHamValue_5dimNeg() {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE deriv3 = thrust::get<3>(v); const FLOAT_TYPE deriv4 = thrust::get<4>(v); const FLOAT_TYPE dx0 = thrust::get<5>(v); const FLOAT_TYPE dx1 = thrust::get<6>(v); const FLOAT_TYPE dx2 = thrust::get<7>(v); const FLOAT_TYPE dx3 = thrust::get<8>(v); const FLOAT_TYPE dx4 = thrust::get<9>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += deriv3 * dx3; r += deriv4 * dx4; return -r; } }; struct GetHamValueTIdx_1dim { const FLOAT_TYPE TIderiv; GetHamValueTIdx_1dim(const FLOAT_TYPE TIderiv) : TIderiv(TIderiv) {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE dx0 = thrust::get<1>(v); const FLOAT_TYPE TIdx0 = thrust::get<2>(v); FLOAT_TYPE r = deriv0 * dx0; r += TIderiv * TIdx0; return r; } }; struct GetHamValueTIdx_1dimNeg { const FLOAT_TYPE TIderiv; GetHamValueTIdx_1dimNeg(const FLOAT_TYPE TIderiv) : TIderiv(TIderiv) {} template<typename Tuple> __host__ 
__device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE dx0 = thrust::get<1>(v); const FLOAT_TYPE TIdx0 = thrust::get<2>(v); FLOAT_TYPE r = deriv0 * dx0; r += TIderiv * TIdx0; return -r; } }; struct GetHamValueTIdx_2dim { const FLOAT_TYPE TIderiv; GetHamValueTIdx_2dim(const FLOAT_TYPE TIderiv) : TIderiv(TIderiv) {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE dx0 = thrust::get<2>(v); const FLOAT_TYPE dx1 = thrust::get<3>(v); const FLOAT_TYPE TIdx0 = thrust::get<4>(v); const FLOAT_TYPE TIdx1 = thrust::get<5>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += TIderiv * TIdx0; r += TIderiv * TIdx1; return r; } }; struct GetHamValueTIdx_2dimNeg { const FLOAT_TYPE TIderiv; GetHamValueTIdx_2dimNeg(const FLOAT_TYPE TIderiv) : TIderiv(TIderiv) {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE dx0 = thrust::get<2>(v); const FLOAT_TYPE dx1 = thrust::get<3>(v); const FLOAT_TYPE TIdx0 = thrust::get<4>(v); const FLOAT_TYPE TIdx1 = thrust::get<5>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += TIderiv * TIdx0; r += TIderiv * TIdx1; return -r; } }; struct GetHamValueTIdx_3dim { const FLOAT_TYPE TIderiv; GetHamValueTIdx_3dim(const FLOAT_TYPE TIderiv) : TIderiv(TIderiv) {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE dx0 = thrust::get<3>(v); const FLOAT_TYPE dx1 = thrust::get<4>(v); const FLOAT_TYPE dx2 = thrust::get<5>(v); const FLOAT_TYPE TIdx0 = thrust::get<6>(v); const FLOAT_TYPE TIdx1 = thrust::get<7>(v); const FLOAT_TYPE TIdx2 = thrust::get<8>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += TIderiv * TIdx0; r += TIderiv * TIdx1; r += TIderiv * TIdx2; return r; } }; struct GetHamValueTIdx_3dimNeg { const FLOAT_TYPE TIderiv; GetHamValueTIdx_3dimNeg(const FLOAT_TYPE TIderiv) : TIderiv(TIderiv) {} template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE deriv0 = thrust::get<0>(v); const FLOAT_TYPE deriv1 = thrust::get<1>(v); const FLOAT_TYPE deriv2 = thrust::get<2>(v); const FLOAT_TYPE dx0 = thrust::get<3>(v); const FLOAT_TYPE dx1 = thrust::get<4>(v); const FLOAT_TYPE dx2 = thrust::get<5>(v); const FLOAT_TYPE TIdx0 = thrust::get<6>(v); const FLOAT_TYPE TIdx1 = thrust::get<7>(v); const FLOAT_TYPE TIdx2 = thrust::get<8>(v); FLOAT_TYPE r = deriv0 * dx0; r += deriv1 * dx1; r += deriv2 * dx2; r += TIderiv * TIdx0; r += TIderiv * TIdx1; r += TIderiv * TIdx2; return -r; } }; bool hamFunc_exec_cuda( beacls::UVec& hamValue_uvec, const std::vector<beacls::UVec>& deriv_uvecs, const std::vector<beacls::UVec>& dx_uvecs, const std::vector<beacls::UVec>& TIdx_uvecs, const FLOAT_TYPE TIderiv, const bool TIdim, const bool negate ) { if (dx_uvecs.empty()) return false; beacls::reallocateAsSrc(hamValue_uvec, dx_uvecs[0]); FLOAT_TYPE* hamValue = beacls::UVec_<FLOAT_TYPE>(hamValue_uvec).ptr(); const size_t num_of_dimensions = deriv_uvecs.size(); if (beacls::is_cuda(hamValue_uvec)) { thrust::device_ptr<FLOAT_TYPE> hamValue_dev_ptr = thrust::device_pointer_cast(hamValue); cudaStream_t 
ham_stream = beacls::get_stream(hamValue_uvec); if (!TIdim) { switch(num_of_dimensions) { case 1: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, dx0_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_1dimNeg()); } else { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_1dim()); } } break; case 2: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, dx0_dev_ptr, dx1_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_2dimNeg()); } else { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_2dim()); } } break; case 3: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr(); const FLOAT_TYPE* dx2_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[2]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr); thrust::device_ptr<const FLOAT_TYPE> dx2_dev_ptr = thrust::device_pointer_cast(dx2_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr, dx0_dev_ptr, dx1_dev_ptr, dx2_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_3dimNeg()); } else { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), 
hamValue_dev_ptr, GetHamValue_3dim()); } } break; case 4: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr(); const FLOAT_TYPE* dx2_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[2]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr); thrust::device_ptr<const FLOAT_TYPE> dx2_dev_ptr = thrust::device_pointer_cast(dx2_ptr); const FLOAT_TYPE* deriv3_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[3]).ptr(); const FLOAT_TYPE* dx3_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[3]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv3_dev_ptr = thrust::device_pointer_cast(deriv3_ptr); thrust::device_ptr<const FLOAT_TYPE> dx3_dev_ptr = thrust::device_pointer_cast(dx3_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr, deriv3_dev_ptr, dx0_dev_ptr, dx1_dev_ptr, dx2_dev_ptr, dx3_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_4dimNeg()); } else { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_4dim()); } } break; case 5: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr(); const FLOAT_TYPE* dx2_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[2]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr); thrust::device_ptr<const FLOAT_TYPE> dx2_dev_ptr = thrust::device_pointer_cast(dx2_ptr); const FLOAT_TYPE* deriv3_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[3]).ptr(); const FLOAT_TYPE* dx3_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[3]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv3_dev_ptr = thrust::device_pointer_cast(deriv3_ptr); thrust::device_ptr<const FLOAT_TYPE> dx3_dev_ptr = thrust::device_pointer_cast(dx3_ptr); const FLOAT_TYPE* deriv4_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[4]).ptr(); const FLOAT_TYPE* dx4_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[4]).ptr(); thrust::device_ptr<const FLOAT_TYPE> 
deriv4_dev_ptr = thrust::device_pointer_cast(deriv4_ptr); thrust::device_ptr<const FLOAT_TYPE> dx4_dev_ptr = thrust::device_pointer_cast(dx4_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr, deriv3_dev_ptr, deriv4_dev_ptr, dx0_dev_ptr, dx1_dev_ptr, dx2_dev_ptr, dx3_dev_ptr, dx4_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_5dimNeg()); } else { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValue_5dim()); } } break; default: beacls::synchronizeUVec(dx_uvecs[0]); for (size_t dimension = 0; dimension < num_of_dimensions; ++dimension) { const FLOAT_TYPE* deriv_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[dimension]).ptr(); const FLOAT_TYPE* dx_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[dimension]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv_dev_ptr = thrust::device_pointer_cast(deriv_ptr); thrust::device_ptr<const FLOAT_TYPE> dx_dev_ptr = thrust::device_pointer_cast(dx_ptr); if (dimension==0){ auto src_Tuple = thrust::make_tuple(deriv_dev_ptr, dx_dev_ptr); auto src_ite = thrust::make_zip_iterator(src_Tuple); thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[dimension].size(), hamValue_dev_ptr, GetHamValue3_dim0()); } else { auto src_dst_Tuple = thrust::make_tuple(hamValue_dev_ptr, deriv_dev_ptr, dx_dev_ptr); auto src_dst_ite = thrust::make_zip_iterator(src_dst_Tuple); thrust::for_each(thrust::cuda::par.on(ham_stream), src_dst_ite, src_dst_ite + dx_uvecs[dimension].size(), GetHamValue3_dimNot0()); } } break; } } else { switch(num_of_dimensions) { case 1: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); const FLOAT_TYPE* TIdx0_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx0_dev_ptr = thrust::device_pointer_cast(TIdx0_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, dx0_dev_ptr, TIdx0_dev_ptr ); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_1dimNeg(TIderiv)); } else { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_1dim(TIderiv)); } } break; case 2: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); beacls::synchronizeUVec(TIdx_uvecs[0]); const FLOAT_TYPE* TIdx0_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx0_dev_ptr = thrust::device_pointer_cast(TIdx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const 
FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); const FLOAT_TYPE* TIdx1_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx1_dev_ptr = thrust::device_pointer_cast(TIdx1_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, dx0_dev_ptr, dx1_dev_ptr, TIdx0_dev_ptr, TIdx1_dev_ptr ); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_2dimNeg(TIderiv)); } else { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_2dim(TIderiv)); } } break; case 3: { const FLOAT_TYPE* deriv0_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[0]).ptr(); beacls::synchronizeUVec(dx_uvecs[0]); const FLOAT_TYPE* dx0_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv0_dev_ptr = thrust::device_pointer_cast(deriv0_ptr); thrust::device_ptr<const FLOAT_TYPE> dx0_dev_ptr = thrust::device_pointer_cast(dx0_ptr); beacls::synchronizeUVec(TIdx_uvecs[0]); const FLOAT_TYPE* TIdx0_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[0]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx0_dev_ptr = thrust::device_pointer_cast(TIdx0_ptr); const FLOAT_TYPE* deriv1_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[1]).ptr(); const FLOAT_TYPE* dx1_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv1_dev_ptr = thrust::device_pointer_cast(deriv1_ptr); thrust::device_ptr<const FLOAT_TYPE> dx1_dev_ptr = thrust::device_pointer_cast(dx1_ptr); const FLOAT_TYPE* TIdx1_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[1]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx1_dev_ptr = thrust::device_pointer_cast(TIdx1_ptr); const FLOAT_TYPE* deriv2_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[2]).ptr(); const FLOAT_TYPE* dx2_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[2]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv2_dev_ptr = thrust::device_pointer_cast(deriv2_ptr); thrust::device_ptr<const FLOAT_TYPE> dx2_dev_ptr = thrust::device_pointer_cast(dx2_ptr); const FLOAT_TYPE* TIdx2_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[2]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx2_dev_ptr = thrust::device_pointer_cast(TIdx2_ptr); auto src_Tuple = thrust::make_tuple(deriv0_dev_ptr, deriv1_dev_ptr, deriv2_dev_ptr, dx0_dev_ptr, dx1_dev_ptr, dx2_dev_ptr, TIdx0_dev_ptr, TIdx1_dev_ptr, TIdx2_dev_ptr ); auto src_ite = thrust::make_zip_iterator(src_Tuple); if (negate) { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_3dimNeg(TIderiv)); } else { thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[0].size(), hamValue_dev_ptr, GetHamValueTIdx_3dim(TIderiv)); } } break; default: beacls::synchronizeUVec(dx_uvecs[0]); for (size_t dimension = 0; dimension < num_of_dimensions; ++dimension) { const FLOAT_TYPE* deriv_ptr = beacls::UVec_<FLOAT_TYPE>(deriv_uvecs[dimension]).ptr(); const FLOAT_TYPE* dx_ptr = beacls::UVec_<FLOAT_TYPE>(dx_uvecs[dimension]).ptr(); thrust::device_ptr<const FLOAT_TYPE> deriv_dev_ptr = thrust::device_pointer_cast(deriv_ptr); thrust::device_ptr<const FLOAT_TYPE> dx_dev_ptr = thrust::device_pointer_cast(dx_ptr); if (dimension==0){ auto src_Tuple = thrust::make_tuple(deriv_dev_ptr, dx_dev_ptr); auto src_ite = 
thrust::make_zip_iterator(src_Tuple); thrust::transform(thrust::cuda::par.on(ham_stream), src_ite, src_ite + dx_uvecs[dimension].size(), hamValue_dev_ptr, GetHamValue3_dim0()); } else if (dimension==num_of_dimensions-1 && negate) { auto src_dst_Tuple = thrust::make_tuple(hamValue_dev_ptr, deriv_dev_ptr, dx_dev_ptr); auto src_dst_ite = thrust::make_zip_iterator(src_dst_Tuple); thrust::for_each(thrust::cuda::par.on(ham_stream), src_dst_ite, src_dst_ite + dx_uvecs[dimension].size(), GetHamValue3_dimNot0Neg()); }else { auto src_dst_Tuple = thrust::make_tuple(hamValue_dev_ptr, deriv_dev_ptr, dx_dev_ptr); auto src_dst_ite = thrust::make_zip_iterator(src_dst_Tuple); thrust::for_each(thrust::cuda::par.on(ham_stream), src_dst_ite, src_dst_ite + dx_uvecs[dimension].size(), GetHamValue3_dimNot0()); } } beacls::synchronizeUVec(TIdx_uvecs[0]); for (size_t dimension = 0; dimension < num_of_dimensions; ++dimension) { const FLOAT_TYPE* TIdx_ptr = beacls::UVec_<FLOAT_TYPE>(TIdx_uvecs[dimension]).ptr(); thrust::device_ptr<const FLOAT_TYPE> TIdx_dev_ptr = thrust::device_pointer_cast(TIdx_ptr); auto src_dst_Tuple = thrust::make_tuple(hamValue_dev_ptr, TIdx_dev_ptr); auto src_dst_ite = thrust::make_zip_iterator(src_dst_Tuple); if (dimension==num_of_dimensions-1 && negate) { thrust::for_each(thrust::cuda::par.on(ham_stream), src_dst_ite, src_dst_ite + TIdx_uvecs[dimension].size(), GetHamValue2Neg(TIderiv)); } else { thrust::for_each(thrust::cuda::par.on(ham_stream), src_dst_ite, src_dst_ite + TIdx_uvecs[dimension].size(), GetHamValue2(TIderiv)); } } } } } else { std::cerr << __FILE__ << ":" << __LINE__ << ":" << __FUNCTION__ << " Invalid data size" << std::endl; return false; } return true; } struct GetMax4 { template<typename Tuple> __host__ __device__ FLOAT_TYPE operator()(const Tuple v) const { const FLOAT_TYPE uu = thrust::get<0>(v); const FLOAT_TYPE ul = thrust::get<1>(v); const FLOAT_TYPE ll = thrust::get<2>(v); const FLOAT_TYPE lu = thrust::get<3>(v); const FLOAT_TYPE maxUUUL = max_float_type<FLOAT_TYPE>(abs_float_type<FLOAT_TYPE>(uu), abs_float_type<FLOAT_TYPE>(ul)); const FLOAT_TYPE maxLULL = max_float_type<FLOAT_TYPE>(abs_float_type<FLOAT_TYPE>(lu), abs_float_type<FLOAT_TYPE>(ll)); const FLOAT_TYPE maxUUULLULL = max_float_type<FLOAT_TYPE>(maxUUUL, maxLULL); return maxUUULLULL; } }; bool partialFunc_exec_cuda( beacls::UVec& alphas_uvec, const beacls::UVec& dxLL_dim, const beacls::UVec& dxLU_dim, const beacls::UVec& dxUL_dim, const beacls::UVec& dxUU_dim ) { const size_t length = dxUU_dim.size(); if (alphas_uvec.type() != dxUU_dim.type()) alphas_uvec = beacls::UVec(dxUU_dim.depth(), dxUU_dim.type(), length); else alphas_uvec.resize(length); const FLOAT_TYPE* dxUU_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dxUU_dim).ptr(); const FLOAT_TYPE* dxUL_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dxUL_dim).ptr(); const FLOAT_TYPE* dxLL_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dxLL_dim).ptr(); const FLOAT_TYPE* dxLU_dim_ptr = beacls::UVec_<FLOAT_TYPE>(dxLU_dim).ptr(); FLOAT_TYPE* alphas = beacls::UVec_<FLOAT_TYPE>(alphas_uvec).ptr(); if ((dxUU_dim.type() == beacls::UVecType_Cuda) && (dxUL_dim.type() == beacls::UVecType_Cuda) && (dxLU_dim.type() == beacls::UVecType_Cuda) && (dxLL_dim.type() == beacls::UVecType_Cuda) ){ cudaStream_t alpha_stream = beacls::get_stream(alphas_uvec); thrust::device_ptr<FLOAT_TYPE> alphas_dev_ptr = thrust::device_pointer_cast(alphas); beacls::synchronizeUVec(dxUU_dim); beacls::synchronizeUVec(dxUL_dim); beacls::synchronizeUVec(dxLU_dim); beacls::synchronizeUVec(dxLL_dim); thrust::device_ptr<const 
FLOAT_TYPE> dxUU_dim_dev_ptr = thrust::device_pointer_cast(dxUU_dim_ptr); thrust::device_ptr<const FLOAT_TYPE> dxUL_dim_dev_ptr = thrust::device_pointer_cast(dxUL_dim_ptr); thrust::device_ptr<const FLOAT_TYPE> dxLL_dim_dev_ptr = thrust::device_pointer_cast(dxLL_dim_ptr); thrust::device_ptr<const FLOAT_TYPE> dxLU_dim_dev_ptr = thrust::device_pointer_cast(dxLU_dim_ptr); auto float_type4Tuple = thrust::make_tuple( dxUU_dim_dev_ptr, dxUL_dim_dev_ptr, dxLL_dim_dev_ptr, dxLU_dim_dev_ptr); auto float_type4Iterator = thrust::make_zip_iterator(float_type4Tuple); thrust::transform(thrust::cuda::par.on(alpha_stream), float_type4Iterator, float_type4Iterator + length, alphas_dev_ptr, GetMax4()); } else { for (size_t i = 0; i < length; ++i) { const FLOAT_TYPE max0 = max_float_type<FLOAT_TYPE>( abs_float_type<FLOAT_TYPE>(dxUU_dim_ptr[i]), abs_float_type<FLOAT_TYPE>(dxUL_dim_ptr[i])); const FLOAT_TYPE max1 = max_float_type<FLOAT_TYPE>( abs_float_type<FLOAT_TYPE>(dxLL_dim_ptr[i]), abs_float_type<FLOAT_TYPE>(dxLU_dim_ptr[i])); alphas[i] = max_float_type<FLOAT_TYPE>(max0, max1); } } return true; } #endif /* defined(USER_DEFINED_GPU_DYNSYS_FUNC) */ #endif /* defined(WITH_GPU) */
06557f7e61f51916015bcdc61efef3f2f95007e7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <chrono>
#include <iostream>
#include <string>
#include <vector>

const int minWorkgroups = 2;
const int maxWorkgroups = 2;
const int numIterations = 10;
const int expectedCount = 20480;

// general
int* var;
int* d_var;

// spin lock
int* flag;
int* d_flag;

// petersons
int* level;
int* d_level;
int* victim;
int* d_victim;

// bakery
int* entering;
int* d_entering;
int* ticket;
int* d_ticket;

// dekkers
int* dekker_flag;
int* d_dekker_flag;
int* turn;
int* d_turn;

__device__ bool other_thread_waiting(volatile int* _flag) {
    for (int i = 0; i < gridDim.x; i++) {
        if (i != blockIdx.x && _flag[i] == 1) {
            return true;
        }
    }
    return false;
}

__global__ void dekkers(volatile int* _flag, volatile int* _turn, int* _var, int numIterations) {
    if (threadIdx.x == 0) {
        for (int i = 0; i < numIterations; i++) {
            _flag[blockIdx.x] = 1;
            while(other_thread_waiting(_flag)) {
                _flag[blockIdx.x] = 0;
                while(*_turn != -1 && *_turn != blockIdx.x);
                *_turn = blockIdx.x;
                _flag[blockIdx.x] = 1;
            }
            __threadfence();
            *_var = *_var + 1;
            __threadfence();
            *_turn = -1;
            _flag[blockIdx.x] = 0;
        }
    }
}

__global__ void bakery(volatile int* _entering, volatile int* _ticket, int* _var, int numIterations) {
    if (threadIdx.x == 0) {
        for (int i = 0; i < numIterations; i++) {
            _entering[blockIdx.x] = 1;
            int max = 0;
            for (int j = 0; j < gridDim.x; j++) {
                if (_ticket[j] > max) {
                    max = _ticket[j];
                }
            }
            _ticket[blockIdx.x] = max + 1;
            __threadfence();
            for (int j = 0; j < gridDim.x; j++) {
                while(j != gridDim.x && _entering[j] && (_ticket[j] < _ticket[blockIdx.x] || (_ticket[j] == _ticket[blockIdx.x] && j < blockIdx.x)));
            }
            __threadfence();
            *_var = *_var + 1;
            __threadfence();
            _entering[blockIdx.x] = 0;
        }
    }
}

__global__ void petersons(volatile int* _level, volatile int* _victim, int* _var, int numIterations) {
    if (threadIdx.x == 0) {
        for (int i = 0; i < numIterations; i++) {
            for (int j = 0; j < gridDim.x - 1; j++) {
                _level[blockIdx.x] = j;
                _victim[j] = blockIdx.x;
                for (int k = 0; k < gridDim.x; k++) {
                    while (k != blockIdx.x && _level[k] >= j && _victim[j] == blockIdx.x);
                }
            }
            __threadfence();
            *_var = *_var + 1;
            __threadfence();
            _level[blockIdx.x] = -1;
        }
    }
}

__global__ void spinLock(volatile int* _flag, int* _var, int numIterations) {
    if (threadIdx.x == 0) {
        for (int i = 0; i < numIterations; i++) {
            while(atomicCAS((int*) _flag, 0, 1) == 1);
            __threadfence();
            *_var = *_var + 1;
            __threadfence();
            *_flag = 0;
        }
    }
}

void initializeBuffers(std::string testName) {
    var = (int*)malloc(1*sizeof(int));
    hipMalloc(&d_var, 1*sizeof(int));
    if (testName == "spin-lock") {
        flag = (int*)malloc(1*sizeof(int));
        hipMalloc(&d_flag, 1*sizeof(int));
    } else if (testName == "petersons") {
        level = (int*)malloc(maxWorkgroups*sizeof(int));
        hipMalloc(&d_level, maxWorkgroups*sizeof(int));
        victim = (int*)malloc(maxWorkgroups*sizeof(int));
        hipMalloc(&d_victim, maxWorkgroups*sizeof(int));
    } else if (testName == "bakery") {
        entering = (int*)malloc(maxWorkgroups*sizeof(int));
        hipMalloc(&d_entering, maxWorkgroups*sizeof(int));
        ticket = (int*)malloc(maxWorkgroups*sizeof(int));
        hipMalloc(&d_ticket, maxWorkgroups*sizeof(int));
    } else if (testName == "dekkers") {
        dekker_flag = (int*)malloc(maxWorkgroups*sizeof(int));
        hipMalloc(&d_dekker_flag, maxWorkgroups*sizeof(int));
        turn = (int*)malloc(1*sizeof(int));
        hipMalloc(&d_turn, 1*sizeof(int));
    }
}

void prepareBuffers(std::string testName) {
    if (testName == "spin-lock") {
        *flag = 0;
        hipMemcpy(d_flag, flag, 1*sizeof(int), hipMemcpyHostToDevice);
    } else if (testName == "petersons") {
        for (int i = 0; i < maxWorkgroups; i++) {
            level[i] = 0;
            victim[i] = 0;
        }
        hipMemcpy(d_level, level, maxWorkgroups*sizeof(int), hipMemcpyHostToDevice);
        hipMemcpy(d_victim, victim, maxWorkgroups*sizeof(int), hipMemcpyHostToDevice);
    } else if (testName == "bakery") {
        for (int i = 0; i < maxWorkgroups; i++) {
            entering[i] = 0;
            ticket[i] = 0;
        }
        hipMemcpy(d_entering, entering, maxWorkgroups*sizeof(int), hipMemcpyHostToDevice);
        hipMemcpy(d_ticket, ticket, maxWorkgroups*sizeof(int), hipMemcpyHostToDevice);
    } else if (testName == "dekkers") {
        for (int i = 0; i < maxWorkgroups; i++) {
            dekker_flag[i] = 0;
        }
        *turn = -1;
        hipMemcpy(d_dekker_flag, dekker_flag, maxWorkgroups*sizeof(int), hipMemcpyHostToDevice);
        hipMemcpy(d_turn, turn, 1*sizeof(int), hipMemcpyHostToDevice);
    }
}

void freeBuffers(std::string testName) {
    hipFree(d_var);
    free(var);
    if (testName == "spin-lock") {
        hipFree(d_flag);
        free(flag);
    } else if (testName == "petersons") {
        hipFree(d_level);
        hipFree(d_victim);
        free(level);
        free(victim);
    } else if (testName == "bakery") {
        hipFree(d_entering);
        hipFree(d_ticket);
        free(entering);
        free(ticket);
    } else if (testName == "dekkers") {
        hipFree(d_dekker_flag);
        hipFree(d_turn);
        free(dekker_flag);
        free(turn);
    }
}

void runTest(std::string testName, int iterationsPerTest, int numWorkgroups) {
    if (testName == "spin-lock") {
        std::cout << "iterations per test: " << iterationsPerTest << "\n";
        hipLaunchKernelGGL(( spinLock), dim3(numWorkgroups), dim3(1), 0, 0, d_flag, d_var, iterationsPerTest);
    } else if (testName == "petersons") {
        hipLaunchKernelGGL(( petersons), dim3(numWorkgroups), dim3(1), 0, 0, d_level, d_victim, d_var, iterationsPerTest);
    } else if (testName == "bakery") {
        hipLaunchKernelGGL(( bakery), dim3(numWorkgroups), dim3(1), 0, 0, d_entering, d_ticket, d_var, iterationsPerTest);
    } else if (testName == "dekkers") {
        hipLaunchKernelGGL(( dekkers), dim3(numWorkgroups), dim3(1), 0, 0, d_dekker_flag, d_turn, d_var, iterationsPerTest);
    }
}

int main(int argc, char* argv[]) {
    if (argc != 2) {
        std::cout << "Test name must be specified\n";
    }
    std::string testName(argv[1]);
    srand (time(NULL));
    std::cout << "Running Test" << testName << "\n";
    initializeBuffers(testName);
    double sum = 0;
    std::chrono::time_point<std::chrono::system_clock> start, end;
    for (int numWorkgroups = minWorkgroups; numWorkgroups <= maxWorkgroups; numWorkgroups*=2) {
        std::cout << "\nTest workgroups " << numWorkgroups << "\n";
        int iterationsPerTest = expectedCount/numWorkgroups;
        for (int i = 0; i < numIterations + 1; i++) {
            std::cout << "\ntest iteration " << i << "\n";
            *var = 0;
            hipMemcpy(d_var, var, 1*sizeof(int), hipMemcpyHostToDevice);
            prepareBuffers(testName);
            start = std::chrono::system_clock::now();
            runTest(testName, iterationsPerTest, numWorkgroups);
            end = std::chrono::system_clock::now();
            hipMemcpy(var, d_var, 1*sizeof(int), hipMemcpyDeviceToHost);
            std::chrono::duration<double> result = end - start;
            if (i > 0) sum += result.count();
            std::cout << "iteration time: " << result.count() << "s\n";
            std::cout << "expected: " << expectedCount << ", actual: " << *var << "\n";
            if (expectedCount != *var) {
                std::cout << "Expected not equal to actual!\n";
            }
        }
        std::cout << "Average test iteration time: " << sum / numIterations << "s\n";
        sum = 0;
    }
    freeBuffers(testName);
    return 0;
}
06557f7e61f51916015bcdc61efef3f2f95007e7.cu
#include <stdio.h> #include <chrono> #include <iostream> #include <string> #include <vector> const int minWorkgroups = 2; const int maxWorkgroups = 2; const int numIterations = 10; const int expectedCount = 20480; // general int* var; int* d_var; // spin lock int* flag; int* d_flag; // petersons int* level; int* d_level; int* victim; int* d_victim; // bakery int* entering; int* d_entering; int* ticket; int* d_ticket; // dekkers int* dekker_flag; int* d_dekker_flag; int* turn; int* d_turn; __device__ bool other_thread_waiting(volatile int* _flag) { for (int i = 0; i < gridDim.x; i++) { if (i != blockIdx.x && _flag[i] == 1) { return true; } } return false; } __global__ void dekkers(volatile int* _flag, volatile int* _turn, int* _var, int numIterations) { if (threadIdx.x == 0) { for (int i = 0; i < numIterations; i++) { _flag[blockIdx.x] = 1; while(other_thread_waiting(_flag)) { _flag[blockIdx.x] = 0; while(*_turn != -1 && *_turn != blockIdx.x); *_turn = blockIdx.x; _flag[blockIdx.x] = 1; } __threadfence(); *_var = *_var + 1; __threadfence(); *_turn = -1; _flag[blockIdx.x] = 0; } } } __global__ void bakery(volatile int* _entering, volatile int* _ticket, int* _var, int numIterations) { if (threadIdx.x == 0) { for (int i = 0; i < numIterations; i++) { _entering[blockIdx.x] = 1; int max = 0; for (int j = 0; j < gridDim.x; j++) { if (_ticket[j] > max) { max = _ticket[j]; } } _ticket[blockIdx.x] = max + 1; __threadfence(); for (int j = 0; j < gridDim.x; j++) { while(j != gridDim.x && _entering[j] && (_ticket[j] < _ticket[blockIdx.x] || (_ticket[j] == _ticket[blockIdx.x] && j < blockIdx.x))); } __threadfence(); *_var = *_var + 1; __threadfence(); _entering[blockIdx.x] = 0; } } } __global__ void petersons(volatile int* _level, volatile int* _victim, int* _var, int numIterations) { if (threadIdx.x == 0) { for (int i = 0; i < numIterations; i++) { for (int j = 0; j < gridDim.x - 1; j++) { _level[blockIdx.x] = j; _victim[j] = blockIdx.x; for (int k = 0; k < gridDim.x; k++) { while (k != blockIdx.x && _level[k] >= j && _victim[j] == blockIdx.x); } } __threadfence(); *_var = *_var + 1; __threadfence(); _level[blockIdx.x] = -1; } } } __global__ void spinLock(volatile int* _flag, int* _var, int numIterations) { if (threadIdx.x == 0) { for (int i = 0; i < numIterations; i++) { while(atomicCAS((int*) _flag, 0, 1) == 1); __threadfence(); *_var = *_var + 1; __threadfence(); *_flag = 0; } } } void initializeBuffers(std::string testName) { var = (int*)malloc(1*sizeof(int)); cudaMalloc(&d_var, 1*sizeof(int)); if (testName == "spin-lock") { flag = (int*)malloc(1*sizeof(int)); cudaMalloc(&d_flag, 1*sizeof(int)); } else if (testName == "petersons") { level = (int*)malloc(maxWorkgroups*sizeof(int)); cudaMalloc(&d_level, maxWorkgroups*sizeof(int)); victim = (int*)malloc(maxWorkgroups*sizeof(int)); cudaMalloc(&d_victim, maxWorkgroups*sizeof(int)); } else if (testName == "bakery") { entering = (int*)malloc(maxWorkgroups*sizeof(int)); cudaMalloc(&d_entering, maxWorkgroups*sizeof(int)); ticket = (int*)malloc(maxWorkgroups*sizeof(int)); cudaMalloc(&d_ticket, maxWorkgroups*sizeof(int)); } else if (testName == "dekkers") { dekker_flag = (int*)malloc(maxWorkgroups*sizeof(int)); cudaMalloc(&d_dekker_flag, maxWorkgroups*sizeof(int)); turn = (int*)malloc(1*sizeof(int)); cudaMalloc(&d_turn, 1*sizeof(int)); } } void prepareBuffers(std::string testName) { if (testName == "spin-lock") { *flag = 0; cudaMemcpy(d_flag, flag, 1*sizeof(int), cudaMemcpyHostToDevice); } else if (testName == "petersons") { for (int i = 0; i < maxWorkgroups; 
i++) { level[i] = 0; victim[i] = 0; } cudaMemcpy(d_level, level, maxWorkgroups*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_victim, victim, maxWorkgroups*sizeof(int), cudaMemcpyHostToDevice); } else if (testName == "bakery") { for (int i = 0; i < maxWorkgroups; i++) { entering[i] = 0; ticket[i] = 0; } cudaMemcpy(d_entering, entering, maxWorkgroups*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_ticket, ticket, maxWorkgroups*sizeof(int), cudaMemcpyHostToDevice); } else if (testName == "dekkers") { for (int i = 0; i < maxWorkgroups; i++) { dekker_flag[i] = 0; } *turn = -1; cudaMemcpy(d_dekker_flag, dekker_flag, maxWorkgroups*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_turn, turn, 1*sizeof(int), cudaMemcpyHostToDevice); } } void freeBuffers(std::string testName) { cudaFree(d_var); free(var); if (testName == "spin-lock") { cudaFree(d_flag); free(flag); } else if (testName == "petersons") { cudaFree(d_level); cudaFree(d_victim); free(level); free(victim); } else if (testName == "bakery") { cudaFree(d_entering); cudaFree(d_ticket); free(entering); free(ticket); } else if (testName == "dekkers") { cudaFree(d_dekker_flag); cudaFree(d_turn); free(dekker_flag); free(turn); } } void runTest(std::string testName, int iterationsPerTest, int numWorkgroups) { if (testName == "spin-lock") { std::cout << "iterations per test: " << iterationsPerTest << "\n"; spinLock<<<numWorkgroups, 1>>>(d_flag, d_var, iterationsPerTest); } else if (testName == "petersons") { petersons<<<numWorkgroups, 1>>>(d_level, d_victim, d_var, iterationsPerTest); } else if (testName == "bakery") { bakery<<<numWorkgroups, 1>>>(d_entering, d_ticket, d_var, iterationsPerTest); } else if (testName == "dekkers") { dekkers<<<numWorkgroups, 1>>>(d_dekker_flag, d_turn, d_var, iterationsPerTest); } } int main(int argc, char* argv[]) { if (argc != 2) { std::cout << "Test name must be specified\n"; } std::string testName(argv[1]); srand (time(NULL)); std::cout << "Running Test" << testName << "\n"; initializeBuffers(testName); double sum = 0; std::chrono::time_point<std::chrono::system_clock> start, end; for (int numWorkgroups = minWorkgroups; numWorkgroups <= maxWorkgroups; numWorkgroups*=2) { std::cout << "\nTest workgroups " << numWorkgroups << "\n"; int iterationsPerTest = expectedCount/numWorkgroups; for (int i = 0; i < numIterations + 1; i++) { std::cout << "\ntest iteration " << i << "\n"; *var = 0; cudaMemcpy(d_var, var, 1*sizeof(int), cudaMemcpyHostToDevice); prepareBuffers(testName); start = std::chrono::system_clock::now(); runTest(testName, iterationsPerTest, numWorkgroups); end = std::chrono::system_clock::now(); cudaMemcpy(var, d_var, 1*sizeof(int), cudaMemcpyDeviceToHost); std::chrono::duration<double> result = end - start; if (i > 0) sum += result.count(); std::cout << "iteration time: " << result.count() << "s\n"; std::cout << "expected: " << expectedCount << ", actual: " << *var << "\n"; if (expectedCount != *var) { std::cout << "Expected not equal to actual!\n"; } } std::cout << "Average test iteration time: " << sum / numIterations << "s\n"; sum = 0; } freeBuffers(testName); return 0; }
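The stress test above never checks the CUDA runtime for errors, records its end timestamp before any synchronization (so it times only the asynchronous launch), and when no test name is given it prints a message but still dereferences argv[1]. A minimal error-check helper of the kind that could wrap those calls is sketched below; the macro name CUDA_CHECK and the usage shown are assumptions for illustration, not part of the original file.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with a readable message if a CUDA runtime call fails.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            exit(1);                                                        \
        }                                                                   \
    } while (0)

// Example usage around one of the launches in runTest:
//   spinLock<<<numWorkgroups, 1>>>(d_flag, d_var, iterationsPerTest);
//   CUDA_CHECK(cudaGetLastError());       // catches launch-configuration errors
//   CUDA_CHECK(cudaDeviceSynchronize());  // catches asynchronous execution errors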
3a844a5ce2f517db5732314cfafd3248ef193919.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void indexedmultadj(TYPE_T *x, TYPE_T *y) { /* y^i -= sum_j P^j' B^j' x^ij */ // global thread index int i = blockIdx.x*blockDim.x + threadIdx.x; // stay inside maximum dimensions if (i >= N) return; // iteration variables and misc. int jj, ll, mm, idx; TYPE_T newval; for (jj = 0; jj < J; jj++) { for (ll = 0; ll < L; ll++) { idx = i*K + P[jj*L + ll]; newval = y[idx]; for (mm = 0; mm < M; mm++) { newval -= B[jj*(L*M) + mm*L + ll]*x[jj*(N*M) + i*M + mm]; } y[idx] = newval; } } } __global__ void indexedmult(TYPE_T *x, TYPE_T *y) { /* y^ij -= B^j P^j x^i */ // global thread index int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int m = blockIdx.z*blockDim.z + threadIdx.z; // stay inside maximum dimensions if (i >= N || j >= J || m >= M) return; // iteration variables and misc. int idx = j*(N*M) + i*M + m; TYPE_T newval = y[idx]; for (int ll = 0; ll < L; ll++) { newval -= B[j*(L*M) + m*L + ll]*x[i*K + P[j*L + ll]]; } y[idx] = newval; }
3a844a5ce2f517db5732314cfafd3248ef193919.cu
__global__ void indexedmultadj(TYPE_T *x, TYPE_T *y) { /* y^i -= sum_j P^j' B^j' x^ij */ // global thread index int i = blockIdx.x*blockDim.x + threadIdx.x; // stay inside maximum dimensions if (i >= N) return; // iteration variables and misc. int jj, ll, mm, idx; TYPE_T newval; for (jj = 0; jj < J; jj++) { for (ll = 0; ll < L; ll++) { idx = i*K + P[jj*L + ll]; newval = y[idx]; for (mm = 0; mm < M; mm++) { newval -= B[jj*(L*M) + mm*L + ll]*x[jj*(N*M) + i*M + mm]; } y[idx] = newval; } } } __global__ void indexedmult(TYPE_T *x, TYPE_T *y) { /* y^ij -= B^j P^j x^i */ // global thread index int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int m = blockIdx.z*blockDim.z + threadIdx.z; // stay inside maximum dimensions if (i >= N || j >= J || m >= M) return; // iteration variables and misc. int idx = j*(N*M) + i*M + m; TYPE_T newval = y[idx]; for (int ll = 0; ll < L; ll++) { newval -= B[j*(L*M) + m*L + ll]*x[i*K + P[j*L + ll]]; } y[idx] = newval; }
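The two kernels above reference TYPE_T, N, J, K, L, M and the arrays P and B without declaring them, so the file only compiles once those symbols are supplied from outside (for example by text that a build step prepends). The definitions below are purely an assumption to make the snippet self-contained; the real project may use different types, sizes, and storage classes.

typedef float TYPE_T;          // element type used by both kernels
#define N 1024                 // number of points i
#define J 4                    // number of index/coefficient blocks j
#define K 16                   // stride of the indexed dimension of y in indexedmultadj
#define L 3                    // indices per block, addressed as P[j*L + l]
#define M 2                    // size of the m dimension of B and x

__constant__ int    P[J * L];      // index table
__constant__ TYPE_T B[J * L * M];  // coefficients, laid out as B[j*(L*M) + m*L + l]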
243b1cb2c6485b43333af95342ed601d13b483e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> //static const int WORK_SIZE = 256; // ./B03 60 5555 5555 25 /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } __device__ unsigned int bitreverse(unsigned int number) { number = ((0xf0f0f0f0 & number) >> 4) | ((0x0f0f0f0f & number) << 4); number = ((0xcccccccc & number) >> 2) | ((0x33333333 & number) << 2); number = ((0xaaaaaaaa & number) >> 1) | ((0x55555555 & number) << 1); return number; } /** * CUDA kernel function that reverses the order of bits in each element of the array. */ __global__ void bitreverse(void *data) { unsigned int *idata = (unsigned int*) data; idata[threadIdx.x] = bitreverse(idata[threadIdx.x]); } __global__ void multiply(int *a, int *b, int *c, int a_r,int a_c, int b_r, int b_c) { //c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; int index = threadIdx.x + blockIdx.x * blockDim.x; // c is a_r b_c int row = index/b_c; int column = index % b_c; int i; for(i=0; i<a_c;i++) { c[index] += a[row+i]*b[column+(i*b_c)]; } } void printMatrix(int* m, int rows, int columns) { int i; int j; for(i=0; i<rows;i++) { for(j=0;j<columns;j++) { printf("%d\t",m[i*columns+j]); } printf("\n"); } printf("\n"); } /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char *argv[]) { struct timeval t0; struct timeval t1; int a_r = atoi(argv[1]); // count of rows from A int a_c = atoi(argv[2]); // column from A int b_r = atoi(argv[3]); // count of rows from B int b_c = atoi(argv[4]); // column from B if(a_c != b_r) { printf("\n\tError! 
\n\tPlease match the size of colums of A with the size of rows of B!\n\n"); return -1; } int *a, *b, *c; // host copies of a, b, c int *d_a, *d_b, *d_c; // device copies of a, b, c // int size = i * sizeof(int); a = (int *)malloc(a_r*a_c * sizeof(int)); b = (int *)malloc(b_r*b_c * sizeof(int)); c = (int *)malloc(a_r*b_c* sizeof(int)); // Allocate space for device copies of a, b, c hipMalloc((void **)&d_a, a_r*a_c * sizeof(int)); hipMalloc((void **)&d_b, b_r*b_c * sizeof(int)); hipMalloc((void **)&d_c, a_r*b_c * sizeof(int)); int nDevices; hipGetDeviceCount(&nDevices); int THREADS_PER_BLOCK = 0; for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf(" Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Max Threads Per Block: %d\n\n", prop.maxThreadsPerBlock); THREADS_PER_BLOCK = prop.maxThreadsPerBlock; } int i = 0; int j = 0; for(i =0; i < a_r; i++) { for(j=0; j < a_c; j++) { a[i*a_c+j] = 1; //rand() % 100; } } for(i=0; i < b_r; i++) { for(j=0; j < b_c; j++) { b[i*b_c+j] = 1; //rand() % 100; } } for(i=0; i < a_r; i++) { for(j= 0; j < b_c; j++) { c[i*b_c+j] = 0; } } gettimeofday(&t0,0); // Copy inputs to device hipMemcpy(d_a, a, a_r*a_c * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, b, b_r*b_c * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_c, c, a_r*b_c * sizeof(int), hipMemcpyHostToDevice); // Launch add() kernel on GPU with N blocks int blocks = ceil((float)a_r*b_c/1024); // round up hipLaunchKernelGGL(( multiply), dim3(blocks),dim3(1024), 0, 0, d_a, d_b, d_c, a_r,a_c, b_r, b_c); // Copy result back to host hipMemcpy(c, d_c, a_r*b_c * sizeof(int), hipMemcpyDeviceToHost); gettimeofday(&t1,0); double time_spent = (t1.tv_sec-t0.tv_sec) + (double)(t1.tv_usec-t0.tv_usec)/1000000; printMatrix(a, a_r, a_c); printMatrix(b, b_r, b_c); printMatrix(c, a_r, b_c); printf("Time Calculated: %f\n\n", time_spent); printf("Block Count: %d\n",blocks); return 0; }
243b1cb2c6485b43333af95342ed601d13b483e6.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> //static const int WORK_SIZE = 256; // ./B03 60 5555 5555 25 /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } __device__ unsigned int bitreverse(unsigned int number) { number = ((0xf0f0f0f0 & number) >> 4) | ((0x0f0f0f0f & number) << 4); number = ((0xcccccccc & number) >> 2) | ((0x33333333 & number) << 2); number = ((0xaaaaaaaa & number) >> 1) | ((0x55555555 & number) << 1); return number; } /** * CUDA kernel function that reverses the order of bits in each element of the array. */ __global__ void bitreverse(void *data) { unsigned int *idata = (unsigned int*) data; idata[threadIdx.x] = bitreverse(idata[threadIdx.x]); } __global__ void multiply(int *a, int *b, int *c, int a_r,int a_c, int b_r, int b_c) { //c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; int index = threadIdx.x + blockIdx.x * blockDim.x; // c is a_r b_c int row = index/b_c; int column = index % b_c; int i; for(i=0; i<a_c;i++) { c[index] += a[row+i]*b[column+(i*b_c)]; } } void printMatrix(int* m, int rows, int columns) { int i; int j; for(i=0; i<rows;i++) { for(j=0;j<columns;j++) { printf("%d\t",m[i*columns+j]); } printf("\n"); } printf("\n"); } /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char *argv[]) { struct timeval t0; struct timeval t1; int a_r = atoi(argv[1]); // count of rows from A int a_c = atoi(argv[2]); // column from A int b_r = atoi(argv[3]); // count of rows from B int b_c = atoi(argv[4]); // column from B if(a_c != b_r) { printf("\n\tError! 
\n\tPlease match the size of colums of A with the size of rows of B!\n\n"); return -1; } int *a, *b, *c; // host copies of a, b, c int *d_a, *d_b, *d_c; // device copies of a, b, c // int size = i * sizeof(int); a = (int *)malloc(a_r*a_c * sizeof(int)); b = (int *)malloc(b_r*b_c * sizeof(int)); c = (int *)malloc(a_r*b_c* sizeof(int)); // Allocate space for device copies of a, b, c cudaMalloc((void **)&d_a, a_r*a_c * sizeof(int)); cudaMalloc((void **)&d_b, b_r*b_c * sizeof(int)); cudaMalloc((void **)&d_c, a_r*b_c * sizeof(int)); int nDevices; cudaGetDeviceCount(&nDevices); int THREADS_PER_BLOCK = 0; for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf(" Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Max Threads Per Block: %d\n\n", prop.maxThreadsPerBlock); THREADS_PER_BLOCK = prop.maxThreadsPerBlock; } int i = 0; int j = 0; for(i =0; i < a_r; i++) { for(j=0; j < a_c; j++) { a[i*a_c+j] = 1; //rand() % 100; } } for(i=0; i < b_r; i++) { for(j=0; j < b_c; j++) { b[i*b_c+j] = 1; //rand() % 100; } } for(i=0; i < a_r; i++) { for(j= 0; j < b_c; j++) { c[i*b_c+j] = 0; } } gettimeofday(&t0,0); // Copy inputs to device cudaMemcpy(d_a, a, a_r*a_c * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, b_r*b_c * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, a_r*b_c * sizeof(int), cudaMemcpyHostToDevice); // Launch add() kernel on GPU with N blocks int blocks = ceil((float)a_r*b_c/1024); // round up multiply<<<blocks,1024>>>(d_a, d_b, d_c, a_r,a_c, b_r, b_c); // Copy result back to host cudaMemcpy(c, d_c, a_r*b_c * sizeof(int), cudaMemcpyDeviceToHost); gettimeofday(&t1,0); double time_spent = (t1.tv_sec-t0.tv_sec) + (double)(t1.tv_usec-t0.tv_usec)/1000000; printMatrix(a, a_r, a_c); printMatrix(b, b_r, b_c); printMatrix(c, a_r, b_c); printf("Time Calculated: %f\n\n", time_spent); printf("Block Count: %d\n",blocks); return 0; }
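In both versions of this file the multiply kernel walks matrix A with a[row + i] instead of a[row*a_c + i], and there is no guard on index, so when blocks*1024 exceeds a_r*b_c the tail threads write past the end of C. A corrected sketch of the same kernel is given below for reference; the name multiply_fixed is illustrative, and the original launch configuration and host code would work with it unchanged.

__global__ void multiply_fixed(const int *a, const int *b, int *c,
                               int a_r, int a_c, int b_r, int b_c)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= a_r * b_c) return;        // guard the partially filled last block
    int row    = index / b_c;
    int column = index % b_c;
    int sum = 0;
    for (int i = 0; i < a_c; i++)
        sum += a[row * a_c + i] * b[column + i * b_c];   // A is a_r x a_c, row-major
    c[index] = sum;
}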
db932c35adb8be8e50ab8bcf47d599a79373a9ae.hip
// !!! This is a file automatically generated by hipify!!! #include <unittest/unittest.h> #include <thrust/gather.h> #include <thrust/fill.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/sequence.h> __THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN template <class Vector> void TestGatherSimple(void) { typedef typename Vector::value_type T; Vector map(5); // gather indices Vector src(8); // source vector Vector dst(5); // destination vector map[0] = 6; map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; thrust::gather(map.begin(), map.end(), src.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 6); ASSERT_EQUAL(dst[1], 2); ASSERT_EQUAL(dst[2], 1); ASSERT_EQUAL(dst[3], 7); ASSERT_EQUAL(dst[4], 2); } DECLARE_VECTOR_UNITTEST(TestGatherSimple); struct my_tag : thrust::device_system_tag {}; template<typename InputIterator, typename RandomAccessIterator, typename OutputIterator> OutputIterator gather(my_tag, InputIterator , InputIterator, RandomAccessIterator, OutputIterator result) { *result = 13; return result; } void TestGatherDispatch() { thrust::device_vector<int> vec(1); thrust::gather(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.end()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin())); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestGatherDispatch); template <typename T> void TestGather(const size_t n) { const size_t source_size = ::min((size_t) 10, 2 * n); // source vectors to gather from thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size); thrust::device_vector<T> d_source = h_source; // gather indices thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % source_size; thrust::device_vector<unsigned int> d_map = h_map; // gather destination thrust::host_vector<T> h_output(n); thrust::device_vector<T> d_output(n); thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), h_output.begin()); thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), d_output.begin()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestGather); template <typename T> void TestGatherToDiscardIterator(const size_t n) { const size_t source_size = ::min((size_t) 10, 2 * n); // source vectors to gather from thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size); thrust::device_vector<T> d_source = h_source; // gather indices thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % source_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::discard_iterator<> h_result = thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), thrust::make_discard_iterator()); thrust::discard_iterator<> d_result = thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), thrust::make_discard_iterator()); thrust::discard_iterator<> reference(n); ASSERT_EQUAL_QUIET(reference, h_result); ASSERT_EQUAL_QUIET(reference, d_result); } DECLARE_VARIABLE_UNITTEST(TestGatherToDiscardIterator); template <class Vector> void TestGatherIfSimple(void) { typedef typename Vector::value_type T; Vector flg(5); // predicate array Vector map(5); // gather indices Vector src(8); // source vector Vector dst(5); // destination vector flg[0] = 0; flg[1] = 1; 
flg[2] = 0; flg[3] = 1; flg[4] = 0; map[0] = 6; map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; thrust::gather_if(map.begin(), map.end(), flg.begin(), src.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 2); ASSERT_EQUAL(dst[2], 0); ASSERT_EQUAL(dst[3], 7); ASSERT_EQUAL(dst[4], 0); } DECLARE_VECTOR_UNITTEST(TestGatherIfSimple); template <typename T> struct is_even_gather_if { __host__ __device__ bool operator()(const T i) const { return (i % 2) == 0; } }; template<typename InputIterator1, typename InputIterator2, typename RandomAccessIterator, typename OutputIterator> OutputIterator gather_if(my_tag, InputIterator1 map_first, InputIterator1 map_last, InputIterator2 stencil, RandomAccessIterator input_first, OutputIterator result) { *result = 13; return result; } void TestGatherIfDispatch() { thrust::device_vector<int> vec(1); thrust::gather_if(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.end()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin())); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestGatherIfDispatch); template <typename T> void TestGatherIf(const size_t n) { const size_t source_size = ::min((size_t) 10, 2 * n); // source vectors to gather from thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size); thrust::device_vector<T> d_source = h_source; // gather indices thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % source_size; thrust::device_vector<unsigned int> d_map = h_map; // gather stencil thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_stencil[i] = h_stencil[i] % 2; thrust::device_vector<unsigned int> d_stencil = h_stencil; // gather destination thrust::host_vector<T> h_output(n); thrust::device_vector<T> d_output(n); thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), h_output.begin(), is_even_gather_if<unsigned int>()); thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), d_output.begin(), is_even_gather_if<unsigned int>()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestGatherIf); template <typename T> void TestGatherIfToDiscardIterator(const size_t n) { const size_t source_size = ::min((size_t) 10, 2 * n); // source vectors to gather from thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size); thrust::device_vector<T> d_source = h_source; // gather indices thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % source_size; thrust::device_vector<unsigned int> d_map = h_map; // gather stencil thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_stencil[i] = h_stencil[i] % 2; thrust::device_vector<unsigned int> d_stencil = h_stencil; thrust::discard_iterator<> h_result = thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned int>()); thrust::discard_iterator<> d_result = thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned 
int>()); thrust::discard_iterator<> reference(n); ASSERT_EQUAL_QUIET(reference, h_result); ASSERT_EQUAL_QUIET(reference, d_result); } DECLARE_VARIABLE_UNITTEST(TestGatherIfToDiscardIterator); template <typename Vector> void TestGatherCountingIterator(void) { #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) && (_DEBUG != 0) KNOWN_FAILURE; #endif typedef typename Vector::value_type T; Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); Vector output(10); // source has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::gather(map.begin(), map.end(), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); // map has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::gather(thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)source.size()), source.begin(), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::gather(thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)output.size()), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_VECTOR_UNITTEST(TestGatherCountingIterator); __THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
db932c35adb8be8e50ab8bcf47d599a79373a9ae.cu
#include <unittest/unittest.h> #include <thrust/gather.h> #include <thrust/fill.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/sequence.h> __THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN template <class Vector> void TestGatherSimple(void) { typedef typename Vector::value_type T; Vector map(5); // gather indices Vector src(8); // source vector Vector dst(5); // destination vector map[0] = 6; map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; thrust::gather(map.begin(), map.end(), src.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 6); ASSERT_EQUAL(dst[1], 2); ASSERT_EQUAL(dst[2], 1); ASSERT_EQUAL(dst[3], 7); ASSERT_EQUAL(dst[4], 2); } DECLARE_VECTOR_UNITTEST(TestGatherSimple); struct my_tag : thrust::device_system_tag {}; template<typename InputIterator, typename RandomAccessIterator, typename OutputIterator> OutputIterator gather(my_tag, InputIterator , InputIterator, RandomAccessIterator, OutputIterator result) { *result = 13; return result; } void TestGatherDispatch() { thrust::device_vector<int> vec(1); thrust::gather(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.end()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin())); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestGatherDispatch); template <typename T> void TestGather(const size_t n) { const size_t source_size = std::min((size_t) 10, 2 * n); // source vectors to gather from thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size); thrust::device_vector<T> d_source = h_source; // gather indices thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % source_size; thrust::device_vector<unsigned int> d_map = h_map; // gather destination thrust::host_vector<T> h_output(n); thrust::device_vector<T> d_output(n); thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), h_output.begin()); thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), d_output.begin()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestGather); template <typename T> void TestGatherToDiscardIterator(const size_t n) { const size_t source_size = std::min((size_t) 10, 2 * n); // source vectors to gather from thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size); thrust::device_vector<T> d_source = h_source; // gather indices thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % source_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::discard_iterator<> h_result = thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), thrust::make_discard_iterator()); thrust::discard_iterator<> d_result = thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), thrust::make_discard_iterator()); thrust::discard_iterator<> reference(n); ASSERT_EQUAL_QUIET(reference, h_result); ASSERT_EQUAL_QUIET(reference, d_result); } DECLARE_VARIABLE_UNITTEST(TestGatherToDiscardIterator); template <class Vector> void TestGatherIfSimple(void) { typedef typename Vector::value_type T; Vector flg(5); // predicate array Vector map(5); // gather indices Vector src(8); // source vector Vector dst(5); // destination vector flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0; map[0] = 6; 
map[1] = 2; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; src[5] = 5; src[6] = 6; src[7] = 7; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; thrust::gather_if(map.begin(), map.end(), flg.begin(), src.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 2); ASSERT_EQUAL(dst[2], 0); ASSERT_EQUAL(dst[3], 7); ASSERT_EQUAL(dst[4], 0); } DECLARE_VECTOR_UNITTEST(TestGatherIfSimple); template <typename T> struct is_even_gather_if { __host__ __device__ bool operator()(const T i) const { return (i % 2) == 0; } }; template<typename InputIterator1, typename InputIterator2, typename RandomAccessIterator, typename OutputIterator> OutputIterator gather_if(my_tag, InputIterator1 map_first, InputIterator1 map_last, InputIterator2 stencil, RandomAccessIterator input_first, OutputIterator result) { *result = 13; return result; } void TestGatherIfDispatch() { thrust::device_vector<int> vec(1); thrust::gather_if(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.end()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin())); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestGatherIfDispatch); template <typename T> void TestGatherIf(const size_t n) { const size_t source_size = std::min((size_t) 10, 2 * n); // source vectors to gather from thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size); thrust::device_vector<T> d_source = h_source; // gather indices thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % source_size; thrust::device_vector<unsigned int> d_map = h_map; // gather stencil thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_stencil[i] = h_stencil[i] % 2; thrust::device_vector<unsigned int> d_stencil = h_stencil; // gather destination thrust::host_vector<T> h_output(n); thrust::device_vector<T> d_output(n); thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), h_output.begin(), is_even_gather_if<unsigned int>()); thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), d_output.begin(), is_even_gather_if<unsigned int>()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestGatherIf); template <typename T> void TestGatherIfToDiscardIterator(const size_t n) { const size_t source_size = std::min((size_t) 10, 2 * n); // source vectors to gather from thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size); thrust::device_vector<T> d_source = h_source; // gather indices thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % source_size; thrust::device_vector<unsigned int> d_map = h_map; // gather stencil thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_stencil[i] = h_stencil[i] % 2; thrust::device_vector<unsigned int> d_stencil = h_stencil; thrust::discard_iterator<> h_result = thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned int>()); thrust::discard_iterator<> d_result = thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), thrust::make_discard_iterator(), is_even_gather_if<unsigned int>()); thrust::discard_iterator<> 
reference(n); ASSERT_EQUAL_QUIET(reference, h_result); ASSERT_EQUAL_QUIET(reference, d_result); } DECLARE_VARIABLE_UNITTEST(TestGatherIfToDiscardIterator); template <typename Vector> void TestGatherCountingIterator(void) { #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) && (_DEBUG != 0) KNOWN_FAILURE; #endif typedef typename Vector::value_type T; Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); Vector output(10); // source has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::gather(map.begin(), map.end(), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); // map has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::gather(thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)source.size()), source.begin(), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::gather(thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)output.size()), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_VECTOR_UNITTEST(TestGatherCountingIterator); __THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
f698131acff32f9b07e85ac122dc593709f182e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #define N 30 void add(int *X,int *Y,int *Z) { for(int i=0;i<N;i++) for(int j=0;j<N;j++) Z[i*N+j] = X[i*N+j]+Y[i*N+j]; } __global__ void add_kernel(int *X,int *Y,int *Z) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; if (i < N && j < N) Z[i*N+j] = X[i*N+j]+Y[i*N+j]; } int main() { int X[N*N]; int Y[N*N]; for(int i=0;i<N;i++) for(int j=0;j<N;j++) { X[i*N+j]=-1; Y[i*N+j]= 1; } //Output matrix int Z[N*N]; int *d_X,*d_Y,*d_Z; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipMalloc((void**) &d_X, (N*N)*sizeof(int)); hipMalloc((void**) &d_Y, (N*N)*sizeof(int)); hipMalloc((void**) &d_Z, (N*N)*sizeof(int)); hipMemcpy(d_X, &X,(N*N)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_Y, &Y,(N*N)*sizeof(int), hipMemcpyHostToDevice); dim3 dimGrid(13,13,1); dim3 dimBlock(22,22,1); hipEventRecord(start); hipLaunchKernelGGL(( add_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_X,d_Y,d_Z); hipEventRecord(stop); //add(X, Y, Z); hipMemcpy(&Z, d_Z,(N*N)*sizeof(int), hipMemcpyDeviceToHost); hipFree(d_X); hipFree(d_Y); hipFree(d_Z); hipEventSynchronize(stop); float Timeused = 0; hipEventElapsedTime(&Timeused, start, stop); for(int i=0;i<N;i++) for(int j=0;j<N;j++){ printf("%d ",Z[i*N+j]); } printf("\n"); printf("Time used:%f ",Timeused); return 0; }
f698131acff32f9b07e85ac122dc593709f182e7.cu
#include<stdio.h> #define N 30 void add(int *X,int *Y,int *Z) { for(int i=0;i<N;i++) for(int j=0;j<N;j++) Z[i*N+j] = X[i*N+j]+Y[i*N+j]; } __global__ void add_kernel(int *X,int *Y,int *Z) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; if (i < N && j < N) Z[i*N+j] = X[i*N+j]+Y[i*N+j]; } int main() { int X[N*N]; int Y[N*N]; for(int i=0;i<N;i++) for(int j=0;j<N;j++) { X[i*N+j]=-1; Y[i*N+j]= 1; } //Output matrix int Z[N*N]; int *d_X,*d_Y,*d_Z; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMalloc((void**) &d_X, (N*N)*sizeof(int)); cudaMalloc((void**) &d_Y, (N*N)*sizeof(int)); cudaMalloc((void**) &d_Z, (N*N)*sizeof(int)); cudaMemcpy(d_X, &X,(N*N)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_Y, &Y,(N*N)*sizeof(int), cudaMemcpyHostToDevice); dim3 dimGrid(13,13,1); dim3 dimBlock(22,22,1); cudaEventRecord(start); add_kernel<<<dimGrid, dimBlock>>>(d_X,d_Y,d_Z); cudaEventRecord(stop); //add(X, Y, Z); cudaMemcpy(&Z, d_Z,(N*N)*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_X); cudaFree(d_Y); cudaFree(d_Z); cudaEventSynchronize(stop); float Timeused = 0; cudaEventElapsedTime(&Timeused, start, stop); for(int i=0;i<N;i++) for(int j=0;j<N;j++){ printf("%d ",Z[i*N+j]); } printf("\n"); printf("Time used:%f ",Timeused); return 0; }

91672b13fb7583d6c19a163633dc550f8fe66825.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #define TILE_WIDTH 16 __global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int ncols){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float PValue = 0; //PValue is used to store element of the output MatrixMulKernel int k = 0; if(row<ncols && col<ncols){ for(k = 0; k < ncols; k++){ float Melement = Md[row * ncols + k]; float Nelement = Nd[k * ncols + col]; PValue += Melement * Nelement; } Pd[row * ncols +col] = PValue; } } int main(int argc, char **argv){ int i,j; int Width; printf("Enter Width: "); scanf("%d", &Width); int size = Width * Width * sizeof(float); float M[Width][Width], N[Width][Width], P[Width][Width]; float *Md, *Nd, *Pd; int newValue = (Width + TILE_WIDTH -1)/TILE_WIDTH; for(i = 0; i < Width; i++){ for(j = 0; j < Width; j++){ M[i][j] = 1; N[i][j] = 2; } } hipMalloc((void**)&Md, size); hipMalloc((void**)&Nd, size); hipMalloc((void**)&Pd, size); hipMemcpy(Md, M, size, hipMemcpyHostToDevice); hipMemcpy(Nd, N, size, hipMemcpyHostToDevice); //setup the execution configuration dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(newValue, newValue); //launch the device computation thread! hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, Width); //read P from the device hipMemcpy(P, Pd, size, hipMemcpyDeviceToHost); //free device matrices hipFree(Md); hipFree(Nd); hipFree(Pd); for(i = 0; i < Width; i++){ for(j = 0; j < Width; j++){ printf("%.2f ", P[i][j]); } printf("\n"); } }
91672b13fb7583d6c19a163633dc550f8fe66825.cu
#include <stdio.h> #include <math.h> #define TILE_WIDTH 16 __global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int ncols){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float PValue = 0; //PValue is used to store element of the output MatrixMulKernel int k = 0; if(row<ncols && col<ncols){ for(k = 0; k < ncols; k++){ float Melement = Md[row * ncols + k]; float Nelement = Nd[k * ncols + col]; PValue += Melement * Nelement; } Pd[row * ncols +col] = PValue; } } int main(int argc, char **argv){ int i,j; int Width; printf("Enter Width: "); scanf("%d", &Width); int size = Width * Width * sizeof(float); float M[Width][Width], N[Width][Width], P[Width][Width]; float *Md, *Nd, *Pd; int newValue = (Width + TILE_WIDTH -1)/TILE_WIDTH; for(i = 0; i < Width; i++){ for(j = 0; j < Width; j++){ M[i][j] = 1; N[i][j] = 2; } } cudaMalloc((void**)&Md, size); cudaMalloc((void**)&Nd, size); cudaMalloc((void**)&Pd, size); cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice); cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice); //setup the execution configuration dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); dim3 dimGrid(newValue, newValue); //launch the device computation thread! MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width); //read P from the device cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost); //free device matrices cudaFree(Md); cudaFree(Nd); cudaFree(Pd); for(i = 0; i < Width; i++){ for(j = 0; j < Width; j++){ printf("%.2f ", P[i][j]); } printf("\n"); } }
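A caveat on the host side of the file above: M, N and P are declared as Width x Width arrays with a runtime Width, which relies on a VLA extension and places the whole matrices on the stack, so moderately large widths crash before any CUDA call runs. Below is a hypothetical heap-based helper for the host allocation only (the name make_host_matrix is not from the original file); the device code and kernel launch would stay exactly as written, with the returned pointers passed to cudaMemcpy and released with free() at the end.

#include <cstdlib>

// Allocate a width x width matrix on the heap and fill it with a constant value.
static float* make_host_matrix(int width, float value)
{
    float *m = (float*)malloc((size_t)width * width * sizeof(float));
    if (m) {
        for (int i = 0; i < width * width; i++) m[i] = value;
    }
    return m;   // caller checks for NULL and frees with free()
}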
72f4c6503deec1114ebef59344483af717ed8fa4.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <utilities/cuda_utils.hpp> #include <type_traits> #include <thrust/device_vector.h> #include <thrust/count.h> #include <utility/utility.hpp> #include <cuspatial/query.hpp> namespace { /** *@brief Thrust functor for spatial window query on point data (x/y) * */ template<typename T> struct spatial_window_functor_xy { T left, bottom, right, top; __device__ spatial_window_functor_xy(T left, T bottom, T right, T top) : left(left), bottom(bottom), right(right), top(top) {} __device__ bool operator()(const thrust::tuple<T, T>& t) { T x= thrust::get<0>(t); T y= thrust::get<1>(t); return x > left && x < right && y > bottom && y < top; } }; struct sw_point_functor { template <typename T> static constexpr bool is_supported() { return std::is_floating_point<T>::value; } template <typename T> T get_scalar(const gdf_scalar v) { T ret{}; // Safe type pun, compiler should optimize away the memcpy memcpy(&ret, &v.data, sizeof(T)); return ret; } template <typename T, std::enable_if_t< is_supported<T>() >* = nullptr> std::pair<gdf_column,gdf_column> operator()(const gdf_scalar left, const gdf_scalar bottom, const gdf_scalar right, const gdf_scalar top, const gdf_column& x, const gdf_column& y) { T q_left = get_scalar<T>(left); T q_right = get_scalar<T>(right); T q_bottom = get_scalar<T>(bottom); T q_top = get_scalar<T>(top); CUDF_EXPECTS(q_left < q_right, "left must be less than right in a spatial window query"); CUDF_EXPECTS(q_bottom < q_top, "bottom must be less than top in a spatial window query"); hipStream_t stream{0}; auto exec_policy = rmm::exec_policy(stream)->on(stream); auto in_it = thrust::make_zip_iterator(thrust::make_tuple( static_cast<T*>(x.data), static_cast<T*>(y.data))); int num_hits = thrust::count_if(exec_policy, in_it, in_it + x.size, spatial_window_functor_xy<T>(q_left, q_bottom, q_right, q_top)); T* temp_x{nullptr}; T* temp_y{nullptr}; RMM_TRY( RMM_ALLOC(&temp_x, num_hits * sizeof(T), 0) ); RMM_TRY( RMM_ALLOC(&temp_y, num_hits * sizeof(T), 0) ); auto out_it = thrust::make_zip_iterator(thrust::make_tuple(temp_x, temp_y)); thrust::copy_if(exec_policy, in_it, in_it + x.size, out_it, spatial_window_functor_xy<T>(q_left, q_bottom, q_right, q_top)); gdf_column out_x{}, out_y{}; gdf_column_view_augmented(&out_x, temp_x, nullptr, num_hits, x.dtype, 0, gdf_dtype_extra_info{TIME_UNIT_NONE}, "x"); gdf_column_view_augmented(&out_y, temp_y, nullptr, num_hits, y.dtype, 0, gdf_dtype_extra_info{TIME_UNIT_NONE}, "y"); return std::make_pair(out_x, out_y); } template <typename T, std::enable_if_t< !is_supported<T>() >* = nullptr> std::pair<gdf_column,gdf_column> operator()(const gdf_scalar left, const gdf_scalar bottom, const gdf_scalar right, const gdf_scalar top, const gdf_column& x, const gdf_column& y) { CUDF_FAIL("Non-floating point operation is not supported"); } }; } // namespace anonymous namespace 
cuspatial { /* * Return all points (x,y) that fall within a query window (x1,y1,x2,y2) * see query.hpp */ std::pair<gdf_column,gdf_column> spatial_window_points(const gdf_scalar& left, const gdf_scalar& bottom, const gdf_scalar& right, const gdf_scalar& top, const gdf_column& x, const gdf_column& y) { CUDF_EXPECTS(x.dtype == y.dtype, "point type mismatch between x/y arrays"); CUDF_EXPECTS(x.size == y.size, "#of points mismatch between x/y arrays"); CUDF_EXPECTS(x.null_count == 0 && y.null_count == 0, "this version does not support point data that contains nulls"); std::pair<gdf_column,gdf_column> res = cudf::type_dispatcher(x.dtype, sw_point_functor(), left, bottom, right, top, x, y); return res; } }// namespace cuspatial
72f4c6503deec1114ebef59344483af717ed8fa4.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <utilities/cuda_utils.hpp> #include <type_traits> #include <thrust/device_vector.h> #include <thrust/count.h> #include <utility/utility.hpp> #include <cuspatial/query.hpp> namespace { /** *@brief Thrust functor for spatial window query on point data (x/y) * */ template<typename T> struct spatial_window_functor_xy { T left, bottom, right, top; __device__ spatial_window_functor_xy(T left, T bottom, T right, T top) : left(left), bottom(bottom), right(right), top(top) {} __device__ bool operator()(const thrust::tuple<T, T>& t) { T x= thrust::get<0>(t); T y= thrust::get<1>(t); return x > left && x < right && y > bottom && y < top; } }; struct sw_point_functor { template <typename T> static constexpr bool is_supported() { return std::is_floating_point<T>::value; } template <typename T> T get_scalar(const gdf_scalar v) { T ret{}; // Safe type pun, compiler should optimize away the memcpy memcpy(&ret, &v.data, sizeof(T)); return ret; } template <typename T, std::enable_if_t< is_supported<T>() >* = nullptr> std::pair<gdf_column,gdf_column> operator()(const gdf_scalar left, const gdf_scalar bottom, const gdf_scalar right, const gdf_scalar top, const gdf_column& x, const gdf_column& y) { T q_left = get_scalar<T>(left); T q_right = get_scalar<T>(right); T q_bottom = get_scalar<T>(bottom); T q_top = get_scalar<T>(top); CUDF_EXPECTS(q_left < q_right, "left must be less than right in a spatial window query"); CUDF_EXPECTS(q_bottom < q_top, "bottom must be less than top in a spatial window query"); cudaStream_t stream{0}; auto exec_policy = rmm::exec_policy(stream)->on(stream); auto in_it = thrust::make_zip_iterator(thrust::make_tuple( static_cast<T*>(x.data), static_cast<T*>(y.data))); int num_hits = thrust::count_if(exec_policy, in_it, in_it + x.size, spatial_window_functor_xy<T>(q_left, q_bottom, q_right, q_top)); T* temp_x{nullptr}; T* temp_y{nullptr}; RMM_TRY( RMM_ALLOC(&temp_x, num_hits * sizeof(T), 0) ); RMM_TRY( RMM_ALLOC(&temp_y, num_hits * sizeof(T), 0) ); auto out_it = thrust::make_zip_iterator(thrust::make_tuple(temp_x, temp_y)); thrust::copy_if(exec_policy, in_it, in_it + x.size, out_it, spatial_window_functor_xy<T>(q_left, q_bottom, q_right, q_top)); gdf_column out_x{}, out_y{}; gdf_column_view_augmented(&out_x, temp_x, nullptr, num_hits, x.dtype, 0, gdf_dtype_extra_info{TIME_UNIT_NONE}, "x"); gdf_column_view_augmented(&out_y, temp_y, nullptr, num_hits, y.dtype, 0, gdf_dtype_extra_info{TIME_UNIT_NONE}, "y"); return std::make_pair(out_x, out_y); } template <typename T, std::enable_if_t< !is_supported<T>() >* = nullptr> std::pair<gdf_column,gdf_column> operator()(const gdf_scalar left, const gdf_scalar bottom, const gdf_scalar right, const gdf_scalar top, const gdf_column& x, const gdf_column& y) { CUDF_FAIL("Non-floating point operation is not supported"); } }; } // namespace anonymous namespace cuspatial { /* * Return all points (x,y) that fall within 
a query window (x1,y1,x2,y2) * see query.hpp */ std::pair<gdf_column,gdf_column> spatial_window_points(const gdf_scalar& left, const gdf_scalar& bottom, const gdf_scalar& right, const gdf_scalar& top, const gdf_column& x, const gdf_column& y) { CUDF_EXPECTS(x.dtype == y.dtype, "point type mismatch between x/y arrays"); CUDF_EXPECTS(x.size == y.size, "#of points mismatch between x/y arrays"); CUDF_EXPECTS(x.null_count == 0 && y.null_count == 0, "this version does not support point data that contains nulls"); std::pair<gdf_column,gdf_column> res = cudf::type_dispatcher(x.dtype, sw_point_functor(), left, bottom, right, top, x, y); return res; } }// namespace cuspatial
db667acb2b8a0e7f68c303c6044e321d59ba837f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <ATen/hip/HIPApplyUtils.cuh> #ifdef WITH_CUDA #include "../box_iou_rotated/box_iou_rotated_utils.h" #endif // TODO avoid this when pytorch supports "same directory" hipification #define WITH_HIP #ifdef WITH_HIP #include "box_iou_rotated/box_iou_rotated_utils.h" #endif using namespace detectron2; namespace { int const threadsPerBlock = sizeof(unsigned long long) * 8; } template <typename T> __global__ void nms_rotated_cuda_kernel( const int n_boxes, const float iou_threshold, const T* dev_boxes, unsigned long long* dev_mask) { // nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); // Compared to nms_cuda_kernel, where each box is represented with 4 values // (x1, y1, x2, y2), each rotated box is represented with 5 values // (x_center, y_center, width, height, angle_degrees) here. __shared__ T block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const T* cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { // Instead of devIoU used by original horizontal nms, here // we use the single_box_iou_rotated function from box_iou_rotated_utils.h if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 5) > iou_threshold) { t |= 1ULL << i; } } const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } namespace detectron2 { at::Tensor nms_rotated_cuda( // input must be contiguous const at::Tensor& dets, const at::Tensor& scores, float iou_threshold) { // using scalar_t = float; AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor"); AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor"); at::hip::HIPGuardMasqueradingAsCUDA device_guard(dets.device()); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto dets_sorted = dets.index_select(0, order_t); auto dets_num = dets.size(0); const int col_blocks = at::cuda::ATenCeilDiv(static_cast<int>(dets_num), threadsPerBlock); at::Tensor mask = at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); dim3 blocks(col_blocks, col_blocks); dim3 threads(threadsPerBlock); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES( dets_sorted.scalar_type(), "nms_rotated_kernel_cuda", [&] { hipLaunchKernelGGL(( 
nms_rotated_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream, dets_num, iou_threshold, dets_sorted.data_ptr<scalar_t>(), (unsigned long long*)mask.data_ptr<int64_t>()); }); at::Tensor mask_cpu = mask.to(at::kCPU); unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>(); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data_ptr<int64_t>(); int num_to_keep = 0; for (int i = 0; i < dets_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long* p = mask_host + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } AT_CUDA_CHECK(hipGetLastError()); return order_t.index( {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) .to(order_t.device(), keep.scalar_type())}); } } // namespace detectron2
db667acb2b8a0e7f68c303c6044e321d59ba837f.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #ifdef WITH_CUDA #include "../box_iou_rotated/box_iou_rotated_utils.h" #endif // TODO avoid this when pytorch supports "same directory" hipification #define WITH_HIP #ifdef WITH_HIP #include "box_iou_rotated/box_iou_rotated_utils.h" #endif using namespace detectron2; namespace { int const threadsPerBlock = sizeof(unsigned long long) * 8; } template <typename T> __global__ void nms_rotated_cuda_kernel( const int n_boxes, const float iou_threshold, const T* dev_boxes, unsigned long long* dev_mask) { // nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); // Compared to nms_cuda_kernel, where each box is represented with 4 values // (x1, y1, x2, y2), each rotated box is represented with 5 values // (x_center, y_center, width, height, angle_degrees) here. __shared__ T block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const T* cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { // Instead of devIoU used by original horizontal nms, here // we use the single_box_iou_rotated function from box_iou_rotated_utils.h if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 5) > iou_threshold) { t |= 1ULL << i; } } const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } namespace detectron2 { at::Tensor nms_rotated_cuda( // input must be contiguous const at::Tensor& dets, const at::Tensor& scores, float iou_threshold) { // using scalar_t = float; AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor"); AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor"); at::cuda::CUDAGuard device_guard(dets.device()); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto dets_sorted = dets.index_select(0, order_t); auto dets_num = dets.size(0); const int col_blocks = at::cuda::ATenCeilDiv(static_cast<int>(dets_num), threadsPerBlock); at::Tensor mask = at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); dim3 blocks(col_blocks, col_blocks); dim3 threads(threadsPerBlock); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES( dets_sorted.scalar_type(), "nms_rotated_kernel_cuda", [&] { nms_rotated_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>( dets_num, iou_threshold, dets_sorted.data_ptr<scalar_t>(), (unsigned long long*)mask.data_ptr<int64_t>()); 
}); at::Tensor mask_cpu = mask.to(at::kCPU); unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>(); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data_ptr<int64_t>(); int num_to_keep = 0; for (int i = 0; i < dets_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long* p = mask_host + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } AT_CUDA_CHECK(cudaGetLastError()); return order_t.index( {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) .to(order_t.device(), keep.scalar_type())}); } } // namespace detectron2
5ba9a318a903716c980c513ed148f48f25077ed1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" struct MscData { float a; float b; }; struct UrbanMsc { const MscData& data; __device__ decltype(auto) make_calc_thing() const { return [this](float step) { return this->data.a * step + this->data.b; }; } }; template <class F> __device__ void apply_track(F calc_thing, float step, float* result) { *result = calc_thing(step); } __global__ void apply_kernel(const MscData data, float const* __restrict__ step, float* __restrict__ result) { UrbanMsc msc{data}; apply_track(msc.make_calc_thing(), step[threadIdx.x], &result[threadIdx.x]); }
5ba9a318a903716c980c513ed148f48f25077ed1.cu
struct MscData { float a; float b; }; struct UrbanMsc { const MscData& data; __device__ decltype(auto) make_calc_thing() const { return [this](float step) { return this->data.a * step + this->data.b; }; } }; template <class F> __device__ void apply_track(F calc_thing, float step, float* result) { *result = calc_thing(step); } __global__ void apply_kernel(const MscData data, float const* __restrict__ step, float* __restrict__ result) { UrbanMsc msc{data}; apply_track(msc.make_calc_thing(), step[threadIdx.x], &result[threadIdx.x]); }
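Because the pair above is only a kernel plus two small structs, a tiny host driver makes it concrete. The driver below is an assumption for illustration (it is not part of the original file) and presumes it is compiled in the same translation unit as the CUDA version of apply_kernel.

#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    const int n = 32;                 // one block; the kernel indexes by threadIdx.x only
    MscData data{2.0f, 1.0f};         // calc_thing(step) = 2*step + 1
    float h_step[n], h_result[n];
    for (int i = 0; i < n; i++) h_step[i] = (float)i;

    float *d_step = nullptr, *d_result = nullptr;
    cudaMalloc(&d_step, n * sizeof(float));
    cudaMalloc(&d_result, n * sizeof(float));
    cudaMemcpy(d_step, h_step, n * sizeof(float), cudaMemcpyHostToDevice);

    apply_kernel<<<1, n>>>(data, d_step, d_result);
    cudaDeviceSynchronize();

    cudaMemcpy(h_result, d_result, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("result[3] = %.1f\n", h_result[3]);   // expected: 7.0
    cudaFree(d_step);
    cudaFree(d_result);
    return 0;
}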
676d0ec79202b90bcef63003b6d1239daa059711.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gbkfit/cuda/kernels.hpp" #include "gbkfit/cuda/wrapper.hpp" namespace gbkfit::cuda { constexpr int BLOCK_SIZE = 256; template<typename T> void Wrapper<T>::math_complex_multiply_and_scale( typename cufft<T>::complex* arr1, typename cufft<T>::complex* arr2, int n, T scale) { dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); hipLaunchKernelGGL(( kernels::math_complex_multiply_and_scale), dim3(gsize), dim3(bsize), 0, 0, arr1, arr2, n, scale); hipDeviceSynchronize(); } template<typename T> void Wrapper<T>::dmodel_dcube_downscale( int scale_x, int scale_y, int scale_z, int offset_x, int offset_y, int offset_z, int src_size_x, int src_size_y, int src_size_z, int dst_size_x, int dst_size_y, int dst_size_z, const T* src_cube, T* dst_cube) { const int n = dst_size_x * dst_size_y * dst_size_z; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); hipLaunchKernelGGL(( kernels::dmodel_dcube_downscale), dim3(gsize), dim3(bsize), 0, 0, scale_x, scale_y, scale_z, offset_x, offset_y, offset_z, src_size_x, src_size_y, src_size_z, dst_size_x, dst_size_y, dst_size_z, src_cube, dst_cube); hipDeviceSynchronize(); } template<typename T> void Wrapper<T>::dmodel_dcube_mask( T cutoff, bool apply, int size_x, int size_y, int size_z, T* dcube_d, T* dcube_m, T* dcube_w) { const int n = size_x * size_y; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); hipLaunchKernelGGL(( kernels::dmodel_dcube_mask), dim3(gsize), dim3(bsize), 0, 0, cutoff, apply, size_x, size_y, size_z, dcube_d, dcube_m, dcube_w); hipDeviceSynchronize(); } template<typename T> void Wrapper<T>::dmodel_mmaps_moments( int size_x, int size_y, int size_z, T step_x, T step_y, T step_z, T zero_x, T zero_y, T zero_z, const T* dcube_d, const T* dcube_w, T cutoff, int norders, const int* orders, T* mmaps_d, T* mmaps_m, T* mmaps_w) { const int n = size_x * size_y; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); hipLaunchKernelGGL(( kernels::dcube_moments), dim3(gsize), dim3(bsize), 0, 0, size_x, size_y, size_z, step_x, step_y, step_z, zero_x, zero_y, zero_z, dcube_d, dcube_w, cutoff, norders, orders, mmaps_d, mmaps_m, mmaps_w); hipDeviceSynchronize(); } template<typename T> void Wrapper<T>::gmodel_wcube_evaluate( int spat_size_x, int spat_size_y, int spat_size_z, int spec_size_z, const T* spat_cube, T* spec_cube) { const int n = spat_size_x * spat_size_y; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); hipLaunchKernelGGL(( kernels::gmodel_wcube_evaluate), dim3(gsize), dim3(bsize), 0, 0, spat_size_x, spat_size_y, spat_size_z, spec_size_z, spat_cube, spec_cube); hipDeviceSynchronize(); } template<typename T> void Wrapper<T>::gmodel_mcdisk_evaluate( T cflux, int nclouds, const int* ncloudscsum, int ncloudscsum_len, const bool* hasordint, bool loose, bool tilted, int nrnodes, const T* rnodes, const T* vsys, const T* xpos, const T* ypos, const T* posa, const T* incl, int nrt, const int* rpt_uids, const T* rpt_cvalues, const int* rpt_ccounts, const T* rpt_pvalues, const int* rpt_pcounts, const int* rht_uids, const T* rht_cvalues, const int* rht_ccounts, const T* rht_pvalues, const int* rht_pcounts, int nvt, const int* vpt_uids, const T* vpt_cvalues, const int* vpt_ccounts, const T* vpt_pvalues, const int* vpt_pcounts, const int* vht_uids, const T* vht_cvalues, const int* vht_ccounts, const T* vht_pvalues, const int* vht_pcounts, int ndt, const int* dpt_uids, const T* dpt_cvalues, const int* 
dpt_ccounts, const T* dpt_pvalues, const int* dpt_pcounts, const int* dht_uids, const T* dht_cvalues, const int* dht_ccounts, const T* dht_pvalues, const int* dht_pcounts, int nzt, const int* zpt_uids, const T* zpt_cvalues, const int* zpt_ccounts, const T* zpt_pvalues, const int* zpt_pcounts, int nst, const int* spt_uids, const T* spt_cvalues, const int* spt_ccounts, const T* spt_pvalues, const int* spt_pcounts, int nwt, const int* wpt_uids, const T* wpt_cvalues, const int* wpt_ccounts, const T* wpt_pvalues, const int* wpt_pcounts, const T* opacity, int spat_size_x, int spat_size_y, int spat_size_z, T spat_step_x, T spat_step_y, T spat_step_z, T spat_zero_x, T spat_zero_y, T spat_zero_z, int spec_size, T spec_step, T spec_zero, T* image, T* scube, T* wdata, T* wdata_cmp, T* rdata, T* rdata_cmp, T* ordata, T* ordata_cmp, T* vdata_cmp, T* ddata_cmp) { const int n = nclouds; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); hipLaunchKernelGGL(( kernels::gmodel_mcdisk_evaluate), dim3(gsize), dim3(bsize), 0, 0, cflux, nclouds, ncloudscsum, ncloudscsum_len, hasordint, loose, tilted, nrnodes, rnodes, vsys, xpos, ypos, posa, incl, nrt, rpt_uids, rpt_cvalues, rpt_ccounts, rpt_pvalues, rpt_pcounts, rht_uids, rht_cvalues, rht_ccounts, rht_pvalues, rht_pcounts, nvt, vpt_uids, vpt_cvalues, vpt_ccounts, vpt_pvalues, vpt_pcounts, vht_uids, vht_cvalues, vht_ccounts, vht_pvalues, vht_pcounts, ndt, dpt_uids, dpt_cvalues, dpt_ccounts, dpt_pvalues, dpt_pcounts, dht_uids, dht_cvalues, dht_ccounts, dht_pvalues, dht_pcounts, nzt, zpt_uids, zpt_cvalues, zpt_ccounts, zpt_pvalues, zpt_pcounts, nst, spt_uids, spt_cvalues, spt_ccounts, spt_pvalues, spt_pcounts, nwt, wpt_uids, wpt_cvalues, wpt_ccounts, wpt_pvalues, wpt_pcounts, opacity, spat_size_x, spat_size_y, spat_size_z, spat_step_x, spat_step_y, spat_step_z, spat_zero_x, spat_zero_y, spat_zero_z, spec_size, spec_step, spec_zero, image, scube, wdata, wdata_cmp, rdata, rdata_cmp, ordata, ordata_cmp, vdata_cmp, ddata_cmp); hipDeviceSynchronize(); } template<typename T> void Wrapper<T>::gmodel_smdisk_evaluate( bool loose, bool tilted, int nrnodes, const T* rnodes, const T* vsys, const T* xpos, const T* ypos, const T* posa, const T* incl, int nrt, const int* rpt_uids, const T* rpt_cvalues, const int* rpt_ccounts, const T* rpt_pvalues, const int* rpt_pcounts, const int* rht_uids, const T* rht_cvalues, const int* rht_ccounts, const T* rht_pvalues, const int* rht_pcounts, int nvt, const int* vpt_uids, const T* vpt_cvalues, const int* vpt_ccounts, const T* vpt_pvalues, const int* vpt_pcounts, const int* vht_uids, const T* vht_cvalues, const int* vht_ccounts, const T* vht_pvalues, const int* vht_pcounts, int ndt, const int* dpt_uids, const T* dpt_cvalues, const int* dpt_ccounts, const T* dpt_pvalues, const int* dpt_pcounts, const int* dht_uids, const T* dht_cvalues, const int* dht_ccounts, const T* dht_pvalues, const int* dht_pcounts, int nzt, const int* zpt_uids, const T* zpt_cvalues, const int* zpt_ccounts, const T* zpt_pvalues, const int* zpt_pcounts, int nst, const int* spt_uids, const T* spt_cvalues, const int* spt_ccounts, const T* spt_pvalues, const int* spt_pcounts, int nwt, const int* wpt_uids, const T* wpt_cvalues, const int* wpt_ccounts, const T* wpt_pvalues, const int* wpt_pcounts, const T* opacity, int spat_size_x, int spat_size_y, int spat_size_z, T spat_step_x, T spat_step_y, T spat_step_z, T spat_zero_x, T spat_zero_y, T spat_zero_z, int spec_size, T spec_step, T spec_zero, T* image, T* scube, T* wdata, T* wdata_cmp, T* rdata, T* rdata_cmp, 
T* ordata, T* ordata_cmp, T* vdata_cmp, T* ddata_cmp) { const int n = spat_size_x * spat_size_y; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); hipLaunchKernelGGL(( kernels::gmodel_smdisk_evaluate), dim3(gsize), dim3(bsize), 0, 0, loose, tilted, nrnodes, rnodes, vsys, xpos, ypos, posa, incl, nrt, rpt_uids, rpt_cvalues, rpt_ccounts, rpt_pvalues, rpt_pcounts, rht_uids, rht_cvalues, rht_ccounts, rht_pvalues, rht_pcounts, nvt, vpt_uids, vpt_cvalues, vpt_ccounts, vpt_pvalues, vpt_pcounts, vht_uids, vht_cvalues, vht_ccounts, vht_pvalues, vht_pcounts, ndt, dpt_uids, dpt_cvalues, dpt_ccounts, dpt_pvalues, dpt_pcounts, dht_uids, dht_cvalues, dht_ccounts, dht_pvalues, dht_pcounts, nzt, zpt_uids, zpt_cvalues, zpt_ccounts, zpt_pvalues, zpt_pcounts, nst, spt_uids, spt_cvalues, spt_ccounts, spt_pvalues, spt_pcounts, nwt, wpt_uids, wpt_cvalues, wpt_ccounts, wpt_pvalues, wpt_pcounts, opacity, spat_size_x, spat_size_y, spat_size_z, spat_step_x, spat_step_y, spat_step_z, spat_zero_x, spat_zero_y, spat_zero_z, spec_size, spec_step, spec_zero, image, scube, wdata, wdata_cmp, rdata, rdata_cmp, ordata, ordata_cmp, vdata_cmp, ddata_cmp); hipDeviceSynchronize(); } template<typename T> void Wrapper<T>::objective_count_pixels( const T* data1, const T* data2, int size, T epsilon, int* counts) { const int n = size; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); hipLaunchKernelGGL(( kernels::objective_count_pixels), dim3(gsize), dim3(bsize), 0, 0, data1, data2, size, epsilon, counts); hipDeviceSynchronize(); } template<typename T> void Wrapper<T>::objective_residual( const T* obs_d, const T* obs_e, const T* obs_m, const T* mdl_d, const T* mdl_w, const T* mdl_m, int size, T weight, T* res) { const int n = size; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); hipLaunchKernelGGL(( kernels::objective_residual), dim3(gsize), dim3(bsize), 0, 0, obs_d, obs_e, obs_m, mdl_d, mdl_w, mdl_m, size, weight, res); hipDeviceSynchronize(); } #define INSTANTIATE(T)\ template struct Wrapper<T>; INSTANTIATE(float) #undef INSTANTIATE } // namespace gbkfit::cuda
676d0ec79202b90bcef63003b6d1239daa059711.cu
#include "gbkfit/cuda/kernels.hpp" #include "gbkfit/cuda/wrapper.hpp" namespace gbkfit::cuda { constexpr int BLOCK_SIZE = 256; template<typename T> void Wrapper<T>::math_complex_multiply_and_scale( typename cufft<T>::complex* arr1, typename cufft<T>::complex* arr2, int n, T scale) { dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); kernels::math_complex_multiply_and_scale<<<gsize, bsize>>>( arr1, arr2, n, scale); cudaDeviceSynchronize(); } template<typename T> void Wrapper<T>::dmodel_dcube_downscale( int scale_x, int scale_y, int scale_z, int offset_x, int offset_y, int offset_z, int src_size_x, int src_size_y, int src_size_z, int dst_size_x, int dst_size_y, int dst_size_z, const T* src_cube, T* dst_cube) { const int n = dst_size_x * dst_size_y * dst_size_z; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); kernels::dmodel_dcube_downscale<<<gsize, bsize>>>( scale_x, scale_y, scale_z, offset_x, offset_y, offset_z, src_size_x, src_size_y, src_size_z, dst_size_x, dst_size_y, dst_size_z, src_cube, dst_cube); cudaDeviceSynchronize(); } template<typename T> void Wrapper<T>::dmodel_dcube_mask( T cutoff, bool apply, int size_x, int size_y, int size_z, T* dcube_d, T* dcube_m, T* dcube_w) { const int n = size_x * size_y; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); kernels::dmodel_dcube_mask<<<gsize, bsize>>>( cutoff, apply, size_x, size_y, size_z, dcube_d, dcube_m, dcube_w); cudaDeviceSynchronize(); } template<typename T> void Wrapper<T>::dmodel_mmaps_moments( int size_x, int size_y, int size_z, T step_x, T step_y, T step_z, T zero_x, T zero_y, T zero_z, const T* dcube_d, const T* dcube_w, T cutoff, int norders, const int* orders, T* mmaps_d, T* mmaps_m, T* mmaps_w) { const int n = size_x * size_y; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); kernels::dcube_moments<<<gsize, bsize>>>( size_x, size_y, size_z, step_x, step_y, step_z, zero_x, zero_y, zero_z, dcube_d, dcube_w, cutoff, norders, orders, mmaps_d, mmaps_m, mmaps_w); cudaDeviceSynchronize(); } template<typename T> void Wrapper<T>::gmodel_wcube_evaluate( int spat_size_x, int spat_size_y, int spat_size_z, int spec_size_z, const T* spat_cube, T* spec_cube) { const int n = spat_size_x * spat_size_y; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); kernels::gmodel_wcube_evaluate<<<gsize, bsize>>>( spat_size_x, spat_size_y, spat_size_z, spec_size_z, spat_cube, spec_cube); cudaDeviceSynchronize(); } template<typename T> void Wrapper<T>::gmodel_mcdisk_evaluate( T cflux, int nclouds, const int* ncloudscsum, int ncloudscsum_len, const bool* hasordint, bool loose, bool tilted, int nrnodes, const T* rnodes, const T* vsys, const T* xpos, const T* ypos, const T* posa, const T* incl, int nrt, const int* rpt_uids, const T* rpt_cvalues, const int* rpt_ccounts, const T* rpt_pvalues, const int* rpt_pcounts, const int* rht_uids, const T* rht_cvalues, const int* rht_ccounts, const T* rht_pvalues, const int* rht_pcounts, int nvt, const int* vpt_uids, const T* vpt_cvalues, const int* vpt_ccounts, const T* vpt_pvalues, const int* vpt_pcounts, const int* vht_uids, const T* vht_cvalues, const int* vht_ccounts, const T* vht_pvalues, const int* vht_pcounts, int ndt, const int* dpt_uids, const T* dpt_cvalues, const int* dpt_ccounts, const T* dpt_pvalues, const int* dpt_pcounts, const int* dht_uids, const T* dht_cvalues, const int* dht_ccounts, const T* dht_pvalues, const int* dht_pcounts, int nzt, const int* zpt_uids, const T* zpt_cvalues, const int* zpt_ccounts, const T* zpt_pvalues, 
const int* zpt_pcounts, int nst, const int* spt_uids, const T* spt_cvalues, const int* spt_ccounts, const T* spt_pvalues, const int* spt_pcounts, int nwt, const int* wpt_uids, const T* wpt_cvalues, const int* wpt_ccounts, const T* wpt_pvalues, const int* wpt_pcounts, const T* opacity, int spat_size_x, int spat_size_y, int spat_size_z, T spat_step_x, T spat_step_y, T spat_step_z, T spat_zero_x, T spat_zero_y, T spat_zero_z, int spec_size, T spec_step, T spec_zero, T* image, T* scube, T* wdata, T* wdata_cmp, T* rdata, T* rdata_cmp, T* ordata, T* ordata_cmp, T* vdata_cmp, T* ddata_cmp) { const int n = nclouds; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); kernels::gmodel_mcdisk_evaluate<<<gsize, bsize>>>( cflux, nclouds, ncloudscsum, ncloudscsum_len, hasordint, loose, tilted, nrnodes, rnodes, vsys, xpos, ypos, posa, incl, nrt, rpt_uids, rpt_cvalues, rpt_ccounts, rpt_pvalues, rpt_pcounts, rht_uids, rht_cvalues, rht_ccounts, rht_pvalues, rht_pcounts, nvt, vpt_uids, vpt_cvalues, vpt_ccounts, vpt_pvalues, vpt_pcounts, vht_uids, vht_cvalues, vht_ccounts, vht_pvalues, vht_pcounts, ndt, dpt_uids, dpt_cvalues, dpt_ccounts, dpt_pvalues, dpt_pcounts, dht_uids, dht_cvalues, dht_ccounts, dht_pvalues, dht_pcounts, nzt, zpt_uids, zpt_cvalues, zpt_ccounts, zpt_pvalues, zpt_pcounts, nst, spt_uids, spt_cvalues, spt_ccounts, spt_pvalues, spt_pcounts, nwt, wpt_uids, wpt_cvalues, wpt_ccounts, wpt_pvalues, wpt_pcounts, opacity, spat_size_x, spat_size_y, spat_size_z, spat_step_x, spat_step_y, spat_step_z, spat_zero_x, spat_zero_y, spat_zero_z, spec_size, spec_step, spec_zero, image, scube, wdata, wdata_cmp, rdata, rdata_cmp, ordata, ordata_cmp, vdata_cmp, ddata_cmp); cudaDeviceSynchronize(); } template<typename T> void Wrapper<T>::gmodel_smdisk_evaluate( bool loose, bool tilted, int nrnodes, const T* rnodes, const T* vsys, const T* xpos, const T* ypos, const T* posa, const T* incl, int nrt, const int* rpt_uids, const T* rpt_cvalues, const int* rpt_ccounts, const T* rpt_pvalues, const int* rpt_pcounts, const int* rht_uids, const T* rht_cvalues, const int* rht_ccounts, const T* rht_pvalues, const int* rht_pcounts, int nvt, const int* vpt_uids, const T* vpt_cvalues, const int* vpt_ccounts, const T* vpt_pvalues, const int* vpt_pcounts, const int* vht_uids, const T* vht_cvalues, const int* vht_ccounts, const T* vht_pvalues, const int* vht_pcounts, int ndt, const int* dpt_uids, const T* dpt_cvalues, const int* dpt_ccounts, const T* dpt_pvalues, const int* dpt_pcounts, const int* dht_uids, const T* dht_cvalues, const int* dht_ccounts, const T* dht_pvalues, const int* dht_pcounts, int nzt, const int* zpt_uids, const T* zpt_cvalues, const int* zpt_ccounts, const T* zpt_pvalues, const int* zpt_pcounts, int nst, const int* spt_uids, const T* spt_cvalues, const int* spt_ccounts, const T* spt_pvalues, const int* spt_pcounts, int nwt, const int* wpt_uids, const T* wpt_cvalues, const int* wpt_ccounts, const T* wpt_pvalues, const int* wpt_pcounts, const T* opacity, int spat_size_x, int spat_size_y, int spat_size_z, T spat_step_x, T spat_step_y, T spat_step_z, T spat_zero_x, T spat_zero_y, T spat_zero_z, int spec_size, T spec_step, T spec_zero, T* image, T* scube, T* wdata, T* wdata_cmp, T* rdata, T* rdata_cmp, T* ordata, T* ordata_cmp, T* vdata_cmp, T* ddata_cmp) { const int n = spat_size_x * spat_size_y; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); kernels::gmodel_smdisk_evaluate<<<gsize, bsize>>>( loose, tilted, nrnodes, rnodes, vsys, xpos, ypos, posa, incl, nrt, rpt_uids, rpt_cvalues, 
rpt_ccounts, rpt_pvalues, rpt_pcounts, rht_uids, rht_cvalues, rht_ccounts, rht_pvalues, rht_pcounts, nvt, vpt_uids, vpt_cvalues, vpt_ccounts, vpt_pvalues, vpt_pcounts, vht_uids, vht_cvalues, vht_ccounts, vht_pvalues, vht_pcounts, ndt, dpt_uids, dpt_cvalues, dpt_ccounts, dpt_pvalues, dpt_pcounts, dht_uids, dht_cvalues, dht_ccounts, dht_pvalues, dht_pcounts, nzt, zpt_uids, zpt_cvalues, zpt_ccounts, zpt_pvalues, zpt_pcounts, nst, spt_uids, spt_cvalues, spt_ccounts, spt_pvalues, spt_pcounts, nwt, wpt_uids, wpt_cvalues, wpt_ccounts, wpt_pvalues, wpt_pcounts, opacity, spat_size_x, spat_size_y, spat_size_z, spat_step_x, spat_step_y, spat_step_z, spat_zero_x, spat_zero_y, spat_zero_z, spec_size, spec_step, spec_zero, image, scube, wdata, wdata_cmp, rdata, rdata_cmp, ordata, ordata_cmp, vdata_cmp, ddata_cmp); cudaDeviceSynchronize(); } template<typename T> void Wrapper<T>::objective_count_pixels( const T* data1, const T* data2, int size, T epsilon, int* counts) { const int n = size; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); kernels::objective_count_pixels<<<gsize, bsize>>>( data1, data2, size, epsilon, counts); cudaDeviceSynchronize(); } template<typename T> void Wrapper<T>::objective_residual( const T* obs_d, const T* obs_e, const T* obs_m, const T* mdl_d, const T* mdl_w, const T* mdl_m, int size, T weight, T* res) { const int n = size; dim3 bsize(BLOCK_SIZE); dim3 gsize((n + bsize.x - 1) / bsize.x); kernels::objective_residual<<<gsize, bsize>>>( obs_d, obs_e, obs_m, mdl_d, mdl_w, mdl_m, size, weight, res); cudaDeviceSynchronize(); } #define INSTANTIATE(T)\ template struct Wrapper<T>; INSTANTIATE(float) #undef INSTANTIATE } // namespace gbkfit::cuda
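The pair above illustrates the core rewrite hipify applies throughout this wrapper: triple-chevron kernel launches become hipLaunchKernelGGL calls, and cudaDeviceSynchronize / cufftComplex become hipDeviceSynchronize / hipfftComplex. The minimal sketch below (not part of the dataset; the kernel and names are hypothetical) shows that mapping in isolation.

// Minimal sketch of the launch-syntax mapping seen in the pair above.
// The kernel and function names here are illustrative only.
#include <hip/hip_runtime.h>   // hipify inserts this header; the CUDA side uses <cuda_runtime.h>

__global__ void scale(float* data, int n, float s)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        data[i] *= s;
}

void launch_scale(float* d_data, int n, float s)
{
    dim3 bsize(256);
    dim3 gsize((n + bsize.x - 1) / bsize.x);
    // CUDA source:   scale<<<gsize, bsize>>>(d_data, n, s);
    // hipify output: hipLaunchKernelGGL(scale, gsize, bsize, 0, 0, d_data, n, s);
    hipLaunchKernelGGL(scale, gsize, bsize, 0, 0, d_data, n, s);
    hipDeviceSynchronize();    // cudaDeviceSynchronize() in the .cu version
}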
61875c0af88d11fedef3d267203807a186a53a1e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <fstream>
#include <hiprand/hiprand_kernel.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>

using namespace std;

__device__ int d_count = 0;

__global__ void colourMax (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int currentColour){
    int i= blockDim.x * blockIdx.x + threadIdx.x;

    if (i>=n){
        return;
    }

    if (colouring[i]!=0){
        return;
    }

    int myMax = numbers[i];

    // printf("I am node %d with value %d\n", i+1, myMax);

    int start = -1, stop = -1;

    start = vertexArray[i];
    stop = vertexArray[i+1];

    for (int j=start; j<stop; j++){
        // printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
        int neighbour = neighbourArray[j]-1;
        if (colouring[neighbour]==0 && numbers[neighbour] >= myMax){
            if (numbers[neighbour] == myMax){
                if (i < neighbour){
                    continue;
                }
            }
            return;
        }
    }

    colouring[i] = currentColour;
    atomicAdd(&d_count, 1);
}

__global__ void setup_kernel (hiprandState_t * state, unsigned long seed ){
    int i= blockDim.x * blockIdx.x + threadIdx.x;
    hiprand_init (seed, i, 0, &state[i]);
}

__global__ void randomNumbering (hiprandState_t* globalState, int *degreeCount, int n, int limit){
    int i= blockDim.x * blockIdx.x + threadIdx.x;
    hiprandState_t localState = globalState[i];
    float RANDOM = hiprand_uniform( &localState );
    globalState[i] = localState;
    RANDOM *= (limit - 1 + 0.999999);
    RANDOM += 1;
    degreeCount[i] = (int) RANDOM;
}

__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
    int i= blockDim.x * blockIdx.x + threadIdx.x;

    if (i>=n){
        return;
    }

    int start = -1, stop = -1;
    int diff=0;

    start = vertexArray[i];
    stop = vertexArray[i+1];

    diff = stop-start;
    degreeCount[i]=diff;
}

void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){
    for (int i=0; i<n-1; i++){
        for (int j = vertexArray[i]; j < vertexArray[i+1]; ++j){
            cout<<"e "<<i+1<<" "<<neighbourArray[j]<<endl;
            /* code */
        }
    }

    for (int j = vertexArray[n-1]; j < m; ++j) {
        cout<<"e "<<n<<" "<<neighbourArray[j]<<endl;
        /* code */
    }
}

int main(int argc, char const *argv[]) {
    string a, b;
    int n, m;

    cin>>n>>m;

    ofstream fout;
    fout.open("output2.txt",ios::app);

    int *h_count = new int;
    int *h_vertexArray = new int [n+1];
    int *h_neighbourArray = new int [2*m];
    int *h_degreeCount = new int [n];
    int *h_colour = new int [n];

    int *d_vertexArray = NULL;
    hipMalloc((void **)&d_vertexArray, (n+1)*sizeof(int));

    int *d_neighbourArray = NULL;
    hipMalloc((void **)&d_neighbourArray, 2*m*sizeof(int));

    int *d_colour = NULL;
    hipMalloc((void **)&d_colour, (n)*sizeof(int));
    hipMemset((void *)d_colour, 0, (n)*sizeof(int));

    int *d_degreeCount = NULL;
    hipMalloc((void **)&d_degreeCount, (n)*sizeof(int));
    hipMemset((void *)d_degreeCount, 0, (n)*sizeof(int));

    hiprandState_t* devStates;
    hipMalloc ( &devStates, n*sizeof( hiprandState_t ) );

    for (int i = 0; i < n+1; ++i) {
        h_vertexArray[i]=2*m;
    }

    int NSlast = 0;
    int NSoffset = 0;
    int NSprev=0;

    for (int i=0; i<2*m; i++){
        int start, end;
        cin>>start>>end;

        for (int j=NSlast+1; j<start; j++){
            h_vertexArray[j-1]=NSoffset;
        }

        if (NSprev!=start){
            NSlast=start;
            h_vertexArray[start-1]=NSoffset;
            NSprev=start;
        }

        h_neighbourArray[NSoffset]=end;
        NSoffset++;
    }

    hipEvent_t start, stop;
    float timeNew;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start, 0);
    hipMemcpy(d_vertexArray, h_vertexArray, (n+1)*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), hipMemcpyHostToDevice);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timeNew, start, stop);

    fout<<timeNew<<"\t";

    int threadsPerBlock = 512;
    int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;

    hipLaunchKernelGGL(( degreeCalc), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, d_degreeCount, n, m);

    thrust::device_ptr<int> d_ptr = thrust::device_pointer_cast(d_degreeCount);
    int maxDegree = *(thrust::max_element(d_ptr, d_ptr + n));

    cout<<"Max = "<<maxDegree<<endl;

    hipEventRecord(start, 0);
    hipLaunchKernelGGL(( setup_kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, time(NULL) );
    hipLaunchKernelGGL(( randomNumbering), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, d_degreeCount, n, n);
    // hipMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), hipMemcpyDeviceToHost);
    // cout<<"Random numbers: "<<endl;
    //
    // for (int i=0; i<n; i++){
    // cout<<h_degreeCount[i]<<endl;
    // }
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timeNew, start, stop);

    fout<<timeNew<<"\t";

    int colourCount = 1;

    hipEventRecord(start, 0);
    while (1){
        hipLaunchKernelGGL(( colourMax), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, colourCount);
        hipMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, hipMemcpyDeviceToHost);
        // cout<<"H Count = "<<*h_count<<"at colour: "<<colourCount<<endl;
        if (*h_count == n){
            break;
        }
        colourCount++;
    }
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timeNew, start, stop);

    fout<<timeNew<<"\t"<<colourCount<<"\t";

    hipEventRecord(start, 0);
    hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timeNew, start, stop);

    fout<<timeNew<<"\t";

    thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(d_colour);
    int maxColour = *(thrust::max_element(c_ptr, c_ptr + n));

    cout<<"Max Colour = "<<maxColour<<endl;

    fout<<maxColour<<"\n";

    // cout<<"Colour numbers: "<<endl;
    //
    // for (int i=0; i<n; i++){
    // cout<<h_colour[i]<<endl;
    // }

    delete h_count;
    delete[] h_vertexArray;
    delete[] h_neighbourArray;
    delete[] h_degreeCount;
    delete[] h_colour;

    hipFree(d_neighbourArray);
    hipFree(d_vertexArray);
    hipFree(d_degreeCount);
    hipFree(d_colour);

    hipDeviceReset();

    return 0;
}
61875c0af88d11fedef3d267203807a186a53a1e.cu
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <fstream>
#include <curand_kernel.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>

using namespace std;

__device__ int d_count = 0;

__global__ void colourMax (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int currentColour){
    int i= blockDim.x * blockIdx.x + threadIdx.x;

    if (i>=n){
        return;
    }

    if (colouring[i]!=0){
        return;
    }

    int myMax = numbers[i];

    // printf("I am node %d with value %d\n", i+1, myMax);

    int start = -1, stop = -1;

    start = vertexArray[i];
    stop = vertexArray[i+1];

    for (int j=start; j<stop; j++){
        // printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1);
        int neighbour = neighbourArray[j]-1;
        if (colouring[neighbour]==0 && numbers[neighbour] >= myMax){
            if (numbers[neighbour] == myMax){
                if (i < neighbour){
                    continue;
                }
            }
            return;
        }
    }

    colouring[i] = currentColour;
    atomicAdd(&d_count, 1);
}

__global__ void setup_kernel (curandState * state, unsigned long seed ){
    int i= blockDim.x * blockIdx.x + threadIdx.x;
    curand_init (seed, i, 0, &state[i]);
}

__global__ void randomNumbering (curandState* globalState, int *degreeCount, int n, int limit){
    int i= blockDim.x * blockIdx.x + threadIdx.x;
    curandState localState = globalState[i];
    float RANDOM = curand_uniform( &localState );
    globalState[i] = localState;
    RANDOM *= (limit - 1 + 0.999999);
    RANDOM += 1;
    degreeCount[i] = (int) RANDOM;
}

__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
    int i= blockDim.x * blockIdx.x + threadIdx.x;

    if (i>=n){
        return;
    }

    int start = -1, stop = -1;
    int diff=0;

    start = vertexArray[i];
    stop = vertexArray[i+1];

    diff = stop-start;
    degreeCount[i]=diff;
}

void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){
    for (int i=0; i<n-1; i++){
        for (int j = vertexArray[i]; j < vertexArray[i+1]; ++j){
            cout<<"e "<<i+1<<" "<<neighbourArray[j]<<endl;
            /* code */
        }
    }

    for (int j = vertexArray[n-1]; j < m; ++j) {
        cout<<"e "<<n<<" "<<neighbourArray[j]<<endl;
        /* code */
    }
}

int main(int argc, char const *argv[]) {
    string a, b;
    int n, m;

    cin>>n>>m;

    ofstream fout;
    fout.open("output2.txt",ios::app);

    int *h_count = new int;
    int *h_vertexArray = new int [n+1];
    int *h_neighbourArray = new int [2*m];
    int *h_degreeCount = new int [n];
    int *h_colour = new int [n];

    int *d_vertexArray = NULL;
    cudaMalloc((void **)&d_vertexArray, (n+1)*sizeof(int));

    int *d_neighbourArray = NULL;
    cudaMalloc((void **)&d_neighbourArray, 2*m*sizeof(int));

    int *d_colour = NULL;
    cudaMalloc((void **)&d_colour, (n)*sizeof(int));
    cudaMemset((void *)d_colour, 0, (n)*sizeof(int));

    int *d_degreeCount = NULL;
    cudaMalloc((void **)&d_degreeCount, (n)*sizeof(int));
    cudaMemset((void *)d_degreeCount, 0, (n)*sizeof(int));

    curandState* devStates;
    cudaMalloc ( &devStates, n*sizeof( curandState ) );

    for (int i = 0; i < n+1; ++i) {
        h_vertexArray[i]=2*m;
    }

    int NSlast = 0;
    int NSoffset = 0;
    int NSprev=0;

    for (int i=0; i<2*m; i++){
        int start, end;
        cin>>start>>end;

        for (int j=NSlast+1; j<start; j++){
            h_vertexArray[j-1]=NSoffset;
        }

        if (NSprev!=start){
            NSlast=start;
            h_vertexArray[start-1]=NSoffset;
            NSprev=start;
        }

        h_neighbourArray[NSoffset]=end;
        NSoffset++;
    }

    cudaEvent_t start, stop;
    float timeNew;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    cudaMemcpy(d_vertexArray, h_vertexArray, (n+1)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeNew, start, stop);

    fout<<timeNew<<"\t";

    int threadsPerBlock = 512;
    int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;

    degreeCalc<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m);

    thrust::device_ptr<int> d_ptr = thrust::device_pointer_cast(d_degreeCount);
    int maxDegree = *(thrust::max_element(d_ptr, d_ptr + n));

    cout<<"Max = "<<maxDegree<<endl;

    cudaEventRecord(start, 0);
    setup_kernel <<<blocksPerGrid, threadsPerBlock>>> ( devStates, time(NULL) );
    randomNumbering<<<blocksPerGrid, threadsPerBlock>>>(devStates, d_degreeCount, n, n);
    // cudaMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), cudaMemcpyDeviceToHost);
    // cout<<"Random numbers: "<<endl;
    //
    // for (int i=0; i<n; i++){
    // cout<<h_degreeCount[i]<<endl;
    // }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeNew, start, stop);

    fout<<timeNew<<"\t";

    int colourCount = 1;

    cudaEventRecord(start, 0);
    while (1){
        colourMax<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, colourCount);
        cudaMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, cudaMemcpyDeviceToHost);
        // cout<<"H Count = "<<*h_count<<"at colour: "<<colourCount<<endl;
        if (*h_count == n){
            break;
        }
        colourCount++;
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeNew, start, stop);

    fout<<timeNew<<"\t"<<colourCount<<"\t";

    cudaEventRecord(start, 0);
    cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeNew, start, stop);

    fout<<timeNew<<"\t";

    thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(d_colour);
    int maxColour = *(thrust::max_element(c_ptr, c_ptr + n));

    cout<<"Max Colour = "<<maxColour<<endl;

    fout<<maxColour<<"\n";

    // cout<<"Colour numbers: "<<endl;
    //
    // for (int i=0; i<n; i++){
    // cout<<h_colour[i]<<endl;
    // }

    delete h_count;
    delete[] h_vertexArray;
    delete[] h_neighbourArray;
    delete[] h_degreeCount;
    delete[] h_colour;

    cudaFree(d_neighbourArray);
    cudaFree(d_vertexArray);
    cudaFree(d_degreeCount);
    cudaFree(d_colour);

    cudaDeviceReset();

    return 0;
}
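Besides the launch syntax, this pair shows the cuRAND-to-hipRAND renames (curandState -> hiprandState_t, curand_init -> hiprand_init, curand_uniform -> hiprand_uniform) and the cudaEvent-to-hipEvent timing calls. A minimal sketch of the RNG side, assuming a hypothetical per-thread state array rather than the graph-colouring context above, could look like this:

// Sketch only: per-thread RNG setup and draw using the hipRAND device API,
// mirroring the renames visible in the pair above.
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>   // hipify rewrites <curand_kernel.h> to this

__global__ void init_states(hiprandState_t* states, unsigned long seed)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // curand_init(seed, i, 0, &states[i]) in the CUDA original
    hiprand_init(seed, i, 0, &states[i]);
}

__global__ void draw_uniform(hiprandState_t* states, float* out, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n)
        return;
    hiprandState_t local = states[i];   // curandState in the .cu version
    out[i] = hiprand_uniform(&local);   // curand_uniform in the .cu version
    states[i] = local;                  // write the advanced state back
}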
f8cd1dc4052eeb41f15350d4b3fb05256c85f8dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "PPPMForceComputeGPU.cuh" #include "hoomd/TextureTools.h" // __scalar2int_rd is __float2int_rd in single, __double2int_rd in double #ifdef SINGLE_PRECISION #define __scalar2int_rd __float2int_rd #else #define __scalar2int_rd __double2int_rd #endif #define GPU_PPPM_MAX_ORDER 7 // workaround for HIP bug #ifdef __HIP_PLATFORM_HCC__ inline __device__ float myAtomicAdd(float* address, float val) { unsigned int* address_as_uint = (unsigned int*)address; unsigned int old = *address_as_uint, assumed; do { assumed = old; old = atomicCAS(address_as_uint, assumed, __float_as_uint(val + __uint_as_float(assumed))); } while (assumed != old); return __uint_as_float(old); } #else inline __device__ float myAtomicAdd(float* address, float val) { return atomicAdd(address, val); } #endif //! GPU implementation of sinc(x)==sin(x)/x __device__ Scalar gpu_sinc(Scalar x) { Scalar sinc = 0; //! Coefficients of a power expansion of sin(x)/x const Scalar sinc_coeff[] = {Scalar(1.0), Scalar(-1.0/6.0), Scalar(1.0/120.0), Scalar(-1.0/5040.0),Scalar(1.0/362880.0), Scalar(-1.0/39916800.0)}; if (x*x <= Scalar(1.0)) { Scalar term = Scalar(1.0); for (unsigned int i = 0; i < 6; ++i) { sinc += sinc_coeff[i] * term; term *= x*x; } } else { sinc = fast::sin(x)/x; } return sinc; } __device__ int3 find_cell(const Scalar3& pos, const unsigned int& inner_nx, const unsigned int& inner_ny, const unsigned int& inner_nz, const uint3& n_ghost_cells, const BoxDim& box, int order, Scalar3& dr) { // compute coordinates in units of the mesh size Scalar3 f = box.makeFraction(pos); uchar3 periodic = box.getPeriodic(); Scalar3 reduced_pos = make_scalar3(f.x * (Scalar)inner_nx, f.y * (Scalar)inner_ny, f.z * (Scalar)inner_nz); reduced_pos += make_scalar3(n_ghost_cells.x, n_ghost_cells.y, n_ghost_cells.z); Scalar shift, shiftone; if (order % 2) { shift =Scalar(0.5); shiftone = Scalar(0.0); } else { shift = Scalar(0.0); shiftone = Scalar(0.5); } int ix = __scalar2int_rd(reduced_pos.x + shift); int iy = __scalar2int_rd(reduced_pos.y + shift); int iz = __scalar2int_rd(reduced_pos.z + shift); // set distance to cell center dr.x = shiftone + (Scalar) ix - reduced_pos.x; dr.y = shiftone + (Scalar) iy - reduced_pos.y; dr.z = shiftone + (Scalar) iz - reduced_pos.z; // handle particles on the boundary if (periodic.x && ix == (int)inner_nx) ix = 0; if (periodic.y && iy == (int)inner_ny) iy = 0; if (periodic.z && iz == (int)inner_nz) iz = 0; return make_int3(ix, iy, iz); } __global__ void gpu_assign_particles_kernel(const uint3 mesh_dim, const uint3 n_ghost_bins, unsigned int work_size, const unsigned int *d_index_array, const Scalar4 *d_postype, const Scalar *d_charge, hipfftComplex *d_mesh, Scalar V_cell, int order, unsigned int offset, BoxDim box, const Scalar *d_rho_coeff) { extern __shared__ Scalar s_coeff[]; // load in interpolation coefficients unsigned int ncoeffs = order*(2*order+1); for (unsigned int cur_offset = 0; cur_offset < ncoeffs; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < ncoeffs) { s_coeff[cur_offset + threadIdx.x] = d_rho_coeff[cur_offset + threadIdx.x]; } } __syncthreads(); unsigned int work_idx = blockIdx.x*blockDim.x+threadIdx.x; if (work_idx >= work_size) return; unsigned int group_idx = work_idx + offset; int3 bin_dim = make_int3(mesh_dim.x+2*n_ghost_bins.x, 
mesh_dim.y+2*n_ghost_bins.y, mesh_dim.z+2*n_ghost_bins.z); // grid coordinates of bin (column-major) unsigned int idx = d_index_array[group_idx]; Scalar4 postype = d_postype[idx]; Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); Scalar qi = d_charge[idx]; // compute coordinates in units of the cell size Scalar3 dr = make_scalar3(0,0,0); int3 bin_coord = find_cell(pos, mesh_dim.x, mesh_dim.y, mesh_dim.z, n_ghost_bins, box, order, dr); // ignore particles that are not within our domain (the error should be caught by HOOMD's cell list) if (bin_coord.x < 0 || bin_coord.x >= bin_dim.x || bin_coord.y < 0 || bin_coord.y >= bin_dim.y || bin_coord.z < 0 || bin_coord.z >= bin_dim.z) { return; } int i = bin_coord.x; int j = bin_coord.y; int k = bin_coord.z; int nlower = - (order - 1)/2; int nupper = order/2; Scalar result; int mult_fact = 2*order + 1; Scalar x0 = qi; bool ignore_x = false; bool ignore_y = false; bool ignore_z = false; // loop over neighboring bins for (int l = nlower; l <= nupper; ++l) { // precalculate assignment factor result = Scalar(0.0); for (int iorder = order-1; iorder >= 0; iorder--) { result = s_coeff[l-nlower + iorder*mult_fact] + result * dr.x; } Scalar y0 = x0 * result; int neighi = i + l; if (neighi >= (int)bin_dim.x) { if (! n_ghost_bins.x) neighi -= (int)bin_dim.x; else ignore_x = true; } else if (neighi < 0) { if (! n_ghost_bins.x) neighi += (int)bin_dim.x; else ignore_x = true; } for (int m = nlower; m <= nupper; ++m) { result = Scalar(0.0); for (int iorder = order-1; iorder >= 0; iorder--) { result = s_coeff[m-nlower + iorder*mult_fact] + result * dr.y; } Scalar z0 = y0 * result; int neighj = j + m; if (neighj >= (int) bin_dim.y) { if (! n_ghost_bins.y) neighj -= (int)bin_dim.y; else ignore_y = true; } else if (neighj < 0) { if (! n_ghost_bins.y) neighj += (int)bin_dim.y; else ignore_y = true; } for (int n = nlower; n <= nupper; ++n) { result = Scalar(0.0); for (int iorder = order-1; iorder >= 0; iorder--) { result = s_coeff[n-nlower + iorder*mult_fact] + result * dr.z; } int neighk = k + n; if (neighk >= (int)bin_dim.z) { if (! n_ghost_bins.z) neighk -= (int)bin_dim.z; else ignore_z = true; } else if (neighk < 0) { if (! 
n_ghost_bins.z) neighk += (int)bin_dim.z; else ignore_z = true; } if (!ignore_x && !ignore_y && !ignore_z) { // write out to global memory using row-major unsigned int cell_idx = neighi + bin_dim.x * (neighj + bin_dim.y * neighk); // compute fraction of particle density assigned to cell // from particles in this bin myAtomicAdd(&d_mesh[cell_idx].x, z0*result/V_cell); } ignore_z = false; } ignore_y = false; } ignore_x = false; } // end of loop over neighboring bins } __global__ void gpu_reduce_meshes(const unsigned int mesh_elements, const hipfftComplex *d_mesh_scratch, hipfftComplex *d_mesh, unsigned int ngpu) { unsigned idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= mesh_elements) return; hipfftComplex res; res.x = 0; res.y = 0; // reduce over all temporary meshes for (unsigned int igpu = 0; igpu < ngpu; ++igpu) { hipfftComplex m = d_mesh_scratch[idx + igpu*mesh_elements]; res.x += m.x; res.y += m.y; } d_mesh[idx] = res; } void gpu_assign_particles(const uint3 mesh_dim, const uint3 n_ghost_bins, const uint3 grid_dim, unsigned int group_size, const unsigned int *d_index_array, const Scalar4 *d_postype, const Scalar *d_charge, hipfftComplex *d_mesh, hipfftComplex *d_mesh_scratch, const unsigned int mesh_elements, int order, const BoxDim& box, unsigned int block_size, const Scalar *d_rho_coeff, const hipDeviceProp_t& dev_prop, const GPUPartition &gpu_partition ) { hipMemsetAsync(d_mesh, 0, sizeof(hipfftComplex)*grid_dim.x*grid_dim.y*grid_dim.z); Scalar V_cell = box.getVolume()/(Scalar)(mesh_dim.x*mesh_dim.y*mesh_dim.z); static unsigned int max_block_size = UINT_MAX; static hipFuncAttributes attr; if (max_block_size == UINT_MAX) { hipFuncGetAttributes(&attr, (const void*)gpu_assign_particles_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(max_block_size, block_size); while (attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock) { run_block_size -= dev_prop.warpSize; } // iterate over active GPUs in reverse, to end up on first GPU when returning from this function unsigned int ngpu = gpu_partition.getNumActiveGPUs(); for (int idev = ngpu - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); if (ngpu > 1) { // zero the temporary mesh array hipMemsetAsync(d_mesh_scratch + idev*mesh_elements, 0, sizeof(hipfftComplex)*mesh_elements); } unsigned int nwork = range.second - range.first; unsigned int n_blocks = nwork/run_block_size+1; unsigned int shared_bytes = (unsigned int)(order*(2*order+1)*sizeof(Scalar)); hipLaunchKernelGGL((gpu_assign_particles_kernel), dim3(n_blocks), dim3(run_block_size), shared_bytes, 0, mesh_dim, n_ghost_bins, nwork, d_index_array, d_postype, d_charge, ngpu > 1 ? d_mesh_scratch + idev*mesh_elements : d_mesh, V_cell, order, range.first, box, d_rho_coeff); } } //! 
Reduce temporary arrays for every GPU void gpu_reduce_meshes(const unsigned int mesh_elements, const hipfftComplex *d_mesh_scratch, hipfftComplex *d_mesh, const unsigned int ngpu, const unsigned int block_size) { // reduce meshes on GPU 0 hipLaunchKernelGGL((gpu_reduce_meshes), dim3(mesh_elements/block_size + 1), dim3(block_size), 0, 0, mesh_elements, d_mesh_scratch, d_mesh, ngpu); } __global__ void gpu_compute_mesh_virial_kernel(const unsigned int n_wave_vectors, hipfftComplex *d_fourier_mesh, Scalar *d_inf_f, Scalar *d_virial_mesh, const Scalar3 *d_k, const bool exclude_dc, Scalar kappa ) { unsigned int idx; idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n_wave_vectors) return; if (!exclude_dc || idx != 0) { // non-zero wave vector hipfftComplex fourier = d_fourier_mesh[idx]; Scalar3 k = d_k[idx]; Scalar rhog = (fourier.x * fourier.x + fourier.y * fourier.y)*d_inf_f[idx]; Scalar vterm = -Scalar(2.0)*(Scalar(1.0)/dot(k,k) + Scalar(0.25)/(kappa*kappa)); d_virial_mesh[0*n_wave_vectors+idx] = rhog*(Scalar(1.0) + vterm*k.x*k.x); // xx d_virial_mesh[1*n_wave_vectors+idx] = rhog*( vterm*k.x*k.y); // xy d_virial_mesh[2*n_wave_vectors+idx] = rhog*( vterm*k.x*k.z); // xz d_virial_mesh[3*n_wave_vectors+idx] = rhog*(Scalar(1.0) + vterm*k.y*k.y); // yy d_virial_mesh[4*n_wave_vectors+idx] = rhog*( vterm*k.y*k.z); // yz d_virial_mesh[5*n_wave_vectors+idx] = rhog*(Scalar(1.0) + vterm*k.z*k.z); // zz } else { d_virial_mesh[0*n_wave_vectors+idx] = Scalar(0.0); d_virial_mesh[1*n_wave_vectors+idx] = Scalar(0.0); d_virial_mesh[2*n_wave_vectors+idx] = Scalar(0.0); d_virial_mesh[3*n_wave_vectors+idx] = Scalar(0.0); d_virial_mesh[4*n_wave_vectors+idx] = Scalar(0.0); d_virial_mesh[5*n_wave_vectors+idx] = Scalar(0.0); } } void gpu_compute_mesh_virial(const unsigned int n_wave_vectors, hipfftComplex *d_fourier_mesh, Scalar *d_inf_f, Scalar *d_virial_mesh, const Scalar3 *d_k, const bool exclude_dc, Scalar kappa) { const unsigned int block_size = 256; dim3 grid(n_wave_vectors/block_size + 1, 1, 1); hipLaunchKernelGGL((gpu_compute_mesh_virial_kernel), dim3(grid), dim3(block_size), 0, 0, n_wave_vectors, d_fourier_mesh, d_inf_f, d_virial_mesh, d_k, exclude_dc, kappa); } __global__ void gpu_update_meshes_kernel(const unsigned int n_wave_vectors, hipfftComplex *d_fourier_mesh, hipfftComplex *d_fourier_mesh_G_x, hipfftComplex *d_fourier_mesh_G_y, hipfftComplex *d_fourier_mesh_G_z, const Scalar *d_inf_f, const Scalar3 *d_k, unsigned int NNN) { unsigned int k; k = blockDim.x * blockIdx.x + threadIdx.x; if (k >= n_wave_vectors) return; hipfftComplex f = d_fourier_mesh[k]; Scalar scaled_inf_f = d_inf_f[k] / ((Scalar)NNN); Scalar3 kvec = d_k[k]; // Normalization hipfftComplex fourier_G_x; fourier_G_x.x =f.y * kvec.x * scaled_inf_f; fourier_G_x.y =-f.x * kvec.x * scaled_inf_f; hipfftComplex fourier_G_y; fourier_G_y.x =f.y * kvec.y * scaled_inf_f; fourier_G_y.y =-f.x * kvec.y * scaled_inf_f; hipfftComplex fourier_G_z; fourier_G_z.x =f.y * kvec.z * scaled_inf_f; fourier_G_z.y =-f.x * kvec.z * scaled_inf_f; // store in global memory d_fourier_mesh_G_x[k] = fourier_G_x; d_fourier_mesh_G_y[k] = fourier_G_y; d_fourier_mesh_G_z[k] = fourier_G_z; } void gpu_update_meshes(const unsigned int n_wave_vectors, hipfftComplex *d_fourier_mesh, hipfftComplex *d_fourier_mesh_G_x, hipfftComplex *d_fourier_mesh_G_y, hipfftComplex *d_fourier_mesh_G_z, const Scalar *d_inf_f, const Scalar3 *d_k, unsigned int NNN, unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes 
attr; hipFuncGetAttributes(&attr, (const void*)gpu_update_meshes_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(max_block_size, block_size); dim3 grid(n_wave_vectors/run_block_size + 1, 1, 1); hipLaunchKernelGGL((gpu_update_meshes_kernel), dim3(grid), dim3(run_block_size), 0, 0, n_wave_vectors, d_fourier_mesh, d_fourier_mesh_G_x, d_fourier_mesh_G_y, d_fourier_mesh_G_z, d_inf_f, d_k, NNN); } __global__ void gpu_compute_forces_kernel(const unsigned int work_size, const Scalar4 *d_postype, Scalar4 *d_force, const uint3 grid_dim, const uint3 n_ghost_cells, const Scalar *d_charge, const BoxDim box, int order, const unsigned int *d_index_array, const hipfftComplex *inv_fourier_mesh_x, const hipfftComplex *inv_fourier_mesh_y, const hipfftComplex *inv_fourier_mesh_z, const Scalar *d_rho_coeff, const unsigned int offset) { extern __shared__ Scalar s_coeff[]; // load in interpolation coefficients unsigned int ncoeffs = order*(2*order+1); for (unsigned int cur_offset = 0; cur_offset < ncoeffs; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < ncoeffs) { s_coeff[cur_offset + threadIdx.x] = d_rho_coeff[cur_offset + threadIdx.x]; } } __syncthreads(); unsigned int work_idx = blockIdx.x * blockDim.x + threadIdx.x; if (work_idx >= work_size) return; unsigned int group_idx = work_idx + offset; unsigned int idx = d_index_array[group_idx]; int3 inner_dim = make_int3(grid_dim.x-2*n_ghost_cells.x, grid_dim.y-2*n_ghost_cells.y, grid_dim.z-2*n_ghost_cells.z); Scalar4 postype = d_postype[idx]; Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); unsigned int type = __scalar_as_int(postype.w); Scalar qi = d_charge[idx]; Scalar3 dr = make_scalar3(0,0,0); // find cell the particle is in int3 cell_coord = find_cell(pos, inner_dim.x, inner_dim.y, inner_dim.z, n_ghost_cells, box, order, dr); // ignore particles that are not within our domain (the error should be caught by HOOMD's cell list) if (cell_coord.x < 0 || cell_coord.x >= (int) grid_dim.x || cell_coord.y < 0 || cell_coord.y >= (int) grid_dim.y || cell_coord.z < 0 || cell_coord.z >= (int) grid_dim.z) { return; } Scalar3 force = make_scalar3(0.0,0.0,0.0); int nlower = -(order-1)/2; int nupper = order/2; Scalar result; int mult_fact = 2*order + 1; // back-interpolate forces from neighboring mesh points for (int l = nlower; l <= nupper; ++l) { result = Scalar(0.0); for (int k = order-1; k >= 0; k--) { result = s_coeff[l-nlower + k*mult_fact] + result * dr.x; } Scalar x0 = result; for (int m = nlower; m <= nupper; ++m) { result = Scalar(0.0); for (int k = order-1; k >= 0; k--) { result = s_coeff[m-nlower + k*mult_fact] + result * dr.y; } Scalar y0 = x0*result; for (int n = nlower; n <= nupper; ++n) { result = Scalar(0.0); for (int k = order-1; k >= 0; k--) { result = s_coeff[n-nlower + k*mult_fact] + result * dr.z; } Scalar z0 = y0*result; int neighl = (int) cell_coord.x + l; int neighm = (int) cell_coord.y + m; int neighn = (int) cell_coord.z + n; if (! n_ghost_cells.x) { if (neighl >= (int)grid_dim.x) neighl -= grid_dim.x; else if (neighl < 0) neighl += grid_dim.x; } if (! n_ghost_cells.y) { if (neighm >= (int)grid_dim.y) neighm -= grid_dim.y; else if (neighm < 0) neighm += grid_dim.y; } if (! 
n_ghost_cells.z) { if (neighn >= (int)grid_dim.z) neighn -= grid_dim.z; else if (neighn < 0) neighn += grid_dim.z; } // use column-major layout unsigned int cell_idx = neighl + grid_dim.x * (neighm + grid_dim.y * neighn); hipfftComplex inv_mesh_x = inv_fourier_mesh_x[cell_idx]; hipfftComplex inv_mesh_y = inv_fourier_mesh_y[cell_idx]; hipfftComplex inv_mesh_z = inv_fourier_mesh_z[cell_idx]; force.x += qi*z0*inv_mesh_x.x; force.y += qi*z0*inv_mesh_y.x; force.z += qi*z0*inv_mesh_z.x; } } } // end neighbor cells loop d_force[idx] = make_scalar4(force.x,force.y,force.z,0.0); } void gpu_compute_forces(const unsigned int N, const Scalar4 *d_postype, Scalar4 *d_force, const hipfftComplex *d_inv_fourier_mesh_x, const hipfftComplex *d_inv_fourier_mesh_y, const hipfftComplex *d_inv_fourier_mesh_z, const uint3 grid_dim, const uint3 n_ghost_cells, const Scalar *d_charge, const BoxDim& box, int order, const unsigned int *d_index_array, const GPUPartition& gpu_partition, const GPUPartition& all_gpu_partition, const Scalar *d_rho_coeff, unsigned int block_size, bool local_fft, unsigned int inv_mesh_elements) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_forces_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(max_block_size, block_size); // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = all_gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = all_gpu_partition.getRangeAndSetGPU(idev); // reset force array for ALL particles hipMemsetAsync(d_force+range.first, 0, sizeof(Scalar4)*(range.second-range.first)); } // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; unsigned int n_blocks = nwork/run_block_size+1; unsigned int shared_bytes = (unsigned int)(order*(2*order+1)*sizeof(Scalar)); hipLaunchKernelGGL((gpu_compute_forces_kernel), dim3(n_blocks), dim3(run_block_size), shared_bytes, 0, nwork, d_postype, d_force, grid_dim, n_ghost_cells, d_charge, box, order, d_index_array, local_fft ? d_inv_fourier_mesh_x + idev*inv_mesh_elements : d_inv_fourier_mesh_x, local_fft ? d_inv_fourier_mesh_y + idev*inv_mesh_elements : d_inv_fourier_mesh_y, local_fft ? d_inv_fourier_mesh_z + idev*inv_mesh_elements : d_inv_fourier_mesh_z, d_rho_coeff, range.first); } } __global__ void kernel_calculate_pe_partial( int n_wave_vectors, Scalar *sum_partial, const hipfftComplex *d_fourier_mesh, const Scalar *d_inf_f, const bool exclude_dc) { HIP_DYNAMIC_SHARED( Scalar, sdata) unsigned int tidx = threadIdx.x; unsigned int j; j = blockDim.x * blockIdx.x + threadIdx.x; Scalar mySum = Scalar(0.0); if (j < n_wave_vectors) { if (! 
exclude_dc || j != 0) { mySum = d_fourier_mesh[j].x * d_fourier_mesh[j].x + d_fourier_mesh[j].y * d_fourier_mesh[j].y; mySum *= d_inf_f[j]; } } sdata[tidx] = mySum; __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (tidx < offs) { sdata[tidx] += sdata[tidx + offs]; } offs >>= 1; __syncthreads(); } // write result to global memory if (tidx == 0) sum_partial[blockIdx.x] = sdata[0]; } __global__ void kernel_final_reduce_pe(Scalar* sum_partial, unsigned int nblocks, Scalar *sum) { HIP_DYNAMIC_SHARED( Scalar, smem) if (threadIdx.x == 0) *sum = Scalar(0.0); for (int start = 0; start< nblocks; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < nblocks) smem[threadIdx.x] = sum_partial[start + threadIdx.x]; else smem[threadIdx.x] = Scalar(0.0); __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) smem[threadIdx.x] += smem[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { *sum += smem[0]; } } } void gpu_compute_pe(unsigned int n_wave_vectors, Scalar *d_sum_partial, Scalar *d_sum, const hipfftComplex *d_fourier_mesh, const Scalar *d_inf_f, const unsigned int block_size, const uint3 mesh_dim, const bool exclude_dc) { unsigned int n_blocks = n_wave_vectors/block_size + 1; unsigned int shared_size = (unsigned int)(block_size * sizeof(Scalar)); dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((kernel_calculate_pe_partial), dim3(grid), dim3(block_size), shared_size, 0, n_wave_vectors, d_sum_partial, d_fourier_mesh, d_inf_f, exclude_dc); // calculate final sum of mesh values const unsigned int final_block_size = 256; shared_size = final_block_size*sizeof(Scalar); hipLaunchKernelGGL((kernel_final_reduce_pe), dim3(1), dim3(final_block_size), shared_size, 0, d_sum_partial, n_blocks, d_sum); } __global__ void kernel_calculate_virial_partial( int n_wave_vectors, Scalar *sum_virial_partial, const Scalar *d_mesh_virial) { HIP_DYNAMIC_SHARED( Scalar, sdata) unsigned int j; j = blockDim.x * blockIdx.x + threadIdx.x; unsigned int tidx = threadIdx.x; Scalar mySum_xx = Scalar(0.0); Scalar mySum_xy = Scalar(0.0); Scalar mySum_xz = Scalar(0.0); Scalar mySum_yy = Scalar(0.0); Scalar mySum_yz = Scalar(0.0); Scalar mySum_zz = Scalar(0.0); if (j < n_wave_vectors) { mySum_xx = d_mesh_virial[0*n_wave_vectors+j]; mySum_xy = d_mesh_virial[1*n_wave_vectors+j]; mySum_xz = d_mesh_virial[2*n_wave_vectors+j]; mySum_yy = d_mesh_virial[3*n_wave_vectors+j]; mySum_yz = d_mesh_virial[4*n_wave_vectors+j]; mySum_zz = d_mesh_virial[5*n_wave_vectors+j]; } sdata[0*blockDim.x+tidx] = mySum_xx; sdata[1*blockDim.x+tidx] = mySum_xy; sdata[2*blockDim.x+tidx] = mySum_xz; sdata[3*blockDim.x+tidx] = mySum_yy; sdata[4*blockDim.x+tidx] = mySum_yz; sdata[5*blockDim.x+tidx] = mySum_zz; __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (tidx < offs) { sdata[0*blockDim.x+tidx] += sdata[0*blockDim.x+tidx + offs]; sdata[1*blockDim.x+tidx] += sdata[1*blockDim.x+tidx + offs]; sdata[2*blockDim.x+tidx] += sdata[2*blockDim.x+tidx + offs]; sdata[3*blockDim.x+tidx] += sdata[3*blockDim.x+tidx + offs]; sdata[4*blockDim.x+tidx] += sdata[4*blockDim.x+tidx + offs]; sdata[5*blockDim.x+tidx] += sdata[5*blockDim.x+tidx + offs]; } offs >>= 1; __syncthreads(); } // write result to global memory if (tidx == 0) { sum_virial_partial[0*gridDim.x+blockIdx.x] = sdata[0*blockDim.x]; sum_virial_partial[1*gridDim.x+blockIdx.x] = sdata[1*blockDim.x]; sum_virial_partial[2*gridDim.x+blockIdx.x] = sdata[2*blockDim.x]; 
sum_virial_partial[3*gridDim.x+blockIdx.x] = sdata[3*blockDim.x]; sum_virial_partial[4*gridDim.x+blockIdx.x] = sdata[4*blockDim.x]; sum_virial_partial[5*gridDim.x+blockIdx.x] = sdata[5*blockDim.x]; } } __global__ void kernel_final_reduce_virial(Scalar* sum_virial_partial, unsigned int nblocks, Scalar *sum_virial) { HIP_DYNAMIC_SHARED( Scalar, smem) if (threadIdx.x == 0) { sum_virial[0] = Scalar(0.0); sum_virial[1] = Scalar(0.0); sum_virial[2] = Scalar(0.0); sum_virial[3] = Scalar(0.0); sum_virial[4] = Scalar(0.0); sum_virial[5] = Scalar(0.0); } for (int start = 0; start< nblocks; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < nblocks) { smem[0*blockDim.x+threadIdx.x] = sum_virial_partial[0*nblocks+start+threadIdx.x]; smem[1*blockDim.x+threadIdx.x] = sum_virial_partial[1*nblocks+start+threadIdx.x]; smem[2*blockDim.x+threadIdx.x] = sum_virial_partial[2*nblocks+start+threadIdx.x]; smem[3*blockDim.x+threadIdx.x] = sum_virial_partial[3*nblocks+start+threadIdx.x]; smem[4*blockDim.x+threadIdx.x] = sum_virial_partial[4*nblocks+start+threadIdx.x]; smem[5*blockDim.x+threadIdx.x] = sum_virial_partial[5*nblocks+start+threadIdx.x]; } else { smem[0*blockDim.x+threadIdx.x] = Scalar(0.0); smem[1*blockDim.x+threadIdx.x] = Scalar(0.0); smem[2*blockDim.x+threadIdx.x] = Scalar(0.0); smem[3*blockDim.x+threadIdx.x] = Scalar(0.0); smem[4*blockDim.x+threadIdx.x] = Scalar(0.0); smem[5*blockDim.x+threadIdx.x] = Scalar(0.0); } __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { smem[0*blockDim.x+threadIdx.x] += smem[0*blockDim.x+threadIdx.x + offs]; smem[1*blockDim.x+threadIdx.x] += smem[1*blockDim.x+threadIdx.x + offs]; smem[2*blockDim.x+threadIdx.x] += smem[2*blockDim.x+threadIdx.x + offs]; smem[3*blockDim.x+threadIdx.x] += smem[3*blockDim.x+threadIdx.x + offs]; smem[4*blockDim.x+threadIdx.x] += smem[4*blockDim.x+threadIdx.x + offs]; smem[5*blockDim.x+threadIdx.x] += smem[5*blockDim.x+threadIdx.x + offs]; } offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { sum_virial[0] += smem[0*blockDim.x]; sum_virial[1] += smem[1*blockDim.x]; sum_virial[2] += smem[2*blockDim.x]; sum_virial[3] += smem[3*blockDim.x]; sum_virial[4] += smem[4*blockDim.x]; sum_virial[5] += smem[5*blockDim.x]; } } } void gpu_compute_virial(unsigned int n_wave_vectors, Scalar *d_sum_virial_partial, Scalar *d_sum_virial, const Scalar *d_mesh_virial, const unsigned int block_size) { unsigned int n_blocks = n_wave_vectors/block_size + 1; unsigned int shared_size = (unsigned int)(6* block_size * sizeof(Scalar)); dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((kernel_calculate_virial_partial), dim3(grid), dim3(block_size), shared_size, 0, n_wave_vectors, d_sum_virial_partial, d_mesh_virial); // calculate final virial values const unsigned int final_block_size = 256; shared_size = 6*final_block_size*sizeof(Scalar); hipLaunchKernelGGL((kernel_final_reduce_virial), dim3(1), dim3(final_block_size), shared_size, 0, d_sum_virial_partial, n_blocks, d_sum_virial); } template<bool local_fft> __global__ void gpu_compute_influence_function_kernel(const uint3 mesh_dim, const unsigned int n_wave_vectors, const uint3 global_dim, Scalar *d_inf_f, Scalar3 *d_k, const Scalar3 b1, const Scalar3 b2, const Scalar3 b3, const uint3 pidx, const uint3 pdim, int nbx, int nby, int nbz, const Scalar *gf_b, int order, Scalar kappa, Scalar alpha) { unsigned int kidx; kidx = blockDim.x * blockIdx.x + threadIdx.x; if (kidx >= n_wave_vectors) return; int l,m,n; if (local_fft) { // use row-major layout int ny = 
mesh_dim.y; int nx = mesh_dim.x; n = kidx/ny/nx; m = (kidx-n*ny*nx)/nx; l = kidx % nx; } #ifdef ENABLE_MPI else { // local layout: row-major int ny = mesh_dim.y; int nx = mesh_dim.x; int n_local = kidx/ny/nx; int m_local = (kidx-n_local*ny*nx)/nx; int l_local = kidx % nx; // cyclic distribution l = l_local*pdim.x + pidx.x; m = m_local*pdim.y + pidx.y; n = n_local*pdim.z + pidx.z; } #endif // compute Miller indices if (l >= (int)(global_dim.x/2 + global_dim.x%2)) l -= (int) global_dim.x; if (m >= (int)(global_dim.y/2 + global_dim.y%2)) m -= (int) global_dim.y; if (n >= (int)(global_dim.z/2 + global_dim.z%2)) n -= (int) global_dim.z; Scalar val; Scalar3 kval = (Scalar)l*b1+(Scalar)m*b2+(Scalar)n*b3; Scalar3 kH = Scalar(2.0*M_PI)*make_scalar3(Scalar(1.0)/(Scalar)global_dim.x, Scalar(1.0)/(Scalar)global_dim.y, Scalar(1.0)/(Scalar)global_dim.z); Scalar snx = fast::sin(Scalar(0.5)*l*kH.x); Scalar snx2 = snx*snx; Scalar sny = fast::sin(Scalar(0.5)*m*kH.y); Scalar sny2 = sny*sny; Scalar snz = fast::sin(Scalar(0.5)*n*kH.z); Scalar snz2 = snz*snz; Scalar sx(0.0), sy(0.0), sz(0.0); for (int iorder = order-1; iorder >= 0; iorder--) { sx = gf_b[iorder] + sx*snx2; sy = gf_b[iorder] + sy*sny2; sz = gf_b[iorder] + sz*snz2; } Scalar denominator = sx*sy*sz; denominator *= denominator; if (l != 0 || m != 0 || n != 0) { Scalar sum1(0.0); Scalar numerator = Scalar(4.0*M_PI)/dot(kval,kval); for (int ix = -nbx; ix <= nbx; ix++) { Scalar qx = ((Scalar)l + (Scalar)ix*global_dim.x); Scalar3 knx = qx*b1; Scalar argx = Scalar(0.5)*qx*kH.x; Scalar wxs = gpu_sinc(argx); Scalar wx(1.0); for (int iorder = 0; iorder < order; ++iorder) { wx *= wxs; } for (int iy = -nby; iy <= nby; iy++) { Scalar qy = ((Scalar)m + (Scalar)iy*global_dim.y); Scalar3 kny = qy*b2; Scalar argy = Scalar(0.5)*qy*kH.y; Scalar wys = gpu_sinc(argy); Scalar wy(1.0); for (int iorder = 0; iorder < order; ++iorder) { wy *= wys; } for (int iz = -nbz; iz <= nbz; iz++) { Scalar qz = ((Scalar)n + (Scalar)iz*global_dim.z); Scalar3 knz = qz*b3; Scalar argz = Scalar(0.5)*qz*kH.z; Scalar wzs = gpu_sinc(argz); Scalar wz(1.0); for (int iorder = 0; iorder < order; ++iorder) { wz *= wzs; } Scalar3 kn = knx + kny + knz; Scalar dot1 = dot(kn, kval); Scalar dot2 = dot(kn, kn)+alpha*alpha; Scalar arg_gauss = Scalar(0.25)*dot2/kappa/kappa; Scalar gauss = exp(-arg_gauss); sum1 += (dot1/dot2) * gauss * wx * wx * wy * wy * wz * wz; } } } val = numerator*sum1/denominator; } else { val = Scalar(0.0); } // write out result d_inf_f[kidx] = val; d_k[kidx] = kval; } void gpu_compute_influence_function(const uint3 mesh_dim, const uint3 global_dim, Scalar *d_inf_f, Scalar3 *d_k, const BoxDim& global_box, const bool local_fft, const uint3 pidx, const uint3 pdim, const Scalar EPS_HOC, Scalar kappa, Scalar alpha, const Scalar *d_gf_b, int order, unsigned int block_size) { // compute reciprocal lattice vectors Scalar3 a1 = global_box.getLatticeVector(0); Scalar3 a2 = global_box.getLatticeVector(1); Scalar3 a3 = global_box.getLatticeVector(2); Scalar V_box = global_box.getVolume(); Scalar3 b1 = Scalar(2.0*M_PI)*make_scalar3(a2.y*a3.z-a2.z*a3.y, a2.z*a3.x-a2.x*a3.z, a2.x*a3.y-a2.y*a3.x)/V_box; Scalar3 b2 = Scalar(2.0*M_PI)*make_scalar3(a3.y*a1.z-a3.z*a1.y, a3.z*a1.x-a3.x*a1.z, a3.x*a1.y-a3.y*a1.x)/V_box; Scalar3 b3 = Scalar(2.0*M_PI)*make_scalar3(a1.y*a2.z-a1.z*a2.y, a1.z*a2.x-a1.x*a2.z, a1.x*a2.y-a1.y*a2.x)/V_box; unsigned int num_wave_vectors = mesh_dim.x*mesh_dim.y*mesh_dim.z; Scalar3 L = global_box.getL(); Scalar temp = floor(((kappa*L.x/(M_PI*global_dim.x)) * 
pow(-log(EPS_HOC),0.25))); int nbx = (int)temp; temp = floor(((kappa*L.y/(M_PI*global_dim.y)) * pow(-log(EPS_HOC),0.25))); int nby = (int)temp; temp = floor(((kappa*L.z/(M_PI*global_dim.z)) * pow(-log(EPS_HOC),0.25))); int nbz = (int)temp; if (local_fft) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_influence_function_kernel<true>); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(max_block_size, block_size); unsigned int n_blocks = num_wave_vectors/run_block_size; if (num_wave_vectors % run_block_size) n_blocks += 1; dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((gpu_compute_influence_function_kernel<true>), dim3(grid), dim3(run_block_size), 0, 0, mesh_dim, num_wave_vectors, global_dim, d_inf_f, d_k, b1, b2, b3, pidx, pdim, nbx, nby, nbz, d_gf_b, order, kappa, alpha); } #ifdef ENABLE_MPI else { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_influence_function_kernel<false>); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(max_block_size, block_size); unsigned int n_blocks = num_wave_vectors/run_block_size; if (num_wave_vectors % run_block_size) n_blocks += 1; dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((gpu_compute_influence_function_kernel<false>), dim3(grid), dim3(run_block_size), 0, 0, mesh_dim, num_wave_vectors, global_dim, d_inf_f, d_k, b1, b2, b3, pidx, pdim, nbx, nby, nbz, d_gf_b, order, kappa, alpha); } #endif } //! The developer has chosen not to document this function __global__ void gpu_fix_exclusions_kernel(Scalar4 *d_force, Scalar *d_virial, const size_t virial_pitch, const Scalar4 *d_pos, const Scalar *d_charge, const BoxDim box, const unsigned int *d_n_neigh, const unsigned int *d_nlist, const Index2D nli, Scalar kappa, Scalar alpha, unsigned int *d_group_members, unsigned int group_size) { // start by identifying which particle we are to handle int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; const Scalar sqrtpi = sqrtf(M_PI); unsigned int n_neigh = d_n_neigh[idx]; Scalar4 postypei = __ldg(d_pos + idx); Scalar3 posi = make_scalar3(postypei.x, postypei.y, postypei.z); Scalar qi = __ldg(d_charge + idx); // initialize the force to 0 Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); Scalar virial[6]; for (unsigned int i = 0; i < 6; i++) virial[i] = Scalar(0.0); unsigned int cur_j = 0; // prefetch neighbor index unsigned int next_j = d_nlist[nli(idx, 0)]; for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++) { { // read the current neighbor index (MEM TRANSFER: 4 bytes) // prefetch the next value and set the current one cur_j = next_j; if (neigh_idx+1 < n_neigh) next_j = d_nlist[nli(idx, neigh_idx+1)]; // get the neighbor's position (MEM TRANSFER: 16 bytes) Scalar4 postypej = __ldg(d_pos + cur_j); Scalar3 posj = make_scalar3(postypej.x, postypej.y, postypej.z); Scalar qj = __ldg(d_charge + cur_j); // calculate dr (with periodic boundary conditions) (FLOPS: 3) Scalar3 dx = posi - posj; // apply periodic boundary conditions: (FLOPS 12) dx = box.minImage(dx); // calculate r squared (FLOPS: 5) Scalar rsq = dot(dx,dx); Scalar r = sqrtf(rsq); Scalar qiqj = qi * qj; Scalar expfac = fast::exp(-alpha*r); Scalar arg1 = kappa * r - alpha/Scalar(2.0)/kappa; Scalar arg2 = kappa * r + 
alpha/Scalar(2.0)/kappa; Scalar erffac = (::erf(arg1)*expfac + expfac - fast::erfc(arg2)*exp(alpha*r))/(Scalar(2.0)*r); Scalar force_divr = qiqj * (expfac*Scalar(2.0)*kappa/sqrtpi*fast::exp(-arg1*arg1) - Scalar(0.5)*alpha*(expfac*::erfc(arg1)+fast::exp(alpha*r)*fast::erfc(arg2)) - erffac)/rsq; // subtract long-range part of pair-interaction Scalar pair_eng = -qiqj * erffac; Scalar force_div2r = Scalar(0.5) * force_divr; virial[0] += dx.x * dx.x * force_div2r; virial[1] += dx.x * dx.y * force_div2r; virial[2] += dx.x * dx.z * force_div2r; virial[3] += dx.y * dx.y * force_div2r; virial[4] += dx.y * dx.z * force_div2r; virial[5] += dx.z * dx.z * force_div2r; force.x += dx.x * force_divr; force.y += dx.y * force_divr; force.z += dx.z * force_divr; force.w += pair_eng; } } force.w *= Scalar(0.5); d_force[idx].x += force.x; d_force[idx].y += force.y; d_force[idx].z += force.z; d_force[idx].w += force.w; for (unsigned int i = 0; i < 6; i++) d_virial[i*virial_pitch+idx] += virial[i]; } } //! The developer has chosen not to document this function hipError_t gpu_fix_exclusions(Scalar4 *d_force, Scalar *d_virial, const size_t virial_pitch, const unsigned int Nmax, const Scalar4 *d_pos, const Scalar *d_charge, const BoxDim& box, const unsigned int *d_n_ex, const unsigned int *d_exlist, const Index2D nex, Scalar kappa, Scalar alpha, unsigned int *d_group_members, unsigned int group_size, int block_size) { dim3 grid( group_size / block_size + 1, 1, 1); dim3 threads(block_size, 1, 1); hipLaunchKernelGGL((gpu_fix_exclusions_kernel), dim3(grid), dim3(threads ), 0, 0, d_force, d_virial, virial_pitch, d_pos, d_charge, box, d_n_ex, d_exlist, nex, kappa, alpha, d_group_members, group_size); return hipSuccess; }
f8cd1dc4052eeb41f15350d4b3fb05256c85f8dc.cu
// Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "PPPMForceComputeGPU.cuh" #include "hoomd/TextureTools.h" // __scalar2int_rd is __float2int_rd in single, __double2int_rd in double #ifdef SINGLE_PRECISION #define __scalar2int_rd __float2int_rd #else #define __scalar2int_rd __double2int_rd #endif #define GPU_PPPM_MAX_ORDER 7 // workaround for HIP bug #ifdef __HIP_PLATFORM_HCC__ inline __device__ float myAtomicAdd(float* address, float val) { unsigned int* address_as_uint = (unsigned int*)address; unsigned int old = *address_as_uint, assumed; do { assumed = old; old = atomicCAS(address_as_uint, assumed, __float_as_uint(val + __uint_as_float(assumed))); } while (assumed != old); return __uint_as_float(old); } #else inline __device__ float myAtomicAdd(float* address, float val) { return atomicAdd(address, val); } #endif //! GPU implementation of sinc(x)==sin(x)/x __device__ Scalar gpu_sinc(Scalar x) { Scalar sinc = 0; //! Coefficients of a power expansion of sin(x)/x const Scalar sinc_coeff[] = {Scalar(1.0), Scalar(-1.0/6.0), Scalar(1.0/120.0), Scalar(-1.0/5040.0),Scalar(1.0/362880.0), Scalar(-1.0/39916800.0)}; if (x*x <= Scalar(1.0)) { Scalar term = Scalar(1.0); for (unsigned int i = 0; i < 6; ++i) { sinc += sinc_coeff[i] * term; term *= x*x; } } else { sinc = fast::sin(x)/x; } return sinc; } __device__ int3 find_cell(const Scalar3& pos, const unsigned int& inner_nx, const unsigned int& inner_ny, const unsigned int& inner_nz, const uint3& n_ghost_cells, const BoxDim& box, int order, Scalar3& dr) { // compute coordinates in units of the mesh size Scalar3 f = box.makeFraction(pos); uchar3 periodic = box.getPeriodic(); Scalar3 reduced_pos = make_scalar3(f.x * (Scalar)inner_nx, f.y * (Scalar)inner_ny, f.z * (Scalar)inner_nz); reduced_pos += make_scalar3(n_ghost_cells.x, n_ghost_cells.y, n_ghost_cells.z); Scalar shift, shiftone; if (order % 2) { shift =Scalar(0.5); shiftone = Scalar(0.0); } else { shift = Scalar(0.0); shiftone = Scalar(0.5); } int ix = __scalar2int_rd(reduced_pos.x + shift); int iy = __scalar2int_rd(reduced_pos.y + shift); int iz = __scalar2int_rd(reduced_pos.z + shift); // set distance to cell center dr.x = shiftone + (Scalar) ix - reduced_pos.x; dr.y = shiftone + (Scalar) iy - reduced_pos.y; dr.z = shiftone + (Scalar) iz - reduced_pos.z; // handle particles on the boundary if (periodic.x && ix == (int)inner_nx) ix = 0; if (periodic.y && iy == (int)inner_ny) iy = 0; if (periodic.z && iz == (int)inner_nz) iz = 0; return make_int3(ix, iy, iz); } __global__ void gpu_assign_particles_kernel(const uint3 mesh_dim, const uint3 n_ghost_bins, unsigned int work_size, const unsigned int *d_index_array, const Scalar4 *d_postype, const Scalar *d_charge, hipfftComplex *d_mesh, Scalar V_cell, int order, unsigned int offset, BoxDim box, const Scalar *d_rho_coeff) { extern __shared__ Scalar s_coeff[]; // load in interpolation coefficients unsigned int ncoeffs = order*(2*order+1); for (unsigned int cur_offset = 0; cur_offset < ncoeffs; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < ncoeffs) { s_coeff[cur_offset + threadIdx.x] = d_rho_coeff[cur_offset + threadIdx.x]; } } __syncthreads(); unsigned int work_idx = blockIdx.x*blockDim.x+threadIdx.x; if (work_idx >= work_size) return; unsigned int group_idx = work_idx + offset; int3 bin_dim = make_int3(mesh_dim.x+2*n_ghost_bins.x, mesh_dim.y+2*n_ghost_bins.y, mesh_dim.z+2*n_ghost_bins.z); // grid coordinates of bin 
(column-major) unsigned int idx = d_index_array[group_idx]; Scalar4 postype = d_postype[idx]; Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); Scalar qi = d_charge[idx]; // compute coordinates in units of the cell size Scalar3 dr = make_scalar3(0,0,0); int3 bin_coord = find_cell(pos, mesh_dim.x, mesh_dim.y, mesh_dim.z, n_ghost_bins, box, order, dr); // ignore particles that are not within our domain (the error should be caught by HOOMD's cell list) if (bin_coord.x < 0 || bin_coord.x >= bin_dim.x || bin_coord.y < 0 || bin_coord.y >= bin_dim.y || bin_coord.z < 0 || bin_coord.z >= bin_dim.z) { return; } int i = bin_coord.x; int j = bin_coord.y; int k = bin_coord.z; int nlower = - (order - 1)/2; int nupper = order/2; Scalar result; int mult_fact = 2*order + 1; Scalar x0 = qi; bool ignore_x = false; bool ignore_y = false; bool ignore_z = false; // loop over neighboring bins for (int l = nlower; l <= nupper; ++l) { // precalculate assignment factor result = Scalar(0.0); for (int iorder = order-1; iorder >= 0; iorder--) { result = s_coeff[l-nlower + iorder*mult_fact] + result * dr.x; } Scalar y0 = x0 * result; int neighi = i + l; if (neighi >= (int)bin_dim.x) { if (! n_ghost_bins.x) neighi -= (int)bin_dim.x; else ignore_x = true; } else if (neighi < 0) { if (! n_ghost_bins.x) neighi += (int)bin_dim.x; else ignore_x = true; } for (int m = nlower; m <= nupper; ++m) { result = Scalar(0.0); for (int iorder = order-1; iorder >= 0; iorder--) { result = s_coeff[m-nlower + iorder*mult_fact] + result * dr.y; } Scalar z0 = y0 * result; int neighj = j + m; if (neighj >= (int) bin_dim.y) { if (! n_ghost_bins.y) neighj -= (int)bin_dim.y; else ignore_y = true; } else if (neighj < 0) { if (! n_ghost_bins.y) neighj += (int)bin_dim.y; else ignore_y = true; } for (int n = nlower; n <= nupper; ++n) { result = Scalar(0.0); for (int iorder = order-1; iorder >= 0; iorder--) { result = s_coeff[n-nlower + iorder*mult_fact] + result * dr.z; } int neighk = k + n; if (neighk >= (int)bin_dim.z) { if (! n_ghost_bins.z) neighk -= (int)bin_dim.z; else ignore_z = true; } else if (neighk < 0) { if (! 
n_ghost_bins.z) neighk += (int)bin_dim.z; else ignore_z = true; } if (!ignore_x && !ignore_y && !ignore_z) { // write out to global memory using row-major unsigned int cell_idx = neighi + bin_dim.x * (neighj + bin_dim.y * neighk); // compute fraction of particle density assigned to cell // from particles in this bin myAtomicAdd(&d_mesh[cell_idx].x, z0*result/V_cell); } ignore_z = false; } ignore_y = false; } ignore_x = false; } // end of loop over neighboring bins } __global__ void gpu_reduce_meshes(const unsigned int mesh_elements, const hipfftComplex *d_mesh_scratch, hipfftComplex *d_mesh, unsigned int ngpu) { unsigned idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= mesh_elements) return; hipfftComplex res; res.x = 0; res.y = 0; // reduce over all temporary meshes for (unsigned int igpu = 0; igpu < ngpu; ++igpu) { hipfftComplex m = d_mesh_scratch[idx + igpu*mesh_elements]; res.x += m.x; res.y += m.y; } d_mesh[idx] = res; } void gpu_assign_particles(const uint3 mesh_dim, const uint3 n_ghost_bins, const uint3 grid_dim, unsigned int group_size, const unsigned int *d_index_array, const Scalar4 *d_postype, const Scalar *d_charge, hipfftComplex *d_mesh, hipfftComplex *d_mesh_scratch, const unsigned int mesh_elements, int order, const BoxDim& box, unsigned int block_size, const Scalar *d_rho_coeff, const hipDeviceProp_t& dev_prop, const GPUPartition &gpu_partition ) { hipMemsetAsync(d_mesh, 0, sizeof(hipfftComplex)*grid_dim.x*grid_dim.y*grid_dim.z); Scalar V_cell = box.getVolume()/(Scalar)(mesh_dim.x*mesh_dim.y*mesh_dim.z); static unsigned int max_block_size = UINT_MAX; static hipFuncAttributes attr; if (max_block_size == UINT_MAX) { hipFuncGetAttributes(&attr, (const void*)gpu_assign_particles_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(max_block_size, block_size); while (attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock) { run_block_size -= dev_prop.warpSize; } // iterate over active GPUs in reverse, to end up on first GPU when returning from this function unsigned int ngpu = gpu_partition.getNumActiveGPUs(); for (int idev = ngpu - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); if (ngpu > 1) { // zero the temporary mesh array hipMemsetAsync(d_mesh_scratch + idev*mesh_elements, 0, sizeof(hipfftComplex)*mesh_elements); } unsigned int nwork = range.second - range.first; unsigned int n_blocks = nwork/run_block_size+1; unsigned int shared_bytes = (unsigned int)(order*(2*order+1)*sizeof(Scalar)); hipLaunchKernelGGL((gpu_assign_particles_kernel), dim3(n_blocks), dim3(run_block_size), shared_bytes, 0, mesh_dim, n_ghost_bins, nwork, d_index_array, d_postype, d_charge, ngpu > 1 ? d_mesh_scratch + idev*mesh_elements : d_mesh, V_cell, order, range.first, box, d_rho_coeff); } } //! 
Reduce temporary arrays for every GPU void gpu_reduce_meshes(const unsigned int mesh_elements, const hipfftComplex *d_mesh_scratch, hipfftComplex *d_mesh, const unsigned int ngpu, const unsigned int block_size) { // reduce meshes on GPU 0 hipLaunchKernelGGL((gpu_reduce_meshes), dim3(mesh_elements/block_size + 1), dim3(block_size), 0, 0, mesh_elements, d_mesh_scratch, d_mesh, ngpu); } __global__ void gpu_compute_mesh_virial_kernel(const unsigned int n_wave_vectors, hipfftComplex *d_fourier_mesh, Scalar *d_inf_f, Scalar *d_virial_mesh, const Scalar3 *d_k, const bool exclude_dc, Scalar kappa ) { unsigned int idx; idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n_wave_vectors) return; if (!exclude_dc || idx != 0) { // non-zero wave vector hipfftComplex fourier = d_fourier_mesh[idx]; Scalar3 k = d_k[idx]; Scalar rhog = (fourier.x * fourier.x + fourier.y * fourier.y)*d_inf_f[idx]; Scalar vterm = -Scalar(2.0)*(Scalar(1.0)/dot(k,k) + Scalar(0.25)/(kappa*kappa)); d_virial_mesh[0*n_wave_vectors+idx] = rhog*(Scalar(1.0) + vterm*k.x*k.x); // xx d_virial_mesh[1*n_wave_vectors+idx] = rhog*( vterm*k.x*k.y); // xy d_virial_mesh[2*n_wave_vectors+idx] = rhog*( vterm*k.x*k.z); // xz d_virial_mesh[3*n_wave_vectors+idx] = rhog*(Scalar(1.0) + vterm*k.y*k.y); // yy d_virial_mesh[4*n_wave_vectors+idx] = rhog*( vterm*k.y*k.z); // yz d_virial_mesh[5*n_wave_vectors+idx] = rhog*(Scalar(1.0) + vterm*k.z*k.z); // zz } else { d_virial_mesh[0*n_wave_vectors+idx] = Scalar(0.0); d_virial_mesh[1*n_wave_vectors+idx] = Scalar(0.0); d_virial_mesh[2*n_wave_vectors+idx] = Scalar(0.0); d_virial_mesh[3*n_wave_vectors+idx] = Scalar(0.0); d_virial_mesh[4*n_wave_vectors+idx] = Scalar(0.0); d_virial_mesh[5*n_wave_vectors+idx] = Scalar(0.0); } } void gpu_compute_mesh_virial(const unsigned int n_wave_vectors, hipfftComplex *d_fourier_mesh, Scalar *d_inf_f, Scalar *d_virial_mesh, const Scalar3 *d_k, const bool exclude_dc, Scalar kappa) { const unsigned int block_size = 256; dim3 grid(n_wave_vectors/block_size + 1, 1, 1); hipLaunchKernelGGL((gpu_compute_mesh_virial_kernel), dim3(grid), dim3(block_size), 0, 0, n_wave_vectors, d_fourier_mesh, d_inf_f, d_virial_mesh, d_k, exclude_dc, kappa); } __global__ void gpu_update_meshes_kernel(const unsigned int n_wave_vectors, hipfftComplex *d_fourier_mesh, hipfftComplex *d_fourier_mesh_G_x, hipfftComplex *d_fourier_mesh_G_y, hipfftComplex *d_fourier_mesh_G_z, const Scalar *d_inf_f, const Scalar3 *d_k, unsigned int NNN) { unsigned int k; k = blockDim.x * blockIdx.x + threadIdx.x; if (k >= n_wave_vectors) return; hipfftComplex f = d_fourier_mesh[k]; Scalar scaled_inf_f = d_inf_f[k] / ((Scalar)NNN); Scalar3 kvec = d_k[k]; // Normalization hipfftComplex fourier_G_x; fourier_G_x.x =f.y * kvec.x * scaled_inf_f; fourier_G_x.y =-f.x * kvec.x * scaled_inf_f; hipfftComplex fourier_G_y; fourier_G_y.x =f.y * kvec.y * scaled_inf_f; fourier_G_y.y =-f.x * kvec.y * scaled_inf_f; hipfftComplex fourier_G_z; fourier_G_z.x =f.y * kvec.z * scaled_inf_f; fourier_G_z.y =-f.x * kvec.z * scaled_inf_f; // store in global memory d_fourier_mesh_G_x[k] = fourier_G_x; d_fourier_mesh_G_y[k] = fourier_G_y; d_fourier_mesh_G_z[k] = fourier_G_z; } void gpu_update_meshes(const unsigned int n_wave_vectors, hipfftComplex *d_fourier_mesh, hipfftComplex *d_fourier_mesh_G_x, hipfftComplex *d_fourier_mesh_G_y, hipfftComplex *d_fourier_mesh_G_z, const Scalar *d_inf_f, const Scalar3 *d_k, unsigned int NNN, unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes 
attr; hipFuncGetAttributes(&attr, (const void*)gpu_update_meshes_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(max_block_size, block_size); dim3 grid(n_wave_vectors/run_block_size + 1, 1, 1); hipLaunchKernelGGL((gpu_update_meshes_kernel), dim3(grid), dim3(run_block_size), 0, 0, n_wave_vectors, d_fourier_mesh, d_fourier_mesh_G_x, d_fourier_mesh_G_y, d_fourier_mesh_G_z, d_inf_f, d_k, NNN); } __global__ void gpu_compute_forces_kernel(const unsigned int work_size, const Scalar4 *d_postype, Scalar4 *d_force, const uint3 grid_dim, const uint3 n_ghost_cells, const Scalar *d_charge, const BoxDim box, int order, const unsigned int *d_index_array, const hipfftComplex *inv_fourier_mesh_x, const hipfftComplex *inv_fourier_mesh_y, const hipfftComplex *inv_fourier_mesh_z, const Scalar *d_rho_coeff, const unsigned int offset) { extern __shared__ Scalar s_coeff[]; // load in interpolation coefficients unsigned int ncoeffs = order*(2*order+1); for (unsigned int cur_offset = 0; cur_offset < ncoeffs; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < ncoeffs) { s_coeff[cur_offset + threadIdx.x] = d_rho_coeff[cur_offset + threadIdx.x]; } } __syncthreads(); unsigned int work_idx = blockIdx.x * blockDim.x + threadIdx.x; if (work_idx >= work_size) return; unsigned int group_idx = work_idx + offset; unsigned int idx = d_index_array[group_idx]; int3 inner_dim = make_int3(grid_dim.x-2*n_ghost_cells.x, grid_dim.y-2*n_ghost_cells.y, grid_dim.z-2*n_ghost_cells.z); Scalar4 postype = d_postype[idx]; Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); unsigned int type = __scalar_as_int(postype.w); Scalar qi = d_charge[idx]; Scalar3 dr = make_scalar3(0,0,0); // find cell the particle is in int3 cell_coord = find_cell(pos, inner_dim.x, inner_dim.y, inner_dim.z, n_ghost_cells, box, order, dr); // ignore particles that are not within our domain (the error should be caught by HOOMD's cell list) if (cell_coord.x < 0 || cell_coord.x >= (int) grid_dim.x || cell_coord.y < 0 || cell_coord.y >= (int) grid_dim.y || cell_coord.z < 0 || cell_coord.z >= (int) grid_dim.z) { return; } Scalar3 force = make_scalar3(0.0,0.0,0.0); int nlower = -(order-1)/2; int nupper = order/2; Scalar result; int mult_fact = 2*order + 1; // back-interpolate forces from neighboring mesh points for (int l = nlower; l <= nupper; ++l) { result = Scalar(0.0); for (int k = order-1; k >= 0; k--) { result = s_coeff[l-nlower + k*mult_fact] + result * dr.x; } Scalar x0 = result; for (int m = nlower; m <= nupper; ++m) { result = Scalar(0.0); for (int k = order-1; k >= 0; k--) { result = s_coeff[m-nlower + k*mult_fact] + result * dr.y; } Scalar y0 = x0*result; for (int n = nlower; n <= nupper; ++n) { result = Scalar(0.0); for (int k = order-1; k >= 0; k--) { result = s_coeff[n-nlower + k*mult_fact] + result * dr.z; } Scalar z0 = y0*result; int neighl = (int) cell_coord.x + l; int neighm = (int) cell_coord.y + m; int neighn = (int) cell_coord.z + n; if (! n_ghost_cells.x) { if (neighl >= (int)grid_dim.x) neighl -= grid_dim.x; else if (neighl < 0) neighl += grid_dim.x; } if (! n_ghost_cells.y) { if (neighm >= (int)grid_dim.y) neighm -= grid_dim.y; else if (neighm < 0) neighm += grid_dim.y; } if (! 
n_ghost_cells.z) { if (neighn >= (int)grid_dim.z) neighn -= grid_dim.z; else if (neighn < 0) neighn += grid_dim.z; } // use column-major layout unsigned int cell_idx = neighl + grid_dim.x * (neighm + grid_dim.y * neighn); hipfftComplex inv_mesh_x = inv_fourier_mesh_x[cell_idx]; hipfftComplex inv_mesh_y = inv_fourier_mesh_y[cell_idx]; hipfftComplex inv_mesh_z = inv_fourier_mesh_z[cell_idx]; force.x += qi*z0*inv_mesh_x.x; force.y += qi*z0*inv_mesh_y.x; force.z += qi*z0*inv_mesh_z.x; } } } // end neighbor cells loop d_force[idx] = make_scalar4(force.x,force.y,force.z,0.0); } void gpu_compute_forces(const unsigned int N, const Scalar4 *d_postype, Scalar4 *d_force, const hipfftComplex *d_inv_fourier_mesh_x, const hipfftComplex *d_inv_fourier_mesh_y, const hipfftComplex *d_inv_fourier_mesh_z, const uint3 grid_dim, const uint3 n_ghost_cells, const Scalar *d_charge, const BoxDim& box, int order, const unsigned int *d_index_array, const GPUPartition& gpu_partition, const GPUPartition& all_gpu_partition, const Scalar *d_rho_coeff, unsigned int block_size, bool local_fft, unsigned int inv_mesh_elements) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_forces_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(max_block_size, block_size); // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = all_gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = all_gpu_partition.getRangeAndSetGPU(idev); // reset force array for ALL particles hipMemsetAsync(d_force+range.first, 0, sizeof(Scalar4)*(range.second-range.first)); } // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; unsigned int n_blocks = nwork/run_block_size+1; unsigned int shared_bytes = (unsigned int)(order*(2*order+1)*sizeof(Scalar)); hipLaunchKernelGGL((gpu_compute_forces_kernel), dim3(n_blocks), dim3(run_block_size), shared_bytes, 0, nwork, d_postype, d_force, grid_dim, n_ghost_cells, d_charge, box, order, d_index_array, local_fft ? d_inv_fourier_mesh_x + idev*inv_mesh_elements : d_inv_fourier_mesh_x, local_fft ? d_inv_fourier_mesh_y + idev*inv_mesh_elements : d_inv_fourier_mesh_y, local_fft ? d_inv_fourier_mesh_z + idev*inv_mesh_elements : d_inv_fourier_mesh_z, d_rho_coeff, range.first); } } __global__ void kernel_calculate_pe_partial( int n_wave_vectors, Scalar *sum_partial, const hipfftComplex *d_fourier_mesh, const Scalar *d_inf_f, const bool exclude_dc) { HIP_DYNAMIC_SHARED( Scalar, sdata) unsigned int tidx = threadIdx.x; unsigned int j; j = blockDim.x * blockIdx.x + threadIdx.x; Scalar mySum = Scalar(0.0); if (j < n_wave_vectors) { if (! 
exclude_dc || j != 0) { mySum = d_fourier_mesh[j].x * d_fourier_mesh[j].x + d_fourier_mesh[j].y * d_fourier_mesh[j].y; mySum *= d_inf_f[j]; } } sdata[tidx] = mySum; __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (tidx < offs) { sdata[tidx] += sdata[tidx + offs]; } offs >>= 1; __syncthreads(); } // write result to global memory if (tidx == 0) sum_partial[blockIdx.x] = sdata[0]; } __global__ void kernel_final_reduce_pe(Scalar* sum_partial, unsigned int nblocks, Scalar *sum) { HIP_DYNAMIC_SHARED( Scalar, smem) if (threadIdx.x == 0) *sum = Scalar(0.0); for (int start = 0; start< nblocks; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < nblocks) smem[threadIdx.x] = sum_partial[start + threadIdx.x]; else smem[threadIdx.x] = Scalar(0.0); __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) smem[threadIdx.x] += smem[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { *sum += smem[0]; } } } void gpu_compute_pe(unsigned int n_wave_vectors, Scalar *d_sum_partial, Scalar *d_sum, const hipfftComplex *d_fourier_mesh, const Scalar *d_inf_f, const unsigned int block_size, const uint3 mesh_dim, const bool exclude_dc) { unsigned int n_blocks = n_wave_vectors/block_size + 1; unsigned int shared_size = (unsigned int)(block_size * sizeof(Scalar)); dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((kernel_calculate_pe_partial), dim3(grid), dim3(block_size), shared_size, 0, n_wave_vectors, d_sum_partial, d_fourier_mesh, d_inf_f, exclude_dc); // calculate final sum of mesh values const unsigned int final_block_size = 256; shared_size = final_block_size*sizeof(Scalar); hipLaunchKernelGGL((kernel_final_reduce_pe), dim3(1), dim3(final_block_size), shared_size, 0, d_sum_partial, n_blocks, d_sum); } __global__ void kernel_calculate_virial_partial( int n_wave_vectors, Scalar *sum_virial_partial, const Scalar *d_mesh_virial) { HIP_DYNAMIC_SHARED( Scalar, sdata) unsigned int j; j = blockDim.x * blockIdx.x + threadIdx.x; unsigned int tidx = threadIdx.x; Scalar mySum_xx = Scalar(0.0); Scalar mySum_xy = Scalar(0.0); Scalar mySum_xz = Scalar(0.0); Scalar mySum_yy = Scalar(0.0); Scalar mySum_yz = Scalar(0.0); Scalar mySum_zz = Scalar(0.0); if (j < n_wave_vectors) { mySum_xx = d_mesh_virial[0*n_wave_vectors+j]; mySum_xy = d_mesh_virial[1*n_wave_vectors+j]; mySum_xz = d_mesh_virial[2*n_wave_vectors+j]; mySum_yy = d_mesh_virial[3*n_wave_vectors+j]; mySum_yz = d_mesh_virial[4*n_wave_vectors+j]; mySum_zz = d_mesh_virial[5*n_wave_vectors+j]; } sdata[0*blockDim.x+tidx] = mySum_xx; sdata[1*blockDim.x+tidx] = mySum_xy; sdata[2*blockDim.x+tidx] = mySum_xz; sdata[3*blockDim.x+tidx] = mySum_yy; sdata[4*blockDim.x+tidx] = mySum_yz; sdata[5*blockDim.x+tidx] = mySum_zz; __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (tidx < offs) { sdata[0*blockDim.x+tidx] += sdata[0*blockDim.x+tidx + offs]; sdata[1*blockDim.x+tidx] += sdata[1*blockDim.x+tidx + offs]; sdata[2*blockDim.x+tidx] += sdata[2*blockDim.x+tidx + offs]; sdata[3*blockDim.x+tidx] += sdata[3*blockDim.x+tidx + offs]; sdata[4*blockDim.x+tidx] += sdata[4*blockDim.x+tidx + offs]; sdata[5*blockDim.x+tidx] += sdata[5*blockDim.x+tidx + offs]; } offs >>= 1; __syncthreads(); } // write result to global memory if (tidx == 0) { sum_virial_partial[0*gridDim.x+blockIdx.x] = sdata[0*blockDim.x]; sum_virial_partial[1*gridDim.x+blockIdx.x] = sdata[1*blockDim.x]; sum_virial_partial[2*gridDim.x+blockIdx.x] = sdata[2*blockDim.x]; 
sum_virial_partial[3*gridDim.x+blockIdx.x] = sdata[3*blockDim.x]; sum_virial_partial[4*gridDim.x+blockIdx.x] = sdata[4*blockDim.x]; sum_virial_partial[5*gridDim.x+blockIdx.x] = sdata[5*blockDim.x]; } } __global__ void kernel_final_reduce_virial(Scalar* sum_virial_partial, unsigned int nblocks, Scalar *sum_virial) { HIP_DYNAMIC_SHARED( Scalar, smem) if (threadIdx.x == 0) { sum_virial[0] = Scalar(0.0); sum_virial[1] = Scalar(0.0); sum_virial[2] = Scalar(0.0); sum_virial[3] = Scalar(0.0); sum_virial[4] = Scalar(0.0); sum_virial[5] = Scalar(0.0); } for (int start = 0; start< nblocks; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < nblocks) { smem[0*blockDim.x+threadIdx.x] = sum_virial_partial[0*nblocks+start+threadIdx.x]; smem[1*blockDim.x+threadIdx.x] = sum_virial_partial[1*nblocks+start+threadIdx.x]; smem[2*blockDim.x+threadIdx.x] = sum_virial_partial[2*nblocks+start+threadIdx.x]; smem[3*blockDim.x+threadIdx.x] = sum_virial_partial[3*nblocks+start+threadIdx.x]; smem[4*blockDim.x+threadIdx.x] = sum_virial_partial[4*nblocks+start+threadIdx.x]; smem[5*blockDim.x+threadIdx.x] = sum_virial_partial[5*nblocks+start+threadIdx.x]; } else { smem[0*blockDim.x+threadIdx.x] = Scalar(0.0); smem[1*blockDim.x+threadIdx.x] = Scalar(0.0); smem[2*blockDim.x+threadIdx.x] = Scalar(0.0); smem[3*blockDim.x+threadIdx.x] = Scalar(0.0); smem[4*blockDim.x+threadIdx.x] = Scalar(0.0); smem[5*blockDim.x+threadIdx.x] = Scalar(0.0); } __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { smem[0*blockDim.x+threadIdx.x] += smem[0*blockDim.x+threadIdx.x + offs]; smem[1*blockDim.x+threadIdx.x] += smem[1*blockDim.x+threadIdx.x + offs]; smem[2*blockDim.x+threadIdx.x] += smem[2*blockDim.x+threadIdx.x + offs]; smem[3*blockDim.x+threadIdx.x] += smem[3*blockDim.x+threadIdx.x + offs]; smem[4*blockDim.x+threadIdx.x] += smem[4*blockDim.x+threadIdx.x + offs]; smem[5*blockDim.x+threadIdx.x] += smem[5*blockDim.x+threadIdx.x + offs]; } offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { sum_virial[0] += smem[0*blockDim.x]; sum_virial[1] += smem[1*blockDim.x]; sum_virial[2] += smem[2*blockDim.x]; sum_virial[3] += smem[3*blockDim.x]; sum_virial[4] += smem[4*blockDim.x]; sum_virial[5] += smem[5*blockDim.x]; } } } void gpu_compute_virial(unsigned int n_wave_vectors, Scalar *d_sum_virial_partial, Scalar *d_sum_virial, const Scalar *d_mesh_virial, const unsigned int block_size) { unsigned int n_blocks = n_wave_vectors/block_size + 1; unsigned int shared_size = (unsigned int)(6* block_size * sizeof(Scalar)); dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((kernel_calculate_virial_partial), dim3(grid), dim3(block_size), shared_size, 0, n_wave_vectors, d_sum_virial_partial, d_mesh_virial); // calculate final virial values const unsigned int final_block_size = 256; shared_size = 6*final_block_size*sizeof(Scalar); hipLaunchKernelGGL((kernel_final_reduce_virial), dim3(1), dim3(final_block_size), shared_size, 0, d_sum_virial_partial, n_blocks, d_sum_virial); } template<bool local_fft> __global__ void gpu_compute_influence_function_kernel(const uint3 mesh_dim, const unsigned int n_wave_vectors, const uint3 global_dim, Scalar *d_inf_f, Scalar3 *d_k, const Scalar3 b1, const Scalar3 b2, const Scalar3 b3, const uint3 pidx, const uint3 pdim, int nbx, int nby, int nbz, const Scalar *gf_b, int order, Scalar kappa, Scalar alpha) { unsigned int kidx; kidx = blockDim.x * blockIdx.x + threadIdx.x; if (kidx >= n_wave_vectors) return; int l,m,n; if (local_fft) { // use row-major layout int ny = 
mesh_dim.y; int nx = mesh_dim.x; n = kidx/ny/nx; m = (kidx-n*ny*nx)/nx; l = kidx % nx; } #ifdef ENABLE_MPI else { // local layout: row-major int ny = mesh_dim.y; int nx = mesh_dim.x; int n_local = kidx/ny/nx; int m_local = (kidx-n_local*ny*nx)/nx; int l_local = kidx % nx; // cyclic distribution l = l_local*pdim.x + pidx.x; m = m_local*pdim.y + pidx.y; n = n_local*pdim.z + pidx.z; } #endif // compute Miller indices if (l >= (int)(global_dim.x/2 + global_dim.x%2)) l -= (int) global_dim.x; if (m >= (int)(global_dim.y/2 + global_dim.y%2)) m -= (int) global_dim.y; if (n >= (int)(global_dim.z/2 + global_dim.z%2)) n -= (int) global_dim.z; Scalar val; Scalar3 kval = (Scalar)l*b1+(Scalar)m*b2+(Scalar)n*b3; Scalar3 kH = Scalar(2.0*M_PI)*make_scalar3(Scalar(1.0)/(Scalar)global_dim.x, Scalar(1.0)/(Scalar)global_dim.y, Scalar(1.0)/(Scalar)global_dim.z); Scalar snx = fast::sin(Scalar(0.5)*l*kH.x); Scalar snx2 = snx*snx; Scalar sny = fast::sin(Scalar(0.5)*m*kH.y); Scalar sny2 = sny*sny; Scalar snz = fast::sin(Scalar(0.5)*n*kH.z); Scalar snz2 = snz*snz; Scalar sx(0.0), sy(0.0), sz(0.0); for (int iorder = order-1; iorder >= 0; iorder--) { sx = gf_b[iorder] + sx*snx2; sy = gf_b[iorder] + sy*sny2; sz = gf_b[iorder] + sz*snz2; } Scalar denominator = sx*sy*sz; denominator *= denominator; if (l != 0 || m != 0 || n != 0) { Scalar sum1(0.0); Scalar numerator = Scalar(4.0*M_PI)/dot(kval,kval); for (int ix = -nbx; ix <= nbx; ix++) { Scalar qx = ((Scalar)l + (Scalar)ix*global_dim.x); Scalar3 knx = qx*b1; Scalar argx = Scalar(0.5)*qx*kH.x; Scalar wxs = gpu_sinc(argx); Scalar wx(1.0); for (int iorder = 0; iorder < order; ++iorder) { wx *= wxs; } for (int iy = -nby; iy <= nby; iy++) { Scalar qy = ((Scalar)m + (Scalar)iy*global_dim.y); Scalar3 kny = qy*b2; Scalar argy = Scalar(0.5)*qy*kH.y; Scalar wys = gpu_sinc(argy); Scalar wy(1.0); for (int iorder = 0; iorder < order; ++iorder) { wy *= wys; } for (int iz = -nbz; iz <= nbz; iz++) { Scalar qz = ((Scalar)n + (Scalar)iz*global_dim.z); Scalar3 knz = qz*b3; Scalar argz = Scalar(0.5)*qz*kH.z; Scalar wzs = gpu_sinc(argz); Scalar wz(1.0); for (int iorder = 0; iorder < order; ++iorder) { wz *= wzs; } Scalar3 kn = knx + kny + knz; Scalar dot1 = dot(kn, kval); Scalar dot2 = dot(kn, kn)+alpha*alpha; Scalar arg_gauss = Scalar(0.25)*dot2/kappa/kappa; Scalar gauss = exp(-arg_gauss); sum1 += (dot1/dot2) * gauss * wx * wx * wy * wy * wz * wz; } } } val = numerator*sum1/denominator; } else { val = Scalar(0.0); } // write out result d_inf_f[kidx] = val; d_k[kidx] = kval; } void gpu_compute_influence_function(const uint3 mesh_dim, const uint3 global_dim, Scalar *d_inf_f, Scalar3 *d_k, const BoxDim& global_box, const bool local_fft, const uint3 pidx, const uint3 pdim, const Scalar EPS_HOC, Scalar kappa, Scalar alpha, const Scalar *d_gf_b, int order, unsigned int block_size) { // compute reciprocal lattice vectors Scalar3 a1 = global_box.getLatticeVector(0); Scalar3 a2 = global_box.getLatticeVector(1); Scalar3 a3 = global_box.getLatticeVector(2); Scalar V_box = global_box.getVolume(); Scalar3 b1 = Scalar(2.0*M_PI)*make_scalar3(a2.y*a3.z-a2.z*a3.y, a2.z*a3.x-a2.x*a3.z, a2.x*a3.y-a2.y*a3.x)/V_box; Scalar3 b2 = Scalar(2.0*M_PI)*make_scalar3(a3.y*a1.z-a3.z*a1.y, a3.z*a1.x-a3.x*a1.z, a3.x*a1.y-a3.y*a1.x)/V_box; Scalar3 b3 = Scalar(2.0*M_PI)*make_scalar3(a1.y*a2.z-a1.z*a2.y, a1.z*a2.x-a1.x*a2.z, a1.x*a2.y-a1.y*a2.x)/V_box; unsigned int num_wave_vectors = mesh_dim.x*mesh_dim.y*mesh_dim.z; Scalar3 L = global_box.getL(); Scalar temp = floor(((kappa*L.x/(M_PI*global_dim.x)) * 
pow(-log(EPS_HOC),0.25))); int nbx = (int)temp; temp = floor(((kappa*L.y/(M_PI*global_dim.y)) * pow(-log(EPS_HOC),0.25))); int nby = (int)temp; temp = floor(((kappa*L.z/(M_PI*global_dim.z)) * pow(-log(EPS_HOC),0.25))); int nbz = (int)temp; if (local_fft) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_influence_function_kernel<true>); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(max_block_size, block_size); unsigned int n_blocks = num_wave_vectors/run_block_size; if (num_wave_vectors % run_block_size) n_blocks += 1; dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((gpu_compute_influence_function_kernel<true>), dim3(grid), dim3(run_block_size), 0, 0, mesh_dim, num_wave_vectors, global_dim, d_inf_f, d_k, b1, b2, b3, pidx, pdim, nbx, nby, nbz, d_gf_b, order, kappa, alpha); } #ifdef ENABLE_MPI else { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_influence_function_kernel<false>); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(max_block_size, block_size); unsigned int n_blocks = num_wave_vectors/run_block_size; if (num_wave_vectors % run_block_size) n_blocks += 1; dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((gpu_compute_influence_function_kernel<false>), dim3(grid), dim3(run_block_size), 0, 0, mesh_dim, num_wave_vectors, global_dim, d_inf_f, d_k, b1, b2, b3, pidx, pdim, nbx, nby, nbz, d_gf_b, order, kappa, alpha); } #endif } //! The developer has chosen not to document this function __global__ void gpu_fix_exclusions_kernel(Scalar4 *d_force, Scalar *d_virial, const size_t virial_pitch, const Scalar4 *d_pos, const Scalar *d_charge, const BoxDim box, const unsigned int *d_n_neigh, const unsigned int *d_nlist, const Index2D nli, Scalar kappa, Scalar alpha, unsigned int *d_group_members, unsigned int group_size) { // start by identifying which particle we are to handle int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; const Scalar sqrtpi = sqrtf(M_PI); unsigned int n_neigh = d_n_neigh[idx]; Scalar4 postypei = __ldg(d_pos + idx); Scalar3 posi = make_scalar3(postypei.x, postypei.y, postypei.z); Scalar qi = __ldg(d_charge + idx); // initialize the force to 0 Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); Scalar virial[6]; for (unsigned int i = 0; i < 6; i++) virial[i] = Scalar(0.0); unsigned int cur_j = 0; // prefetch neighbor index unsigned int next_j = d_nlist[nli(idx, 0)]; for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++) { { // read the current neighbor index (MEM TRANSFER: 4 bytes) // prefetch the next value and set the current one cur_j = next_j; if (neigh_idx+1 < n_neigh) next_j = d_nlist[nli(idx, neigh_idx+1)]; // get the neighbor's position (MEM TRANSFER: 16 bytes) Scalar4 postypej = __ldg(d_pos + cur_j); Scalar3 posj = make_scalar3(postypej.x, postypej.y, postypej.z); Scalar qj = __ldg(d_charge + cur_j); // calculate dr (with periodic boundary conditions) (FLOPS: 3) Scalar3 dx = posi - posj; // apply periodic boundary conditions: (FLOPS 12) dx = box.minImage(dx); // calculate r squared (FLOPS: 5) Scalar rsq = dot(dx,dx); Scalar r = sqrtf(rsq); Scalar qiqj = qi * qj; Scalar expfac = fast::exp(-alpha*r); Scalar arg1 = kappa * r - alpha/Scalar(2.0)/kappa; Scalar arg2 = kappa * r + 
alpha/Scalar(2.0)/kappa; Scalar erffac = (::erf(arg1)*expfac + expfac - fast::erfc(arg2)*exp(alpha*r))/(Scalar(2.0)*r); Scalar force_divr = qiqj * (expfac*Scalar(2.0)*kappa/sqrtpi*fast::exp(-arg1*arg1) - Scalar(0.5)*alpha*(expfac*::erfc(arg1)+fast::exp(alpha*r)*fast::erfc(arg2)) - erffac)/rsq; // subtract long-range part of pair-interaction Scalar pair_eng = -qiqj * erffac; Scalar force_div2r = Scalar(0.5) * force_divr; virial[0] += dx.x * dx.x * force_div2r; virial[1] += dx.x * dx.y * force_div2r; virial[2] += dx.x * dx.z * force_div2r; virial[3] += dx.y * dx.y * force_div2r; virial[4] += dx.y * dx.z * force_div2r; virial[5] += dx.z * dx.z * force_div2r; force.x += dx.x * force_divr; force.y += dx.y * force_divr; force.z += dx.z * force_divr; force.w += pair_eng; } } force.w *= Scalar(0.5); d_force[idx].x += force.x; d_force[idx].y += force.y; d_force[idx].z += force.z; d_force[idx].w += force.w; for (unsigned int i = 0; i < 6; i++) d_virial[i*virial_pitch+idx] += virial[i]; } } //! The developer has chosen not to document this function hipError_t gpu_fix_exclusions(Scalar4 *d_force, Scalar *d_virial, const size_t virial_pitch, const unsigned int Nmax, const Scalar4 *d_pos, const Scalar *d_charge, const BoxDim& box, const unsigned int *d_n_ex, const unsigned int *d_exlist, const Index2D nex, Scalar kappa, Scalar alpha, unsigned int *d_group_members, unsigned int group_size, int block_size) { dim3 grid( group_size / block_size + 1, 1, 1); dim3 threads(block_size, 1, 1); hipLaunchKernelGGL((gpu_fix_exclusions_kernel), dim3(grid), dim3(threads ), 0, 0, d_force, d_virial, virial_pitch, d_pos, d_charge, box, d_n_ex, d_exlist, nex, kappa, alpha, d_group_members, group_size); return hipSuccess; }
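// Illustrative sketch (not part of PPPMForceComputeGPU above): the host wrappers in that
// file repeatedly use the same launch-configuration pattern -- query the kernel's
// maxThreadsPerBlock once via cudaFuncGetAttributes, clamp the caller-supplied block size,
// and round the grid size up so every element is covered. A minimal self-contained CUDA
// version of that pattern follows, with a placeholder kernel and hypothetical names.
#include <cuda_runtime.h>
#include <algorithm>
#include <climits>
#include <cstdio>

__global__ void scale_kernel(float *d_data, float alpha, unsigned int n)
    {
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        d_data[idx] *= alpha;
    }

void launch_scale(float *d_data, float alpha, unsigned int n, unsigned int block_size)
    {
    // query and cache the kernel attribute once, as the wrappers above do
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void *)scale_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }
    unsigned int run_block_size = std::min(max_block_size, block_size);
    // one extra block covers the remainder when n is not a multiple of run_block_size
    unsigned int n_blocks = n / run_block_size + 1;
    scale_kernel<<<n_blocks, run_block_size>>>(d_data, alpha, n);
    }

int main()
    {
    const unsigned int n = 1000;
    float *d_data;
    cudaMalloc(&d_data, n * sizeof(float));
    cudaMemset(d_data, 0, n * sizeof(float));
    launch_scale(d_data, 2.0f, n, 256);
    cudaDeviceSynchronize();
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_data);
    return 0;
    }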
ff0bc49ba781e44c7eb36c0cbd45f72546d8d963.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <algorithm> #include <ctime> #include <cmath> #include <math.h> #define NUM (1<<12) #define U 2 #define V 16 using namespace std; typedef struct{ float x,y; } point; float ** generate_set(point a, point b, point c, point *points,point *all_points); float get_distance(point a, point b); __global__ void trilateration(point *a, point *b, point *c, float ** dv, point * pts); int main(int argc, char *argv[]){ srand(time(NULL)); cout << NUM << endl; //point *results =(point *) malloc((NUM/4) * (sizeof(point))); point *points =(point *) malloc((NUM/4) * (sizeof(point))); point *all_points =(point *) malloc((NUM) * (sizeof(point))); point a = {3.4,-2.4}; point b = {5.6,1.23}; point c = {-3.8,5.4}; float ** distance_vector = generate_set(a,b,c,points, all_points); float ** dv; point * da; point * db; point * dc; point * pts; /* hipMalloc(&da, sizeof(point *)); hipMalloc(&db, sizeof(point *)); hipMalloc(&dc, sizeof(point *)); hipMalloc((void **)&pts, (NUM/4) * sizeof(point)); hipMalloc((void **)&dv, NUM*sizeof(float *)); for(int i = 0; i < NUM; i++){ hipMalloc(&dv[i], 3*sizeof(float)); } hipMemcpy(dv, distance_vector, NUM * sizeof(float*), hipMemcpyHostToDevice); hipMemcpy(da, &a, sizeof(point),hipMemcpyHostToDevice); hipMemcpy(db, &b, sizeof(point),hipMemcpyHostToDevice); hipMemcpy(dc, &c, sizeof(point),hipMemcpyHostToDevice); */ hipMallocManaged(&da, sizeof(point *)); hipMallocManaged(&db, sizeof(point *)); hipMallocManaged(&dc, sizeof(point *)); hipMallocManaged(&pts, (NUM) * sizeof(point)); hipMallocManaged(&dv, NUM * sizeof(float *)); for(int i = 0; i < NUM; i++){ hipMallocManaged(&dv[i], 3*sizeof(float)); } *da = a; *db = b; *dc = c; for(int i = 0; i < NUM; i++){ for(int j = 0; j < 3; j++){ dv[i][j] = distance_vector[i][j]; } } point guard = {3.4, -2.4}; point center = {1,1}; cout << "HERE " << get_distance(guard, center) << endl; hipLaunchKernelGGL(( trilateration), dim3(U),dim3(V), 0, 0, da,db,dc,dv,pts); hipDeviceSynchronize(); //hipMemcpy(results, pts, (NUM/4) * sizeof(point),hipMemcpyDeviceToHost); /*for(int i = 0; i < NUM/4; i++){ if(results[i].x != 0) cout << results[i].x << ", " << results[i].y << "\n"; }*/ for(int i = 0; i < 20; i++){ cout << pts[i].x << ", " << pts[i].y << " | Actual point: " << all_points[i].x << ", " << all_points[i].y << "\n"; } /* first points 0.170442, -0.212715 0.642852, -0.825177 1.1408, -1.19354 1.42159, -1.6076 1.62658, -2.18829 1.77826, -2.66518 2.17155, -2.97643 2.65642, -3.23502 3.06065, -3.62799 3.42531, -4.1356 3.96662, -4.42709 4.41263, -4.69845 4.93671, -4.96385 5.46347, -5.28732 6.06384, -5.70538 6.48004, -6.27095 7.05871, -6.8165 7.63242, -7.21735 8.05061, -7.52002 8.4322, -7.82146 8.73583, -8.10203 9.03263, -8.45821 9.40358, -8.86348 9.6679, -9.13161 */ free(points); hipFree(da); hipFree(db); hipFree(dc); hipFree(points); for(int i = 0; i < NUM; i++) hipFree(dv[i]); hipFree(dv); return 0; } float ** generate_set(point a, point b, point c, point *points, point *all_points){ float ** dist = (float **) malloc(NUM * sizeof(float *)); int i,j; for(j = 0; j < NUM; j++){ dist[j] = (float *) malloc(3 * sizeof(float)); for(i = 0; i < 3; i++){ dist[j][i] = 0; } } srand(time(NULL)); float x_ave = 0, y_ave = 0; point next; next.x = 0; next.y = 0; for(i = 0; i < NUM; i++){ dist[i][0] = get_distance(a,next); dist[i][1] = get_distance(b,next); dist[i][2] = get_distance(c,next); all_points[i] = next; 
//cout << dist[i][0] << "," << dist[i][1] << "," << dist[i][2] << endl; if(i%4 == 0 && i != 0){ point t; t.x = x_ave/4; t.y = y_ave/4; points[(i/4)-1] = t; x_ave = 0; y_ave = 0; //if(i < 100) cout << t.x << ", " << t.y << endl; } x_ave += next.x; y_ave += next.y; //get new point float temp = (rand() % 20000); float delta_x = (temp / 100000); temp = (rand() % 20000); float delta_y = (temp / 100000) - .2; next.x += delta_x; next.y += delta_y; } return dist; } float get_distance(point a, point b){ float distance = sqrt((pow((a.x - b.x),2) + pow((a.y - b.y),2))); return distance; } float norm(point p){ return pow(pow(p.x,2) + pow(p.y,2), .5); } __global__ void trilateration(point *a, point *b, point *c, float ** dv, point * pts){ int i = blockIdx.x * blockDim.x + threadIdx.x; int j; for(j =0; j < ((NUM)/(U*V*4));j++){ float ave_y = 0, ave_x = 0; float xa = a->x; float ya = a->y; float xb = b->x; float yb = b->y; float xc = c->x; float yc = c->y; float ra = dv[i+ j*(U*V)][0]; float rb = dv[i+ j*(U*V)][1]; float rc = dv[i+ j*(U*V)][2]; float numerator = ((xb - xa) * (xc * xc + yc * yc - rc*rc) + (xa - xc) * (xb * xb + yb * yb - rb * rb) + (xc - xb) * (xa * xa + ya * ya - ra * ra)); float denominator = (2 * (yc *(xb - xa) + yb * (xa - xc) + ya * (xc - xb))); float y = numerator/denominator; float x = (rb * rb + xa * xa + ya * ya - ra * ra - xb * xb - yb * yb - 2*(ya - yb) * y) / (2*(xa -xb)); point ret; ret.x = x; ret.y = y; pts[i + j *U*V] = ret; __syncthreads(); } }
ff0bc49ba781e44c7eb36c0cbd45f72546d8d963.cu
#include <stdio.h> #include <iostream> #include <cuda_runtime.h> #include <cuda.h> #include <algorithm> #include <ctime> #include <cmath> #include <math.h> #define NUM (1<<12) #define U 2 #define V 16 using namespace std; typedef struct{ float x,y; } point; float ** generate_set(point a, point b, point c, point *points,point *all_points); float get_distance(point a, point b); __global__ void trilateration(point *a, point *b, point *c, float ** dv, point * pts); int main(int argc, char *argv[]){ srand(time(NULL)); cout << NUM << endl; //point *results =(point *) malloc((NUM/4) * (sizeof(point))); point *points =(point *) malloc((NUM/4) * (sizeof(point))); point *all_points =(point *) malloc((NUM) * (sizeof(point))); point a = {3.4,-2.4}; point b = {5.6,1.23}; point c = {-3.8,5.4}; float ** distance_vector = generate_set(a,b,c,points, all_points); float ** dv; point * da; point * db; point * dc; point * pts; /* cudaMalloc(&da, sizeof(point *)); cudaMalloc(&db, sizeof(point *)); cudaMalloc(&dc, sizeof(point *)); cudaMalloc((void **)&pts, (NUM/4) * sizeof(point)); cudaMalloc((void **)&dv, NUM*sizeof(float *)); for(int i = 0; i < NUM; i++){ cudaMalloc(&dv[i], 3*sizeof(float)); } cudaMemcpy(dv, distance_vector, NUM * sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpy(da, &a, sizeof(point),cudaMemcpyHostToDevice); cudaMemcpy(db, &b, sizeof(point),cudaMemcpyHostToDevice); cudaMemcpy(dc, &c, sizeof(point),cudaMemcpyHostToDevice); */ cudaMallocManaged(&da, sizeof(point *)); cudaMallocManaged(&db, sizeof(point *)); cudaMallocManaged(&dc, sizeof(point *)); cudaMallocManaged(&pts, (NUM) * sizeof(point)); cudaMallocManaged(&dv, NUM * sizeof(float *)); for(int i = 0; i < NUM; i++){ cudaMallocManaged(&dv[i], 3*sizeof(float)); } *da = a; *db = b; *dc = c; for(int i = 0; i < NUM; i++){ for(int j = 0; j < 3; j++){ dv[i][j] = distance_vector[i][j]; } } point guard = {3.4, -2.4}; point center = {1,1}; cout << "HERE " << get_distance(guard, center) << endl; trilateration<<<U,V>>>(da,db,dc,dv,pts); cudaDeviceSynchronize(); //cudaMemcpy(results, pts, (NUM/4) * sizeof(point),cudaMemcpyDeviceToHost); /*for(int i = 0; i < NUM/4; i++){ if(results[i].x != 0) cout << results[i].x << ", " << results[i].y << "\n"; }*/ for(int i = 0; i < 20; i++){ cout << pts[i].x << ", " << pts[i].y << " | Actual point: " << all_points[i].x << ", " << all_points[i].y << "\n"; } /* first points 0.170442, -0.212715 0.642852, -0.825177 1.1408, -1.19354 1.42159, -1.6076 1.62658, -2.18829 1.77826, -2.66518 2.17155, -2.97643 2.65642, -3.23502 3.06065, -3.62799 3.42531, -4.1356 3.96662, -4.42709 4.41263, -4.69845 4.93671, -4.96385 5.46347, -5.28732 6.06384, -5.70538 6.48004, -6.27095 7.05871, -6.8165 7.63242, -7.21735 8.05061, -7.52002 8.4322, -7.82146 8.73583, -8.10203 9.03263, -8.45821 9.40358, -8.86348 9.6679, -9.13161 */ free(points); cudaFree(da); cudaFree(db); cudaFree(dc); cudaFree(points); for(int i = 0; i < NUM; i++) cudaFree(dv[i]); cudaFree(dv); return 0; } float ** generate_set(point a, point b, point c, point *points, point *all_points){ float ** dist = (float **) malloc(NUM * sizeof(float *)); int i,j; for(j = 0; j < NUM; j++){ dist[j] = (float *) malloc(3 * sizeof(float)); for(i = 0; i < 3; i++){ dist[j][i] = 0; } } srand(time(NULL)); float x_ave = 0, y_ave = 0; point next; next.x = 0; next.y = 0; for(i = 0; i < NUM; i++){ dist[i][0] = get_distance(a,next); dist[i][1] = get_distance(b,next); dist[i][2] = get_distance(c,next); all_points[i] = next; //cout << dist[i][0] << "," << dist[i][1] << "," << dist[i][2] << endl; if(i%4 == 
0 && i != 0){ point t; t.x = x_ave/4; t.y = y_ave/4; points[(i/4)-1] = t; x_ave = 0; y_ave = 0; //if(i < 100) cout << t.x << ", " << t.y << endl; } x_ave += next.x; y_ave += next.y; //get new point float temp = (rand() % 20000); float delta_x = (temp / 100000); temp = (rand() % 20000); float delta_y = (temp / 100000) - .2; next.x += delta_x; next.y += delta_y; } return dist; } float get_distance(point a, point b){ float distance = sqrt((pow((a.x - b.x),2) + pow((a.y - b.y),2))); return distance; } float norm(point p){ return pow(pow(p.x,2) + pow(p.y,2), .5); } __global__ void trilateration(point *a, point *b, point *c, float ** dv, point * pts){ int i = blockIdx.x * blockDim.x + threadIdx.x; int j; for(j =0; j < ((NUM)/(U*V*4));j++){ float ave_y = 0, ave_x = 0; float xa = a->x; float ya = a->y; float xb = b->x; float yb = b->y; float xc = c->x; float yc = c->y; float ra = dv[i+ j*(U*V)][0]; float rb = dv[i+ j*(U*V)][1]; float rc = dv[i+ j*(U*V)][2]; float numerator = ((xb - xa) * (xc * xc + yc * yc - rc*rc) + (xa - xc) * (xb * xb + yb * yb - rb * rb) + (xc - xb) * (xa * xa + ya * ya - ra * ra)); float denominator = (2 * (yc *(xb - xa) + yb * (xa - xc) + ya * (xc - xb))); float y = numerator/denominator; float x = (rb * rb + xa * xa + ya * ya - ra * ra - xb * xb - yb * yb - 2*(ya - yb) * y) / (2*(xa -xb)); point ret; ret.x = x; ret.y = y; pts[i + j *U*V] = ret; __syncthreads(); } }
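// Illustrative host-side reference (not part of the trilateration file above): the kernel
// solves the three circle equations |p - a|^2 = ra^2, |p - b|^2 = rb^2, |p - c|^2 = rc^2 in
// closed form by subtracting them pairwise, which leaves two linear equations in (x, y).
// Below is a minimal CPU re-implementation of the same algebra, useful for checking the
// kernel's output; the struct and function names here are hypothetical.
#include <cmath>
#include <cstdio>

struct pt { float x, y; };

static pt trilaterate_host(pt a, pt b, pt c, float ra, float rb, float rc)
{
    // same numerator/denominator expressions as the device kernel
    float num = (b.x - a.x) * (c.x * c.x + c.y * c.y - rc * rc)
              + (a.x - c.x) * (b.x * b.x + b.y * b.y - rb * rb)
              + (c.x - b.x) * (a.x * a.x + a.y * a.y - ra * ra);
    float den = 2.f * (c.y * (b.x - a.x) + b.y * (a.x - c.x) + a.y * (c.x - b.x));
    pt r;
    r.y = num / den;
    r.x = (rb * rb + a.x * a.x + a.y * a.y - ra * ra - b.x * b.x - b.y * b.y
           - 2.f * (a.y - b.y) * r.y) / (2.f * (a.x - b.x));
    return r;
}

int main()
{
    // anchors match the ones used in main() above; the test point is arbitrary
    pt a = {3.4f, -2.4f}, b = {5.6f, 1.23f}, c = {-3.8f, 5.4f};
    pt p = {1.0f, 1.0f};
    float ra = std::hypot(p.x - a.x, p.y - a.y);
    float rb = std::hypot(p.x - b.x, p.y - b.y);
    float rc = std::hypot(p.x - c.x, p.y - c.y);
    pt r = trilaterate_host(a, b, c, ra, rb, rc);
    printf("recovered (%f, %f), expected (%f, %f)\n", r.x, r.y, p.x, p.y);
    return 0;
}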
40b4e8ff845b8567cc3667f852b1ea670146d05a.hip
// !!! This is a file automatically generated by hipify!!! #include "sparse_matrix.h" #include "cuda_helper.h" #include <algorithm> template<typename Dtype> SparseMat<GPU, Dtype>::SparseMat() { this->count = this->rows = this->cols = 0; streamid = 0; data = std::make_shared< SpData<GPU, Dtype> >(); hipsparseCreateMatDescr(&descr); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); } template<typename Dtype> SparseMat<GPU, Dtype>::SparseMat(size_t _rows, size_t _cols, unsigned _streamid) { this->rows = _rows; this->cols = _cols; this->count = _rows * _cols; streamid = _streamid; data = std::make_shared< SpData<GPU, Dtype> >(); hipsparseCreateMatDescr(&descr); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); } template<typename Dtype> SparseMat<GPU, Dtype>::~SparseMat() { hipsparseDestroyMatDescr(descr); } template<typename Dtype> void SparseMat<GPU, Dtype>::Resize(size_t newRos, size_t newCols) { this->count = newRos * newCols; this->rows = newRos; this->cols = newCols; } template<typename Dtype> void SparseMat<GPU, Dtype>::ResizeSp(int newNNZ, int newNPtr) { if (newNNZ > data->nzCap || newNPtr > data->ptrCap) { if (newNNZ > data->nzCap) data->nzCap = ::max(newNNZ, data->nzCap * 2); if (newNPtr > data->ptrCap) data->ptrCap = ::max(newNPtr, data->ptrCap * 2); data = std::make_shared< SpData<GPU, Dtype> >(data->nzCap, data->ptrCap); } data->nnz = newNNZ; data->len_ptr = newNPtr; } template<typename Dtype> Dtype SparseMat<GPU, Dtype>::Asum() { return CudaHelper_Asum(GPUHandle::cublashandle, data->nnz, data->val); } template<typename Dtype> void SparseMat<GPU, Dtype>::CopyFrom(SparseMat<CPU, Dtype>& src) { this->rows = src.rows; this->cols = src.cols; this->count = src.count; ResizeSp(src.data->nnz, src.data->len_ptr); hipMemcpyAsync(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, hipMemcpyHostToDevice, GPUHandle::streams[streamid]); hipMemcpyAsync(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, hipMemcpyHostToDevice, GPUHandle::streams[streamid]); hipMemcpyAsync(data->ptr, src.data->ptr, sizeof(int) * src.data->len_ptr, hipMemcpyHostToDevice, GPUHandle::streams[streamid]); } template<typename Dtype> void SparseMat<GPU, Dtype>::CopyFrom(SparseMat<GPU, Dtype>& src) { this->rows = src.rows; this->cols = src.cols; this->count = src.count; ResizeSp(src.data->nnz, src.data->len_ptr); hipMemcpyAsync(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, hipMemcpyDeviceToDevice, GPUHandle::streams[streamid]); hipMemcpyAsync(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, hipMemcpyDeviceToDevice, GPUHandle::streams[streamid]); hipMemcpyAsync(data->ptr, src.data->ptr, sizeof(int) * src.data->len_ptr, hipMemcpyDeviceToDevice, GPUHandle::streams[streamid]); } template<typename Dtype> void SparseMat<GPU, Dtype>::Serialize(FILE* fid) { IMatrix<GPU, Dtype>::Serialize(fid); data->Serialize(fid); } template<typename Dtype> void SparseMat<GPU, Dtype>::Deserialize(FILE* fid) { IMatrix<GPU, Dtype>::Deserialize(fid); data = std::make_shared< SpData<GPU, Dtype> >(); data->Deserialize(fid); } template class SparseMat<GPU, double>; template class SparseMat<GPU, float>;
40b4e8ff845b8567cc3667f852b1ea670146d05a.cu
#include "sparse_matrix.h" #include "cuda_helper.h" #include <algorithm> template<typename Dtype> SparseMat<GPU, Dtype>::SparseMat() { this->count = this->rows = this->cols = 0; streamid = 0; data = std::make_shared< SpData<GPU, Dtype> >(); cusparseCreateMatDescr(&descr); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); } template<typename Dtype> SparseMat<GPU, Dtype>::SparseMat(size_t _rows, size_t _cols, unsigned _streamid) { this->rows = _rows; this->cols = _cols; this->count = _rows * _cols; streamid = _streamid; data = std::make_shared< SpData<GPU, Dtype> >(); cusparseCreateMatDescr(&descr); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); } template<typename Dtype> SparseMat<GPU, Dtype>::~SparseMat() { cusparseDestroyMatDescr(descr); } template<typename Dtype> void SparseMat<GPU, Dtype>::Resize(size_t newRos, size_t newCols) { this->count = newRos * newCols; this->rows = newRos; this->cols = newCols; } template<typename Dtype> void SparseMat<GPU, Dtype>::ResizeSp(int newNNZ, int newNPtr) { if (newNNZ > data->nzCap || newNPtr > data->ptrCap) { if (newNNZ > data->nzCap) data->nzCap = std::max(newNNZ, data->nzCap * 2); if (newNPtr > data->ptrCap) data->ptrCap = std::max(newNPtr, data->ptrCap * 2); data = std::make_shared< SpData<GPU, Dtype> >(data->nzCap, data->ptrCap); } data->nnz = newNNZ; data->len_ptr = newNPtr; } template<typename Dtype> Dtype SparseMat<GPU, Dtype>::Asum() { return CudaHelper_Asum(GPUHandle::cublashandle, data->nnz, data->val); } template<typename Dtype> void SparseMat<GPU, Dtype>::CopyFrom(SparseMat<CPU, Dtype>& src) { this->rows = src.rows; this->cols = src.cols; this->count = src.count; ResizeSp(src.data->nnz, src.data->len_ptr); cudaMemcpyAsync(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, cudaMemcpyHostToDevice, GPUHandle::streams[streamid]); cudaMemcpyAsync(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, cudaMemcpyHostToDevice, GPUHandle::streams[streamid]); cudaMemcpyAsync(data->ptr, src.data->ptr, sizeof(int) * src.data->len_ptr, cudaMemcpyHostToDevice, GPUHandle::streams[streamid]); } template<typename Dtype> void SparseMat<GPU, Dtype>::CopyFrom(SparseMat<GPU, Dtype>& src) { this->rows = src.rows; this->cols = src.cols; this->count = src.count; ResizeSp(src.data->nnz, src.data->len_ptr); cudaMemcpyAsync(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, cudaMemcpyDeviceToDevice, GPUHandle::streams[streamid]); cudaMemcpyAsync(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, cudaMemcpyDeviceToDevice, GPUHandle::streams[streamid]); cudaMemcpyAsync(data->ptr, src.data->ptr, sizeof(int) * src.data->len_ptr, cudaMemcpyDeviceToDevice, GPUHandle::streams[streamid]); } template<typename Dtype> void SparseMat<GPU, Dtype>::Serialize(FILE* fid) { IMatrix<GPU, Dtype>::Serialize(fid); data->Serialize(fid); } template<typename Dtype> void SparseMat<GPU, Dtype>::Deserialize(FILE* fid) { IMatrix<GPU, Dtype>::Deserialize(fid); data = std::make_shared< SpData<GPU, Dtype> >(); data->Deserialize(fid); } template class SparseMat<GPU, double>; template class SparseMat<GPU, float>;
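// Illustrative sketch (not part of sparse_matrix above): SpData appears to hold the matrix
// in CSR form -- `val` stores the nnz nonzero values, `col_idx` their column indices, and
// `ptr` the offset of each row's first nonzero (assumed here to have length rows + 1).
// Below, a tiny standalone example of that layout for a hypothetical 3x4 matrix, copied to
// the device with the same stream-based cudaMemcpyAsync calls that CopyFrom issues.
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    // | 1 0 2 0 |
    // | 0 3 0 0 |   ->  nnz = 4, len_ptr = rows + 1 = 4
    // | 0 0 0 4 |
    const int nnz = 4, len_ptr = 4;
    float h_val[nnz]   = {1.f, 2.f, 3.f, 4.f};
    int h_col_idx[nnz] = {0, 2, 1, 3};
    int h_ptr[len_ptr] = {0, 2, 3, 4};

    float *d_val;
    int *d_col_idx, *d_ptr;
    cudaMalloc(&d_val, sizeof(float) * nnz);
    cudaMalloc(&d_col_idx, sizeof(int) * nnz);
    cudaMalloc(&d_ptr, sizeof(int) * len_ptr);

    cudaStream_t stream;
    cudaStreamCreate(&stream);
    // the three per-array copies mirror CopyFrom(SparseMat<CPU, Dtype>&)
    cudaMemcpyAsync(d_val, h_val, sizeof(float) * nnz, cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(d_col_idx, h_col_idx, sizeof(int) * nnz, cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(d_ptr, h_ptr, sizeof(int) * len_ptr, cudaMemcpyHostToDevice, stream);
    cudaStreamSynchronize(stream);

    printf("copied CSR matrix: nnz=%d, rows=%d\n", nnz, len_ptr - 1);
    cudaStreamDestroy(stream);
    cudaFree(d_val);
    cudaFree(d_col_idx);
    cudaFree(d_ptr);
    return 0;
}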
ce2ab79c31a7e4f29337b799ebf5af92a0fdcc97.hip
// !!! This is a file automatically generated by hipify!!! #include <THHUNN/THHUNN.h> #include <THHUNN/common.h> #include <TH/THHalf.h> #include <THH/THHNumerics.cuh> #include <THH/THHThrustAllocator.cuh> #include <THH/THHApply.cuh> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <thrust/transform_reduce.h> #include <thrust/system/hip/execution_policy.h> #include <c10/macros/Macros.h> template <typename T> inline __host__ __device__ T eps(); template <> inline __host__ __device__ float eps() { return 1e-12f; } template <> inline __host__ __device__ double eps() { return 1e-12; } template <typename T> inline __host__ __device__ T safe_log(T a) { if (a == 0.) { return ::log(eps<T>()); } return ::log(a); } template <typename Dtype, typename Acctype> struct bce_functor { template <class Tuple> __host__ __device__ Acctype operator()(Tuple x) { Dtype input = thrust::get<0>(x); Dtype t = thrust::get<1>(x); CUDA_KERNEL_ASSERT(input >= 0. && input <= 1.); return - (t * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(input)) + (Acctype(1) - t) * safe_log<Acctype>(Acctype(1) - input)); } }; template <typename Dtype, typename Acctype> struct bce_updateOutput_no_reduce_functor { __forceinline__ __host__ __device__ void operator()( const Dtype *input, const Dtype *target, Dtype *output) { CUDA_KERNEL_ASSERT(*input >= 0. && *input <= 1.); *output = ScalarConvert<Acctype, Dtype>::to( -(*target * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(*input)) + (Acctype(1) - *target) * safe_log<Acctype>(Acctype(1) - *input))); } }; template <typename Dtype, typename Acctype> struct bce_functor_weights { template <class Tuple> __host__ __device__ Acctype operator()(Tuple x) { Dtype input = thrust::get<0>(x); Dtype t = thrust::get<1>(x); Dtype w = thrust::get<2>(x); CUDA_KERNEL_ASSERT(input >= 0. && input <= 1.); return - w * (t * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(input)) + (Acctype(1) - t) * safe_log<Acctype>(Acctype(1) - input)); } }; template <typename Dtype, typename Acctype> struct bce_updateGradInput_no_reduce_functor { __forceinline__ __host__ __device__ void operator()( const Dtype *x, const Dtype *t, Dtype *gradInput) { *gradInput = ScalarConvert<Acctype,Dtype>::to( - (*t - *x) / ((Acctype(1) - *x + eps<Acctype>()) * (*x + eps<Acctype>()))); } }; template <typename Dtype, typename Acctype> struct bce_updateGradInput_functor { const Dtype norm; bce_updateGradInput_functor(Dtype norm_) : norm(norm_) {} template <class Tuple> __host__ __device__ Dtype operator()(Tuple x) { Dtype o = thrust::get<0>(x); Dtype t = thrust::get<1>(x); return ScalarConvert<Acctype,Dtype>::to(- (t - o) / ((Acctype(1) - o + eps<Acctype>()) * (o + eps<Acctype>())) * norm); } }; template <typename Dtype, typename Acctype> struct bce_updateGradInput_functor_weights { const Dtype norm; bce_updateGradInput_functor_weights(Dtype norm_) : norm(norm_) {} template <class Tuple> __host__ __device__ Dtype operator()(Tuple x) { Dtype o = thrust::get<0>(x); Dtype t = thrust::get<1>(x); Dtype w = thrust::get<2>(x); return ScalarConvert<Acctype, Dtype>::to(- (t - o) / ((Acctype(1) - o + eps<Acctype>()) * (o + eps<Acctype>())) * norm * w); } }; #include <THHUNN/generic/BCECriterion.hip> #include <THH/THHGenerateFloatTypes.h>
ce2ab79c31a7e4f29337b799ebf5af92a0fdcc97.cu
#include <THCUNN/THCUNN.h> #include <THCUNN/common.h> #include <TH/THHalf.h> #include <THC/THCNumerics.cuh> #include <THC/THCThrustAllocator.cuh> #include <THC/THCApply.cuh> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <thrust/transform_reduce.h> #include <thrust/system/cuda/execution_policy.h> #include <c10/macros/Macros.h> template <typename T> inline __host__ __device__ T eps(); template <> inline __host__ __device__ float eps() { return 1e-12f; } template <> inline __host__ __device__ double eps() { return 1e-12; } template <typename T> inline __host__ __device__ T safe_log(T a) { if (a == 0.) { return std::log(eps<T>()); } return std::log(a); } template <typename Dtype, typename Acctype> struct bce_functor { template <class Tuple> __host__ __device__ Acctype operator()(Tuple x) { Dtype input = thrust::get<0>(x); Dtype t = thrust::get<1>(x); CUDA_KERNEL_ASSERT(input >= 0. && input <= 1.); return - (t * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(input)) + (Acctype(1) - t) * safe_log<Acctype>(Acctype(1) - input)); } }; template <typename Dtype, typename Acctype> struct bce_updateOutput_no_reduce_functor { __forceinline__ __host__ __device__ void operator()( const Dtype *input, const Dtype *target, Dtype *output) { CUDA_KERNEL_ASSERT(*input >= 0. && *input <= 1.); *output = ScalarConvert<Acctype, Dtype>::to( -(*target * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(*input)) + (Acctype(1) - *target) * safe_log<Acctype>(Acctype(1) - *input))); } }; template <typename Dtype, typename Acctype> struct bce_functor_weights { template <class Tuple> __host__ __device__ Acctype operator()(Tuple x) { Dtype input = thrust::get<0>(x); Dtype t = thrust::get<1>(x); Dtype w = thrust::get<2>(x); CUDA_KERNEL_ASSERT(input >= 0. && input <= 1.); return - w * (t * safe_log<Acctype>(ScalarConvert<Dtype, Acctype>::to(input)) + (Acctype(1) - t) * safe_log<Acctype>(Acctype(1) - input)); } }; template <typename Dtype, typename Acctype> struct bce_updateGradInput_no_reduce_functor { __forceinline__ __host__ __device__ void operator()( const Dtype *x, const Dtype *t, Dtype *gradInput) { *gradInput = ScalarConvert<Acctype,Dtype>::to( - (*t - *x) / ((Acctype(1) - *x + eps<Acctype>()) * (*x + eps<Acctype>()))); } }; template <typename Dtype, typename Acctype> struct bce_updateGradInput_functor { const Dtype norm; bce_updateGradInput_functor(Dtype norm_) : norm(norm_) {} template <class Tuple> __host__ __device__ Dtype operator()(Tuple x) { Dtype o = thrust::get<0>(x); Dtype t = thrust::get<1>(x); return ScalarConvert<Acctype,Dtype>::to(- (t - o) / ((Acctype(1) - o + eps<Acctype>()) * (o + eps<Acctype>())) * norm); } }; template <typename Dtype, typename Acctype> struct bce_updateGradInput_functor_weights { const Dtype norm; bce_updateGradInput_functor_weights(Dtype norm_) : norm(norm_) {} template <class Tuple> __host__ __device__ Dtype operator()(Tuple x) { Dtype o = thrust::get<0>(x); Dtype t = thrust::get<1>(x); Dtype w = thrust::get<2>(x); return ScalarConvert<Acctype, Dtype>::to(- (t - o) / ((Acctype(1) - o + eps<Acctype>()) * (o + eps<Acctype>())) * norm * w); } }; #include <THCUNN/generic/BCECriterion.cu> #include <THC/THCGenerateFloatTypes.h>
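// Illustrative sketch (not part of BCECriterion above): per element, the functors compute
//   bce(x, t) = -( t * log(x) + (1 - t) * log(1 - x) ),
// with log(0) replaced by log(eps), eps = 1e-12, so the loss stays finite at x == 0 or
// x == 1. A minimal standalone CUDA version without the THC/thrust machinery; the kernel
// and helper names are hypothetical.
#include <cuda_runtime.h>
#include <math.h>
#include <cstdio>

__host__ __device__ inline float safe_logf(float a)
{
    const float eps = 1e-12f;
    return logf(a == 0.f ? eps : a);
}

__global__ void bce_elementwise(const float *input, const float *target, float *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = -(target[i] * safe_logf(input[i])
                   + (1.f - target[i]) * safe_logf(1.f - input[i]));
}

int main()
{
    const int n = 4;
    float h_in[n] = {0.9f, 0.1f, 0.5f, 1.0f};
    float h_tg[n] = {1.f, 0.f, 1.f, 1.f};
    float *d_in, *d_tg, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_tg, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_tg, h_tg, n * sizeof(float), cudaMemcpyHostToDevice);
    bce_elementwise<<<1, 32>>>(d_in, d_tg, d_out, n);
    float h_out[n];
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i)
        printf("bce(%.2f, %.0f) = %f\n", h_in[i], h_tg[i], h_out[i]);
    cudaFree(d_in);
    cudaFree(d_tg);
    cudaFree(d_out);
    return 0;
}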
8d8e4dd215056926a2e88e243661068a57c51e45.hip
// !!! This is a file automatically generated by hipify!!! // Ceres Solver - A fast non-linear least squares minimizer // Copyright 2022 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Google Inc. nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Author: [email protected] (Joydeep Biswas) #include "hip/hip_runtime.h" namespace ceres::internal { // As the CUDA Toolkit documentation says, "although arbitrary in this case, is // a common choice". This is determined by the warp size, max block size, and // multiprocessor sizes of recent GPUs. For complex kernels with significant // register usage and unusual memory patterns, the occupancy calculator API // might provide better performance. See "Occupancy Calculator" under the CUDA // toolkit documentation. 
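// NOTE: the commented snippet below is only an illustrative, hedged sketch of the
// occupancy-based alternative mentioned in the comment above; it is not part of the
// upstream Ceres source. It uses HIP's templated occupancy helper with a kernel
// defined later in this file, and kCudaBlockSize below remains what the code uses.
//
//   int min_grid_size = 0;
//   int block_size = 0;
//   hipOccupancyMaxPotentialBlockSize(
//       &min_grid_size, &block_size, TypeConversionKernel<double, float>, 0, 0);
//   // block_size could then replace kCudaBlockSize for that particular launch.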
constexpr int kCudaBlockSize = 256; template<typename SrcType, typename DstType> __global__ void TypeConversionKernel(const SrcType* __restrict__ input, DstType* __restrict__ output, const int size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { output[i] = static_cast<DstType>(input[i]); } } void CudaFP64ToFP32(const double* input, float* output, const int size, hipStream_t stream) { const int num_blocks = (size + kCudaBlockSize - 1) / kCudaBlockSize; hipLaunchKernelGGL(( TypeConversionKernel<double, float>) , dim3(num_blocks), dim3(kCudaBlockSize), 0, stream, input, output, size); } void CudaFP32ToFP64(const float* input, double* output, const int size, hipStream_t stream) { const int num_blocks = (size + kCudaBlockSize - 1) / kCudaBlockSize; hipLaunchKernelGGL(( TypeConversionKernel<float, double>) , dim3(num_blocks), dim3(kCudaBlockSize), 0, stream, input, output, size); } template<typename T> __global__ void SetZeroKernel(T* __restrict__ output, const int size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { output[i] = T(0.0); } } void CudaSetZeroFP32(float* output, const int size, hipStream_t stream) { const int num_blocks = (size + kCudaBlockSize - 1) / kCudaBlockSize; hipLaunchKernelGGL(( SetZeroKernel<float>), dim3(num_blocks), dim3(kCudaBlockSize), 0, stream, output, size); } void CudaSetZeroFP64(double* output, const int size, hipStream_t stream) { const int num_blocks = (size + kCudaBlockSize - 1) / kCudaBlockSize; hipLaunchKernelGGL(( SetZeroKernel<double>), dim3(num_blocks), dim3(kCudaBlockSize), 0, stream, output, size); } template <typename SrcType, typename DstType> __global__ void XPlusEqualsYKernel(DstType* __restrict__ x, const SrcType* __restrict__ y, const int size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { x[i] = x[i] + DstType(y[i]); } } void CudaDsxpy(double* x, float* y, const int size, hipStream_t stream) { const int num_blocks = (size + kCudaBlockSize - 1) / kCudaBlockSize; hipLaunchKernelGGL(( XPlusEqualsYKernel<float, double>) , dim3(num_blocks), dim3(kCudaBlockSize), 0, stream, x, y, size); } } // namespace ceres_cuda_kernels
8d8e4dd215056926a2e88e243661068a57c51e45.cu
// Ceres Solver - A fast non-linear least squares minimizer // Copyright 2022 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Google Inc. nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Author: [email protected] (Joydeep Biswas) #include "cuda_runtime.h" namespace ceres::internal { // As the CUDA Toolkit documentation says, "although arbitrary in this case, is // a common choice". This is determined by the warp size, max block size, and // multiprocessor sizes of recent GPUs. For complex kernels with significant // register usage and unusual memory patterns, the occupancy calculator API // might provide better performance. See "Occupancy Calculator" under the CUDA // toolkit documentation. 
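// NOTE: the commented snippet below is only an illustrative, hedged sketch of the
// occupancy-based alternative mentioned in the comment above; it is not part of the
// upstream Ceres source. It uses the CUDA runtime's templated occupancy helper with
// a kernel defined later in this file, and kCudaBlockSize below remains what the
// code actually uses.
//
//   int min_grid_size = 0;
//   int block_size = 0;
//   cudaOccupancyMaxPotentialBlockSize(
//       &min_grid_size, &block_size, TypeConversionKernel<double, float>, 0, 0);
//   // block_size could then replace kCudaBlockSize for that particular launch.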
constexpr int kCudaBlockSize = 256; template<typename SrcType, typename DstType> __global__ void TypeConversionKernel(const SrcType* __restrict__ input, DstType* __restrict__ output, const int size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { output[i] = static_cast<DstType>(input[i]); } } void CudaFP64ToFP32(const double* input, float* output, const int size, cudaStream_t stream) { const int num_blocks = (size + kCudaBlockSize - 1) / kCudaBlockSize; TypeConversionKernel<double, float> <<<num_blocks, kCudaBlockSize, 0, stream>>>(input, output, size); } void CudaFP32ToFP64(const float* input, double* output, const int size, cudaStream_t stream) { const int num_blocks = (size + kCudaBlockSize - 1) / kCudaBlockSize; TypeConversionKernel<float, double> <<<num_blocks, kCudaBlockSize, 0, stream>>>(input, output, size); } template<typename T> __global__ void SetZeroKernel(T* __restrict__ output, const int size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { output[i] = T(0.0); } } void CudaSetZeroFP32(float* output, const int size, cudaStream_t stream) { const int num_blocks = (size + kCudaBlockSize - 1) / kCudaBlockSize; SetZeroKernel<float><<<num_blocks, kCudaBlockSize, 0, stream>>>(output, size); } void CudaSetZeroFP64(double* output, const int size, cudaStream_t stream) { const int num_blocks = (size + kCudaBlockSize - 1) / kCudaBlockSize; SetZeroKernel<double><<<num_blocks, kCudaBlockSize, 0, stream>>>( output, size); } template <typename SrcType, typename DstType> __global__ void XPlusEqualsYKernel(DstType* __restrict__ x, const SrcType* __restrict__ y, const int size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { x[i] = x[i] + DstType(y[i]); } } void CudaDsxpy(double* x, float* y, const int size, cudaStream_t stream) { const int num_blocks = (size + kCudaBlockSize - 1) / kCudaBlockSize; XPlusEqualsYKernel<float, double> <<<num_blocks, kCudaBlockSize, 0, stream>>>(x, y, size); } } // namespace ceres_cuda_kernels
9a32dd3f725c917fc5495f9729e9022e52b04704.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>

#ifndef NDEBUG
#define CHECK_STATUS(status) \
	if (status != hipSuccess) \
	fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
	hipGetErrorString(status))
#else
#define CHECK_STATUS(status) status
#endif

//////////////////////////////////////////////////////////////////////////////////////////////////

__global__ void MyKernel(){

}

int main(int argc, char **argv)
{
    CHECK_STATUS(hipSetDevice(0)); // Select device 0
    hipStream_t s0;
    hipStreamCreate(&s0); // Create stream s0, associated with device 0
    hipLaunchKernelGGL(( MyKernel), dim3(100), dim3(64), 0, s0, ); // Launch MyKernel on device 0 in stream s0
    CHECK_STATUS(hipGetLastError());

    CHECK_STATUS(hipSetDevice(1)); // Select device 1
    hipStream_t s1;
    hipStreamCreate(&s1); // Create stream s1, associated with device 1
    hipLaunchKernelGGL(( MyKernel), dim3(100), dim3(64), 0, s1, ); // Launch MyKernel on device 1 in stream s1
    CHECK_STATUS(hipGetLastError());

    // This call will fail
    hipLaunchKernelGGL(( MyKernel), dim3(100), dim3(64), 0, s0, ); // On device 1, launch MyKernel in stream s0

    // 1. Memory copies will succeed
    // 2. hipEventRecord() will fail if the stream and the event belong to different devices
    // 3. hipEventElapsedTime() will fail if the two input events belong to different devices
    // 4. hipEventSynchronize() and cudaEventQuery() will succeed
    // 5. hipStreamWaitEvent() will succeed

    CHECK_STATUS(hipStreamDestroy(s0));
    CHECK_STATUS(hipStreamDestroy(s1));
    return 0;
}
9a32dd3f725c917fc5495f9729e9022e52b04704.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#ifndef NDEBUG
#define CHECK_STATUS(status) \
	if (status != cudaSuccess) \
	fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
	cudaGetErrorString(status))
#else
#define CHECK_STATUS(status) status
#endif

//////////////////////////////////////////////////////////////////////////////////////////////////

__global__ void MyKernel(){

}

int main(int argc, char **argv)
{
    CHECK_STATUS(cudaSetDevice(0)); // Select device 0
    cudaStream_t s0;
    cudaStreamCreate(&s0); // Create stream s0, associated with device 0
    MyKernel<<<100, 64, 0, s0>>>(); // Launch MyKernel on device 0 in stream s0
    CHECK_STATUS(cudaGetLastError());

    CHECK_STATUS(cudaSetDevice(1)); // Select device 1
    cudaStream_t s1;
    cudaStreamCreate(&s1); // Create stream s1, associated with device 1
    MyKernel<<<100, 64, 0, s1>>>(); // Launch MyKernel on device 1 in stream s1
    CHECK_STATUS(cudaGetLastError());

    // This call will fail
    MyKernel<<<100, 64, 0, s0>>>(); // On device 1, launch MyKernel in stream s0

    // 1. Memory copies will succeed
    // 2. cudaEventRecord() will fail if the stream and the event belong to different devices
    // 3. cudaEventElapsedTime() will fail if the two input events belong to different devices
    // 4. cudaEventSynchronize() and cudaEventQuery() will succeed
    // 5. cudaStreamWaitEvent() will succeed

    CHECK_STATUS(cudaStreamDestroy(s0));
    CHECK_STATUS(cudaStreamDestroy(s1));
    return 0;
}
8b9ab57d5835b1ea39277c20ff51fe1de9d48e26.hip
// !!! This is a file automatically generated by hipify!!! /* * EDDL Library - European Distributed Deep Learning Library. * Version: 0.3 * copyright (c) 2019, Universidad Politcnica de Valencia (UPV), PRHLT Research Centre * Date: October 2019 * Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected]), ([email protected]) * All rights reserved */ #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <rocblas.h> #include "gpu_tensor.h" #include "gpu_kernels.h" #include "gpu_hw.h" /* we need these includes for CUDA's random number stuff */ #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "../../tensor/tensor.h" #include "../../descriptors/descriptors.h" float* gpu_get_uniforms(int N){ /* CUDA's random number library uses hiprandState_t to keep track of the seed value we will store a random state for every thread */ hiprandState_t* states; /* allocate space on the GPU for the random states */ hipMalloc((void**) &states, N * sizeof(hiprandState_t)); /* invoke the GPU to initialize all of the random states */ hipLaunchKernelGGL(( init), dim3(N), dim3(1), 0, 0, time(0), states); /* allocate an array of unsigned ints on the CPU and GPU */ float* gpu_nums; hipMalloc((void**) &gpu_nums, N * sizeof(float)); /* invoke the kernel to get some random numbers */ hipLaunchKernelGGL(( random_uniform), dim3(N), dim3(1), 0, 0, states, gpu_nums); /* free the memory we allocated for the states and numbers */ hipFree(states); // hipFree(gpu_nums); return gpu_nums; } void gpu_rand_uniform(Tensor *A, float v){ int device=A->gpu_device; hipSetDevice(device); check_curand(hiprandGenerateUniform(random_generator[device],A->ptr,A->size),"gpu_rand_uniform"); check_cuda(hipDeviceSynchronize(),"gpu_rand_uniform"); //gpu_mult_(A, v); check_cuda(hipDeviceSynchronize(),"gpu_rand_uniform"); } void gpu_rand_signed_uniform(Tensor *A, float v){ int device=A->gpu_device; hipSetDevice(device); check_curand(hiprandGenerateUniform(random_generator[device],A->ptr,A->size),"gpu_rand_signed_uniform"); check_cuda(hipDeviceSynchronize(),"gpu_rand_signed_uniform"); gpu_mult_(A, 2*v); gpu_add_(A, -v); check_cuda(hipDeviceSynchronize(),"gpu_rand_signed_uniform"); } void gpu_rand_normal(Tensor *A, float m, float s){ int device=A->gpu_device; hipSetDevice(device); if (A->size%2) { gpu_fill_(A, 0.0); check_curand(hiprandGenerateNormal(random_generator[device],A->ptr,A->size-1,m,s),"gpu_rand_normal"); } else check_curand(hiprandGenerateNormal(random_generator[device],A->ptr,A->size,m,s),"gpu_rand_normal"); check_cuda(hipDeviceSynchronize(),"gpu_rand_normal"); } void gpu_rand_binary(Tensor *A, float v){ int device=A->gpu_device; hipSetDevice(device); check_curand(hiprandGenerateUniform(random_generator[device],A->ptr,A->size),"gpu_rand_binary"); gpu_mask(A,v); check_cuda(hipDeviceSynchronize(),"gpu_rand_binary"); }
8b9ab57d5835b1ea39277c20ff51fe1de9d48e26.cu
/* * EDDL Library - European Distributed Deep Learning Library. * Version: 0.3 * copyright (c) 2019, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre * Date: October 2019 * Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected]), ([email protected]) * All rights reserved */ #include <stdio.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <cublas_v2.h> #include "gpu_tensor.h" #include "gpu_kernels.h" #include "gpu_hw.h" /* we need these includes for CUDA's random number stuff */ #include <curand.h> #include <curand_kernel.h> #include "../../tensor/tensor.h" #include "../../descriptors/descriptors.h" float* gpu_get_uniforms(int N){ /* CUDA's random number library uses curandState_t to keep track of the seed value we will store a random state for every thread */ curandState_t* states; /* allocate space on the GPU for the random states */ cudaMalloc((void**) &states, N * sizeof(curandState_t)); /* invoke the GPU to initialize all of the random states */ init<<<N, 1>>>(time(0), states); /* allocate an array of unsigned ints on the CPU and GPU */ float* gpu_nums; cudaMalloc((void**) &gpu_nums, N * sizeof(float)); /* invoke the kernel to get some random numbers */ random_uniform<<<N, 1>>>(states, gpu_nums); /* free the memory we allocated for the states and numbers */ cudaFree(states); // cudaFree(gpu_nums); return gpu_nums; } void gpu_rand_uniform(Tensor *A, float v){ int device=A->gpu_device; cudaSetDevice(device); check_curand(curandGenerateUniform(random_generator[device],A->ptr,A->size),"gpu_rand_uniform"); check_cuda(cudaDeviceSynchronize(),"gpu_rand_uniform"); //gpu_mult_(A, v); check_cuda(cudaDeviceSynchronize(),"gpu_rand_uniform"); } void gpu_rand_signed_uniform(Tensor *A, float v){ int device=A->gpu_device; cudaSetDevice(device); check_curand(curandGenerateUniform(random_generator[device],A->ptr,A->size),"gpu_rand_signed_uniform"); check_cuda(cudaDeviceSynchronize(),"gpu_rand_signed_uniform"); gpu_mult_(A, 2*v); gpu_add_(A, -v); check_cuda(cudaDeviceSynchronize(),"gpu_rand_signed_uniform"); } void gpu_rand_normal(Tensor *A, float m, float s){ int device=A->gpu_device; cudaSetDevice(device); if (A->size%2) { gpu_fill_(A, 0.0); check_curand(curandGenerateNormal(random_generator[device],A->ptr,A->size-1,m,s),"gpu_rand_normal"); } else check_curand(curandGenerateNormal(random_generator[device],A->ptr,A->size,m,s),"gpu_rand_normal"); check_cuda(cudaDeviceSynchronize(),"gpu_rand_normal"); } void gpu_rand_binary(Tensor *A, float v){ int device=A->gpu_device; cudaSetDevice(device); check_curand(curandGenerateUniform(random_generator[device],A->ptr,A->size),"gpu_rand_binary"); gpu_mask(A,v); check_cuda(cudaDeviceSynchronize(),"gpu_rand_binary"); }
986ee501eb0dc5b491d1ee09f911e8973ea844bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "bfs_kernels_hip.cuh" __device__ unsigned terminate; __managed__ unsigned numActiveThreads; __global__ void BFSKernel1( size_t graphSize, unsigned *activeMask, unsigned *V, unsigned *E, unsigned *F, unsigned *X, unsigned *C, unsigned *Fu) { unsigned activeMaskIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; // If vertex is active at current iteration if (activeMaskIdx < numActiveThreads) { unsigned v = activeMask[activeMaskIdx]; // Remove v from current frontier F[v] = FALSE; // Iterate over v's neighbors for (unsigned edge = V[v]; edge < V[v+1]; ++edge) { unsigned neighbor = E[edge]; // If neighbor wasn't visited if (not X[neighbor]) { C[neighbor] = C[v] + 1; Fu[neighbor] = TRUE; } } } } __global__ void BFSKernel2(size_t graphSize, unsigned *F, unsigned *X, unsigned *Fu) { int v = blockIdx.x * BLOCK_SIZE + threadIdx.x; // If vertex v exists and has recently joined the frontier if (v < graphSize and Fu[v]) { // Copy the new frontier into F F[v] = TRUE; // Set v as visited X[v] = TRUE; // Clean up the new frontier Fu[v] = FALSE; terminate = FALSE; } } // Very slow but correct "active mask" calculation; for debugging __global__ void getActiveMaskTemp(size_t graphSize, unsigned *F, unsigned *activeMask) { numActiveThreads = 0; for (int i = 0; i < graphSize; ++i) { if (F[i]) { activeMask[numActiveThreads] = i; ++numActiveThreads; } } }
986ee501eb0dc5b491d1ee09f911e8973ea844bc.cu
#include "bfs_kernels.cuh" __device__ unsigned terminate; __managed__ unsigned numActiveThreads; __global__ void BFSKernel1( size_t graphSize, unsigned *activeMask, unsigned *V, unsigned *E, unsigned *F, unsigned *X, unsigned *C, unsigned *Fu) { unsigned activeMaskIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; // If vertex is active at current iteration if (activeMaskIdx < numActiveThreads) { unsigned v = activeMask[activeMaskIdx]; // Remove v from current frontier F[v] = FALSE; // Iterate over v's neighbors for (unsigned edge = V[v]; edge < V[v+1]; ++edge) { unsigned neighbor = E[edge]; // If neighbor wasn't visited if (not X[neighbor]) { C[neighbor] = C[v] + 1; Fu[neighbor] = TRUE; } } } } __global__ void BFSKernel2(size_t graphSize, unsigned *F, unsigned *X, unsigned *Fu) { int v = blockIdx.x * BLOCK_SIZE + threadIdx.x; // If vertex v exists and has recently joined the frontier if (v < graphSize and Fu[v]) { // Copy the new frontier into F F[v] = TRUE; // Set v as visited X[v] = TRUE; // Clean up the new frontier Fu[v] = FALSE; terminate = FALSE; } } // Very slow but correct "active mask" calculation; for debugging __global__ void getActiveMaskTemp(size_t graphSize, unsigned *F, unsigned *activeMask) { numActiveThreads = 0; for (int i = 0; i < graphSize; ++i) { if (F[i]) { activeMask[numActiveThreads] = i; ++numActiveThreads; } } }
62a6eedf359e3ab33d43dba0d57d9c86c7ab85b2.hip
// !!! This is a file automatically generated by hipify!!! %%cu #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> using namespace std; #define N (1<<24) #define BLOCK_DIM 256 #define RANDOM_MAX 100 #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \ exit(1); \ } \ } double seconds(){ struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } void initialData(int *data, int size) { srand(0); for (int i = 0; i < size; i++) { data[i] = (int)(rand()) / RANDOM_MAX; } } int reduceOnHost(int *data, int const size){ if (size == 1) return data[0]; if (size % 2 == 1) { data[0] += data[size]; } int const stride = size / 2; for (int i = 0; i < stride; i++){ data[i] += data[i + stride]; } return reduceOnHost(data, stride); } __global__ void reduceUseSMEM(int *in, int *out, int n){ // Each block loads data from GMEM to SMEM __shared__ int blkData[2 * 256]; int numElemsBeforeBlk = blockIdx.x * blockDim.x * 2; blkData[threadIdx.x] = in[numElemsBeforeBlk + threadIdx.x]; blkData[blockDim.x + threadIdx.x] = in[numElemsBeforeBlk + blockDim.x + threadIdx.x]; __syncthreads(); // Each block does reduction with data on SMEM for (int stride = blockDim.x; stride > 0; stride /= 2){ if (threadIdx.x < stride){ blkData[threadIdx.x] += blkData[threadIdx.x + stride]; } __syncthreads(); // Synchronize within threadblock } // Each block writes result from SMEM to GMEM if (threadIdx.x == 0) out[blockIdx.x] = blkData[0]; } int main() { int *in; int hostRes = 0, deviceRes = 0; size_t nBytes = N * sizeof(int); in = (int *)malloc(nBytes); initialData(in, N); dim3 blockSize(BLOCK_DIM); dim3 gridSize((N - 1) / (blockSize.x * 2) + 1); // malloc device global memory int *d_in, *d_out, *out; out = (int *)malloc(gridSize.x * sizeof(int)); CHECK(hipMalloc(&d_in, nBytes)); CHECK(hipMalloc(&d_out, gridSize.x * sizeof(int))); CHECK(hipMemcpy(d_in, in, nBytes, hipMemcpyHostToDevice)); double iStart, iElaps; iStart = seconds(); hostRes = reduceOnHost(in, N); iElaps = seconds() - iStart; printf("reduceOnHost : %f sec\n", iElaps); // ######################################### iStart = seconds(); hipLaunchKernelGGL(( reduceUseSMEM), dim3(gridSize), dim3(blockSize), 0, 0, d_in, d_out, N); CHECK(hipMemcpy(out, d_out, gridSize.x * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 0; i < gridSize.x; i++){ deviceRes += out[i]; } iElaps = seconds() - iStart; printf("reduceUseSMEM : %f sec\n", iElaps); if (hostRes != deviceRes){ printf("%d != %d", hostRes, deviceRes); }else{ printf("%d == %d", hostRes, deviceRes); } // Cleanup hipFree(d_in); hipFree(d_out); free(in); free(out); return 0; }
62a6eedf359e3ab33d43dba0d57d9c86c7ab85b2.cu
%%cu #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <sys/time.h> using namespace std; #define N (1<<24) #define BLOCK_DIM 256 #define RANDOM_MAX 100 #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \ exit(1); \ } \ } double seconds(){ struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } void initialData(int *data, int size) { srand(0); for (int i = 0; i < size; i++) { data[i] = (int)(rand()) / RANDOM_MAX; } } int reduceOnHost(int *data, int const size){ if (size == 1) return data[0]; if (size % 2 == 1) { data[0] += data[size]; } int const stride = size / 2; for (int i = 0; i < stride; i++){ data[i] += data[i + stride]; } return reduceOnHost(data, stride); } __global__ void reduceUseSMEM(int *in, int *out, int n){ // Each block loads data from GMEM to SMEM __shared__ int blkData[2 * 256]; int numElemsBeforeBlk = blockIdx.x * blockDim.x * 2; blkData[threadIdx.x] = in[numElemsBeforeBlk + threadIdx.x]; blkData[blockDim.x + threadIdx.x] = in[numElemsBeforeBlk + blockDim.x + threadIdx.x]; __syncthreads(); // Each block does reduction with data on SMEM for (int stride = blockDim.x; stride > 0; stride /= 2){ if (threadIdx.x < stride){ blkData[threadIdx.x] += blkData[threadIdx.x + stride]; } __syncthreads(); // Synchronize within threadblock } // Each block writes result from SMEM to GMEM if (threadIdx.x == 0) out[blockIdx.x] = blkData[0]; } int main() { int *in; int hostRes = 0, deviceRes = 0; size_t nBytes = N * sizeof(int); in = (int *)malloc(nBytes); initialData(in, N); dim3 blockSize(BLOCK_DIM); dim3 gridSize((N - 1) / (blockSize.x * 2) + 1); // malloc device global memory int *d_in, *d_out, *out; out = (int *)malloc(gridSize.x * sizeof(int)); CHECK(cudaMalloc(&d_in, nBytes)); CHECK(cudaMalloc(&d_out, gridSize.x * sizeof(int))); CHECK(cudaMemcpy(d_in, in, nBytes, cudaMemcpyHostToDevice)); double iStart, iElaps; iStart = seconds(); hostRes = reduceOnHost(in, N); iElaps = seconds() - iStart; printf("reduceOnHost : %f sec\n", iElaps); // ######################################### iStart = seconds(); reduceUseSMEM<<<gridSize, blockSize>>>(d_in, d_out, N); CHECK(cudaMemcpy(out, d_out, gridSize.x * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < gridSize.x; i++){ deviceRes += out[i]; } iElaps = seconds() - iStart; printf("reduceUseSMEM : %f sec\n", iElaps); if (hostRes != deviceRes){ printf("%d != %d", hostRes, deviceRes); }else{ printf("%d == %d", hostRes, deviceRes); } // Cleanup cudaFree(d_in); cudaFree(d_out); free(in); free(out); return 0; }
4bed8014c474b86f825b3e17fe6413b1960e1fb0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <rocblas.h> #include <string> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/operators/batch_fc_op.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { using framework::Tensor; const int CUDA_NUM_THREADS = 1024; static inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename T> __global__ void add_bias_kernel(T* data, int slot_pairs_num, int ins_num, int out_dim, const T* bias) { CUDA_KERNEL_LOOP(idx, slot_pairs_num * ins_num * out_dim) { int block_len = ins_num * out_dim; int slot_index = idx / block_len; int out_dim_index = (idx % block_len) % out_dim; T temp = data[idx] + bias[slot_index * out_dim + out_dim_index]; data[idx] = temp; } } template <typename T> void add_bias(hipStream_t stream, T* data, int slot_pairs_num, int ins_num, int out_dim, const T* bias) { hipLaunchKernelGGL(( add_bias_kernel), dim3(GET_BLOCKS(slot_pairs_num * ins_num * out_dim)), dim3(CUDA_NUM_THREADS), 0, stream, data, slot_pairs_num, ins_num, out_dim, bias); } template <typename T> __global__ void add_bias_grad_kernel(const T* dout_data, int slot_pairs_num, int ins_num, int out_dim, T* db_data) { CUDA_KERNEL_LOOP(idx, slot_pairs_num * out_dim) { int row = idx / out_dim; int col = idx % out_dim; T temp = static_cast<T>(0); for (int i = 0; i < ins_num; ++i) { int select_indx = ((row + 1) * i + 1) * col; temp += dout_data[select_indx]; } db_data[idx] += temp; } } template <typename T> void add_bias_grad(hipStream_t stream, const T* dout_data, int slot_pairs_num, int ins_num, int out_dim, T* db_data) { hipLaunchKernelGGL(( add_bias_grad_kernel), dim3(GET_BLOCKS(slot_pairs_num * out_dim)), dim3(CUDA_NUM_THREADS), 0, stream, dout_data, slot_pairs_num, ins_num, out_dim, db_data); } template <typename DeviceContext, typename T> class BatchFCCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { // X.dim = slot_pairs_num * ins_num * in_dim // W.dim = slot_pairs_num * in_dim * out_dim // b.dim = slot_pairs_num * out_dim // output.dim = slot_pairs_num * ins_num * out_dim auto* input = ctx.Input<framework::LoDTensor>("Input"); auto* w = ctx.Input<Tensor>("W"); auto* bias = ctx.Input<Tensor>("Bias"); auto* output = ctx.Output<framework::LoDTensor>("Out"); auto input_dims = input->dims(); auto w_dims = w->dims(); auto slot_pairs_num = input_dims[0]; auto ins_num = input_dims[1]; auto in_dim = input_dims[2]; auto out_dim = w_dims[2]; // get data ptr const T* in_data = input->data<T>(); const T* w_data = w->data<T>(); const T* bias_data = bias->data<T>(); output->Resize({slot_pairs_num, ins_num, out_dim}); T* out_data = output->mutable_data<T>(ctx.GetPlace()); // initialize auto out_eigen 
= framework::EigenVector<T>::Flatten(*output); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto& place = *ctx.template device_context<platform::CUDADeviceContext>() .eigen_device(); out_eigen.device(place) = out_eigen.constant(static_cast<T>(0)); CBLAS_TRANSPOSE transA = CblasNoTrans; CBLAS_TRANSPOSE transB = CblasNoTrans; T alpha = 1; T beta = 0; int64_t strideA = ins_num * in_dim; int64_t strideB = in_dim * out_dim; auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx); blas.BatchedGEMM(transA, transB, ins_num, out_dim, in_dim, alpha, in_data, w_data, beta, out_data, slot_pairs_num, strideA, strideB); add_bias<T>(ctx.cuda_device_context().stream(), out_data, slot_pairs_num, ins_num, out_dim, bias_data); } }; template <typename DeviceContext, typename T> class BatchFCGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("Input"); auto* w = ctx.Input<Tensor>("W"); auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* dx = ctx.Output<Tensor>(framework::GradVarName("Input")); auto* dw = ctx.Output<Tensor>(framework::GradVarName("W")); auto* db = ctx.Output<Tensor>(framework::GradVarName("Bias")); auto input_dims = input->dims(); auto w_dims = w->dims(); auto slot_pairs_num = input_dims[0]; auto ins_num = input_dims[1]; auto in_dim = input_dims[2]; auto out_dim = w_dims[2]; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto& place = *ctx.template device_context<platform::CUDADeviceContext>() .eigen_device(); // initialize dx->mutable_data<T>(ctx.GetPlace()); auto dx_eigen = framework::EigenVector<T>::Flatten(*dx); dx_eigen.device(place) = dx_eigen.constant(static_cast<T>(0)); dw->mutable_data<T>(ctx.GetPlace()); auto dw_eigen = framework::EigenVector<T>::Flatten(*dw); dw_eigen.device(place) = dw_eigen.constant(static_cast<T>(0)); // get data ptr const T* x_data = input->data<T>(); const T* w_data = w->data<T>(); const T* dout_data = dout->data<T>(); T* dx_data = dx->data<T>(); T* dw_data = dw->data<T>(); db->mutable_data<T>(ctx.GetPlace()); auto db_eigen = framework::EigenVector<T>::Flatten(*db); db_eigen.device(place) = db_eigen.constant(static_cast<T>(0)); T* db_data = db->data<T>(); add_bias_grad<T>(ctx.cuda_device_context().stream(), dout_data, slot_pairs_num, ins_num, out_dim, db_data); auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx); T alpha = 1; T beta = 0; // dx = dout_data * y^T blas.BatchedGEMM(CblasNoTrans, CblasTrans, ins_num, in_dim, out_dim, alpha, dout_data, w_data, beta, dx_data, slot_pairs_num, ins_num * out_dim, out_dim * in_dim); // dy = x^T * dout_data blas.BatchedGEMM(CblasTrans, CblasNoTrans, in_dim, out_dim, ins_num, alpha, x_data, dout_data, beta, dw_data, slot_pairs_num, in_dim * ins_num, ins_num * out_dim); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; using GPUCtx = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(batch_fc, ops::BatchFCCUDAKernel<GPUCtx, float>, ops::BatchFCCUDAKernel<GPUCtx, double>); REGISTER_OP_CUDA_KERNEL(batch_fc_grad, ops::BatchFCGradOpCUDAKernel<GPUCtx, float>, ops::BatchFCGradOpCUDAKernel<GPUCtx, double>);
4bed8014c474b86f825b3e17fe6413b1960e1fb0.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cublas.h> #include <string> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/operators/batch_fc_op.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { using framework::Tensor; const int CUDA_NUM_THREADS = 1024; static inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename T> __global__ void add_bias_kernel(T* data, int slot_pairs_num, int ins_num, int out_dim, const T* bias) { CUDA_KERNEL_LOOP(idx, slot_pairs_num * ins_num * out_dim) { int block_len = ins_num * out_dim; int slot_index = idx / block_len; int out_dim_index = (idx % block_len) % out_dim; T temp = data[idx] + bias[slot_index * out_dim + out_dim_index]; data[idx] = temp; } } template <typename T> void add_bias(cudaStream_t stream, T* data, int slot_pairs_num, int ins_num, int out_dim, const T* bias) { add_bias_kernel<<<GET_BLOCKS(slot_pairs_num * ins_num * out_dim), CUDA_NUM_THREADS, 0, stream>>>(data, slot_pairs_num, ins_num, out_dim, bias); } template <typename T> __global__ void add_bias_grad_kernel(const T* dout_data, int slot_pairs_num, int ins_num, int out_dim, T* db_data) { CUDA_KERNEL_LOOP(idx, slot_pairs_num * out_dim) { int row = idx / out_dim; int col = idx % out_dim; T temp = static_cast<T>(0); for (int i = 0; i < ins_num; ++i) { int select_indx = ((row + 1) * i + 1) * col; temp += dout_data[select_indx]; } db_data[idx] += temp; } } template <typename T> void add_bias_grad(cudaStream_t stream, const T* dout_data, int slot_pairs_num, int ins_num, int out_dim, T* db_data) { add_bias_grad_kernel<<<GET_BLOCKS(slot_pairs_num * out_dim), CUDA_NUM_THREADS, 0, stream>>>(dout_data, slot_pairs_num, ins_num, out_dim, db_data); } template <typename DeviceContext, typename T> class BatchFCCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { // X.dim = slot_pairs_num * ins_num * in_dim // W.dim = slot_pairs_num * in_dim * out_dim // b.dim = slot_pairs_num * out_dim // output.dim = slot_pairs_num * ins_num * out_dim auto* input = ctx.Input<framework::LoDTensor>("Input"); auto* w = ctx.Input<Tensor>("W"); auto* bias = ctx.Input<Tensor>("Bias"); auto* output = ctx.Output<framework::LoDTensor>("Out"); auto input_dims = input->dims(); auto w_dims = w->dims(); auto slot_pairs_num = input_dims[0]; auto ins_num = input_dims[1]; auto in_dim = input_dims[2]; auto out_dim = w_dims[2]; // get data ptr const T* in_data = input->data<T>(); const T* w_data = w->data<T>(); const T* bias_data = bias->data<T>(); output->Resize({slot_pairs_num, ins_num, out_dim}); T* out_data = output->mutable_data<T>(ctx.GetPlace()); // initialize auto out_eigen = framework::EigenVector<T>::Flatten(*output); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto& place = 
*ctx.template device_context<platform::CUDADeviceContext>() .eigen_device(); out_eigen.device(place) = out_eigen.constant(static_cast<T>(0)); CBLAS_TRANSPOSE transA = CblasNoTrans; CBLAS_TRANSPOSE transB = CblasNoTrans; T alpha = 1; T beta = 0; int64_t strideA = ins_num * in_dim; int64_t strideB = in_dim * out_dim; auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx); blas.BatchedGEMM(transA, transB, ins_num, out_dim, in_dim, alpha, in_data, w_data, beta, out_data, slot_pairs_num, strideA, strideB); add_bias<T>(ctx.cuda_device_context().stream(), out_data, slot_pairs_num, ins_num, out_dim, bias_data); } }; template <typename DeviceContext, typename T> class BatchFCGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("Input"); auto* w = ctx.Input<Tensor>("W"); auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* dx = ctx.Output<Tensor>(framework::GradVarName("Input")); auto* dw = ctx.Output<Tensor>(framework::GradVarName("W")); auto* db = ctx.Output<Tensor>(framework::GradVarName("Bias")); auto input_dims = input->dims(); auto w_dims = w->dims(); auto slot_pairs_num = input_dims[0]; auto ins_num = input_dims[1]; auto in_dim = input_dims[2]; auto out_dim = w_dims[2]; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto& place = *ctx.template device_context<platform::CUDADeviceContext>() .eigen_device(); // initialize dx->mutable_data<T>(ctx.GetPlace()); auto dx_eigen = framework::EigenVector<T>::Flatten(*dx); dx_eigen.device(place) = dx_eigen.constant(static_cast<T>(0)); dw->mutable_data<T>(ctx.GetPlace()); auto dw_eigen = framework::EigenVector<T>::Flatten(*dw); dw_eigen.device(place) = dw_eigen.constant(static_cast<T>(0)); // get data ptr const T* x_data = input->data<T>(); const T* w_data = w->data<T>(); const T* dout_data = dout->data<T>(); T* dx_data = dx->data<T>(); T* dw_data = dw->data<T>(); db->mutable_data<T>(ctx.GetPlace()); auto db_eigen = framework::EigenVector<T>::Flatten(*db); db_eigen.device(place) = db_eigen.constant(static_cast<T>(0)); T* db_data = db->data<T>(); add_bias_grad<T>(ctx.cuda_device_context().stream(), dout_data, slot_pairs_num, ins_num, out_dim, db_data); auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx); T alpha = 1; T beta = 0; // dx = dout_data * y^T blas.BatchedGEMM(CblasNoTrans, CblasTrans, ins_num, in_dim, out_dim, alpha, dout_data, w_data, beta, dx_data, slot_pairs_num, ins_num * out_dim, out_dim * in_dim); // dy = x^T * dout_data blas.BatchedGEMM(CblasTrans, CblasNoTrans, in_dim, out_dim, ins_num, alpha, x_data, dout_data, beta, dw_data, slot_pairs_num, in_dim * ins_num, ins_num * out_dim); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; using GPUCtx = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(batch_fc, ops::BatchFCCUDAKernel<GPUCtx, float>, ops::BatchFCCUDAKernel<GPUCtx, double>); REGISTER_OP_CUDA_KERNEL(batch_fc_grad, ops::BatchFCGradOpCUDAKernel<GPUCtx, float>, ops::BatchFCGradOpCUDAKernel<GPUCtx, double>);
9af0f0f28e63cd7c57328f49a87073618005f70f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cubicPrefilter.cuh" #include <stdio.h> #include "cutil.h" #include "internal/cubicPrefilter_kernel.cuh" #include "internal/math_func.cuh" #include "hip/driver_types.h" // *************************************************************************** // * Global GPU procedures // *************************************************************************** template<typename floatN> __global__ void SamplesToCoefficients2DX( floatN* image, // in-place processing unsigned int pitch, // width in bytes unsigned int width, // width of the image unsigned int height) // height of the image { // process lines in x-direction const unsigned int y = blockIdx.x * blockDim.x + threadIdx.x; floatN* line = image + y * pitch; //direct access ConvertToInterpolationCoefficients(line, width, 1); } template<typename floatN> __global__ void SamplesToCoefficients2DY( floatN* image, // in-place processing unsigned int pitch, // width in bytes unsigned int width, // width of the image unsigned int height) // height of the image { // process lines in x-direction const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; floatN* line = image + x; //direct access ConvertToInterpolationCoefficients(line, height, pitch); } // *************************************************************************** // * Exported functions // *************************************************************************** //! Convert the pixel values into cubic b-spline coefficients //! @param image pointer to the image bitmap in GPU (device) memory //! @param pitch width in bytes (including padding bytes) //! @param width image width in number of pixels //! @param height image height in number of pixels template<typename floatN> hipPitchedPtr CubicBSplinePrefilter2D(floatN* image, unsigned int pitch, unsigned int width, unsigned int height) { hipPitchedPtr imageData; CUDA_SAFE_CALL(hipMalloc3D(&imageData, make_hipExtent(width * sizeof(floatN), height, 1))); CUDA_SAFE_CALL(hipMemcpy2D(imageData.ptr, imageData.pitch, image, sizeof(floatN) * width, sizeof(floatN) * width, height, hipMemcpyHostToDevice)); float stride = imageData.pitch / sizeof(floatN); //assert(fmod(stride, 1.0f) == 0.0f); dim3 dimBlockX(min(PowTwoDivider(height), 64)); dim3 dimGridX(height / dimBlockX.x); hipLaunchKernelGGL(( SamplesToCoefficients2DX<floatN>), dim3(dimGridX), dim3(dimBlockX), 0, 0, (float*)imageData.ptr, (unsigned int)stride, width, height); CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed"); dim3 dimBlockY(min(PowTwoDivider(width), 64)); dim3 dimGridY(width / dimBlockY.x); hipLaunchKernelGGL(( SamplesToCoefficients2DY<floatN>), dim3(dimGridY), dim3(dimBlockY), 0, 0, (float*)imageData.ptr, (unsigned int)stride, width, height); CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed"); CUDA_SAFE_CALL(hipMemcpy2D(image, sizeof(floatN) * width, imageData.ptr, imageData.pitch, sizeof(floatN) * width, height, hipMemcpyDeviceToHost)); return imageData; } template hipPitchedPtr CubicBSplinePrefilter2D<float>(float* image, unsigned int pitch, unsigned int width, unsigned int height); //! Convert the pixel values into cubic b-spline coefficients //! @param image pointer to the image bitmap in GPU (device) memory //! @param pitch width in bytes (including padding bytes) //! @param width image width in number of pixels //! @param height image height in number of pixels //! 
@note Prints stopwatch feedback template<typename floatN> void CubicBSplinePrefilter2DTimer(floatN* image, unsigned int pitch, unsigned int width, unsigned int height) { printf("\nCubic B-Spline Prefilter timer:\n"); unsigned int hTimer; CUT_SAFE_CALL(cutCreateTimer(&hTimer)); CUT_SAFE_CALL(cutResetTimer(hTimer)); CUT_SAFE_CALL(cutStartTimer(hTimer)); dim3 dimBlockX(min(PowTwoDivider(height), 64)); dim3 dimGridX(height / dimBlockX.x); hipLaunchKernelGGL(( SamplesToCoefficients2DX<floatN>), dim3(dimGridX), dim3(dimBlockX), 0, 0, image, pitch, width, height); CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed"); CUT_SAFE_CALL(cutStopTimer(hTimer)); double timerValueX = cutGetTimerValue(hTimer); printf("x-direction : %f msec\n", timerValueX); CUT_SAFE_CALL(cutResetTimer(hTimer)); CUT_SAFE_CALL(cutStartTimer(hTimer)); dim3 dimBlockY(min(PowTwoDivider(width), 64)); dim3 dimGridY(width / dimBlockY.x); hipLaunchKernelGGL(( SamplesToCoefficients2DY<floatN>), dim3(dimGridY), dim3(dimBlockY), 0, 0, image, pitch, width, height); CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed"); CUT_SAFE_CALL(cutStopTimer(hTimer)); double timerValueY = cutGetTimerValue(hTimer); printf("y-direction : %f msec\n", timerValueY); printf("total : %f msec\n\n", timerValueX+timerValueY); }
9af0f0f28e63cd7c57328f49a87073618005f70f.cu
#include "cubicPrefilter.cuh" #include <stdio.h> #include "cutil.h" #include "internal/cubicPrefilter_kernel.cuh" #include "internal/math_func.cuh" #include "driver_types.h" // *************************************************************************** // * Global GPU procedures // *************************************************************************** template<typename floatN> __global__ void SamplesToCoefficients2DX( floatN* image, // in-place processing unsigned int pitch, // width in bytes unsigned int width, // width of the image unsigned int height) // height of the image { // process lines in x-direction const unsigned int y = blockIdx.x * blockDim.x + threadIdx.x; floatN* line = image + y * pitch; //direct access ConvertToInterpolationCoefficients(line, width, 1); } template<typename floatN> __global__ void SamplesToCoefficients2DY( floatN* image, // in-place processing unsigned int pitch, // width in bytes unsigned int width, // width of the image unsigned int height) // height of the image { // process lines in x-direction const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; floatN* line = image + x; //direct access ConvertToInterpolationCoefficients(line, height, pitch); } // *************************************************************************** // * Exported functions // *************************************************************************** //! Convert the pixel values into cubic b-spline coefficients //! @param image pointer to the image bitmap in GPU (device) memory //! @param pitch width in bytes (including padding bytes) //! @param width image width in number of pixels //! @param height image height in number of pixels template<typename floatN> cudaPitchedPtr CubicBSplinePrefilter2D(floatN* image, unsigned int pitch, unsigned int width, unsigned int height) { cudaPitchedPtr imageData; CUDA_SAFE_CALL(cudaMalloc3D(&imageData, make_cudaExtent(width * sizeof(floatN), height, 1))); CUDA_SAFE_CALL(cudaMemcpy2D(imageData.ptr, imageData.pitch, image, sizeof(floatN) * width, sizeof(floatN) * width, height, cudaMemcpyHostToDevice)); float stride = imageData.pitch / sizeof(floatN); //assert(fmod(stride, 1.0f) == 0.0f); dim3 dimBlockX(min(PowTwoDivider(height), 64)); dim3 dimGridX(height / dimBlockX.x); SamplesToCoefficients2DX<floatN><<<dimGridX, dimBlockX>>>((float*)imageData.ptr, (unsigned int)stride, width, height); CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed"); dim3 dimBlockY(min(PowTwoDivider(width), 64)); dim3 dimGridY(width / dimBlockY.x); SamplesToCoefficients2DY<floatN><<<dimGridY, dimBlockY>>>((float*)imageData.ptr, (unsigned int)stride, width, height); CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed"); CUDA_SAFE_CALL(cudaMemcpy2D(image, sizeof(floatN) * width, imageData.ptr, imageData.pitch, sizeof(floatN) * width, height, cudaMemcpyDeviceToHost)); return imageData; } template cudaPitchedPtr CubicBSplinePrefilter2D<float>(float* image, unsigned int pitch, unsigned int width, unsigned int height); //! Convert the pixel values into cubic b-spline coefficients //! @param image pointer to the image bitmap in GPU (device) memory //! @param pitch width in bytes (including padding bytes) //! @param width image width in number of pixels //! @param height image height in number of pixels //! 
@note Prints stopwatch feedback template<typename floatN> void CubicBSplinePrefilter2DTimer(floatN* image, unsigned int pitch, unsigned int width, unsigned int height) { printf("\nCubic B-Spline Prefilter timer:\n"); unsigned int hTimer; CUT_SAFE_CALL(cutCreateTimer(&hTimer)); CUT_SAFE_CALL(cutResetTimer(hTimer)); CUT_SAFE_CALL(cutStartTimer(hTimer)); dim3 dimBlockX(min(PowTwoDivider(height), 64)); dim3 dimGridX(height / dimBlockX.x); SamplesToCoefficients2DX<floatN><<<dimGridX, dimBlockX>>>(image, pitch, width, height); CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed"); CUT_SAFE_CALL(cutStopTimer(hTimer)); double timerValueX = cutGetTimerValue(hTimer); printf("x-direction : %f msec\n", timerValueX); CUT_SAFE_CALL(cutResetTimer(hTimer)); CUT_SAFE_CALL(cutStartTimer(hTimer)); dim3 dimBlockY(min(PowTwoDivider(width), 64)); dim3 dimGridY(width / dimBlockY.x); SamplesToCoefficients2DY<floatN><<<dimGridY, dimBlockY>>>(image, pitch, width, height); CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed"); CUT_SAFE_CALL(cutStopTimer(hTimer)); double timerValueY = cutGetTimerValue(hTimer); printf("y-direction : %f msec\n", timerValueY); printf("total : %f msec\n\n", timerValueX+timerValueY); }
5cd381cfa05bd3eed4529afbabe9e7652520d03f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * cuPrintf.cu * * This is a printf command callable from within a kernel. It is set * up so that output is sent to a memory buffer, which is emptied from * the host side - but only after a hipDeviceSynchronize() on the host. * * Currently, there is a limitation of around 200 characters of output * and no more than 10 arguments to a single cuPrintf() call. Issue * multiple calls if longer format strings are required. * * It requires minimal setup, and is *NOT* optimised for performance. * For example, writes are not coalesced - this is because there is an * assumption that people will not want to printf from every single one * of thousands of threads, but only from individual threads at a time. * * Using this is simple - it requires one host-side call to initialise * everything, and then kernels can call cuPrintf at will. Sample code * is the easiest way to demonstrate: * #include "cuPrintf.hip" __global__ void testKernel(int val) { cuPrintf("Value is: %d\n", val); } int main() { cudaPrintfInit(); testKernel<<< 2, 3 >>>(10); cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); return 0; } * * See the header file, "cuPrintf.cuh" for more info, especially * arguments to cudaPrintfInit() and cudaPrintfDisplay(); */ #ifndef CUPRINTF_CU #define CUPRINTF_CU #include "cuPrintf_hip.cuh" #if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture #include <sm_11_atomic_functions.h> #endif // This is the smallest amount of memory, per-thread, which is allowed. // It is also the largest amount of space a single printf() can take up const static int CUPRINTF_MAX_LEN = 256; // This structure is used internally to track block/thread output restrictions. typedef struct __align__(8) { int threadid; // CUPRINTF_UNRESTRICTED for unrestricted int blockid; // CUPRINTF_UNRESTRICTED for unrestricted } cuPrintfRestriction; // The main storage is in a global print buffer, which has a known // start/end/length. These are atomically updated so it works as a // circular buffer. // Since the only control primitive that can be used is atomicAdd(), // we cannot wrap the pointer as such. The actual address must be // calculated from printfBufferPtr by mod-ing with printfBufferLength. // For sm_10 architecture, we must subdivide the buffer per-thread // since we do not even have an atomic primitive. __constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host) __constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host) __device__ static cuPrintfRestriction restrictRules; // Output restrictions __device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset // This is the header preceeding all printf entries. 
// NOTE: It *must* be size-aligned to the maximum entity size (size_t) typedef struct __align__(8) { unsigned short magic; // Magic number says we're valid unsigned short fmtoffset; // Offset of fmt string into buffer unsigned short blockid; // Block ID of author unsigned short threadid; // Thread ID of author } cuPrintfHeader; // Special header for sm_10 architecture #define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character typedef struct __align__(16) { unsigned short magic; // sm_10 specific magic number unsigned short unused; unsigned int thread_index; // thread ID for this buffer unsigned int thread_buf_len; // per-thread buffer length unsigned int offset; // most recent printf's offset } cuPrintfHeaderSM10; // Because we can't write an element which is not aligned to its bit-size, // we have to align all sizes and variables on maximum-size boundaries. // That means sizeof(double) in this case, but we'll use (long long) for // better arch<1.3 support #define CUPRINTF_ALIGN_SIZE sizeof(long long) // All our headers are prefixed with a magic number so we know they're ready #define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character // // getNextPrintfBufPtr // // Grabs a block of space in the general circular buffer, using an // atomic function to ensure that it's ours. We handle wrapping // around the circular buffer and return a pointer to a place which // can be written to. // // Important notes: // 1. We always grab CUPRINTF_MAX_LEN bytes // 2. Because of 1, we never worry about wrapping around the end // 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN // // This returns a pointer to the place where we own. // __device__ static char *getNextPrintfBufPtr() { // Initialisation check if(!printfBufferPtr) return NULL; // Thread/block restriction check if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y))) return NULL; if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z))) return NULL; // Conditional section, dependent on architecture #if __CUDA_ARCH__ == 100 // For sm_10 architectures, we have no atomic add - this means we must split the // entire available buffer into per-thread blocks. Inefficient, but what can you do. int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z); int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z + (blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z); // Find our own block of data and go to it. Make sure the per-thread length // is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and // alignment issues! We must round down, of course. unsigned int thread_buf_len = printfBufferLength / thread_count; thread_buf_len &= ~(CUPRINTF_MAX_LEN-1); // We *must* have a thread buffer length able to fit at least two printfs (one header, one real) if(thread_buf_len < (CUPRINTF_MAX_LEN * 2)) return NULL; // Now address our section of the buffer. The first item is a header. char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index); cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer; if(hdr.magic != CUPRINTF_SM10_MAGIC) { // If our header is not set up, initialise it hdr.magic = CUPRINTF_SM10_MAGIC; hdr.thread_index = thread_index; hdr.thread_buf_len = thread_buf_len; hdr.offset = 0; // Note we start at 0! 
We pre-increment below. *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header // For initial setup purposes, we might need to init thread0's header too // (so that cudaPrintfDisplay() below will work). This is only run once. cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer; tophdr->thread_buf_len = thread_buf_len; } // Adjust the offset by the right amount, and wrap it if need be unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN; if(offset >= hdr.thread_buf_len) offset = CUPRINTF_MAX_LEN; // Write back the new offset for next time and return a pointer to it ((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset; return myPrintfBuffer + offset; #else // Much easier with an atomic operation! size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer; offset %= printfBufferLength; return globalPrintfBuffer + offset; #endif } // // writePrintfHeader // // Inserts the header for containing our UID, fmt position and // block/thread number. We generate it dynamically to avoid // issues arising from requiring pre-initialisation. // __device__ static void writePrintfHeader(char *ptr, char *fmtptr) { if(ptr) { cuPrintfHeader header; header.magic = CUPRINTF_SM11_MAGIC; header.fmtoffset = (unsigned short)(fmtptr - ptr); header.blockid = blockIdx.x + gridDim.x*blockIdx.y; header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z; *(cuPrintfHeader *)(void *)ptr = header; } } // // cuPrintfStrncpy // // This special strncpy outputs an aligned length value, followed by the // string. It then zero-pads the rest of the string until a 64-aligned // boundary. The length *includes* the padding. A pointer to the byte // just after the \0 is returned. // // This function could overflow CUPRINTF_MAX_LEN characters in our buffer. // To avoid it, we must count as we output and truncate where necessary. // __device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end) { // Initialisation and overflow check if(!dest || !src || (dest >= end)) return NULL; // Prepare to write the length specifier. We're guaranteed to have // at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in // chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE. int *lenptr = (int *)(void *)dest; int len = 0; dest += CUPRINTF_ALIGN_SIZE; // Now copy the string while(n--) { if(dest >= end) // Overflow check break; len++; *dest++ = *src; if(*src++ == '\0') break; } // Now write out the padding bytes, and we have our length. while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0)) { len++; *dest++ = 0; } *lenptr = len; return (dest < end) ? dest : NULL; // Overflow means return NULL } // // copyArg // // This copies a length specifier and then the argument out to the // data buffer. Templates let the compiler figure all this out at // compile-time, making life much simpler from the programming // point of view. I'm assuimg all (const char *) is a string, and // everything else is the variable it points at. I'd love to see // a better way of doing it, but aside from parsing the format // string I can't think of one. // // The length of the data type is inserted at the beginning (so that // the display can distinguish between float and double), and the // pointer to the end of the entry is returned. 
// __device__ static char *copyArg(char *ptr, const char *arg, char *end) { // Initialisation check if(!ptr || !arg) return NULL; // strncpy does all our work. We just terminate. if((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL) *ptr = 0; return ptr; } template <typename T> __device__ static char *copyArg(char *ptr, T &arg, char *end) { // Initisalisation and overflow check. Alignment rules mean that // we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need // to check that one offset. if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end)) return NULL; // Write the length and argument *(int *)(void *)ptr = sizeof(arg); ptr += CUPRINTF_ALIGN_SIZE; *(T *)(void *)ptr = arg; ptr += CUPRINTF_ALIGN_SIZE; *ptr = 0; return ptr; } // // cuPrintf // // Templated printf functions to handle multiple arguments. // Note we return the total amount of data copied, not the number // of characters output. But then again, who ever looks at the // return from printf() anyway? // // The format is to grab a block of circular buffer space, the // start of which will hold a header and a pointer to the format // string. We then write in all the arguments, and finally the // format string itself. This is to make it easy to prevent // overflow of our buffer (we support up to 10 arguments, each of // which can be 12 bytes in length - that means that only the // format string (or a %s) can actually overflow; so the overflow // check need only be in the strcpy function. // // The header is written at the very last because that's what // makes it look like we're done. // // Errors, which are basically lack-of-initialisation, are ignored // in the called functions because NULL pointers are passed around // // All printf variants basically do the same thing, setting up the // buffer, writing all arguments, then finalising the header. For // clarity, we'll pack the code into some big macros. #define CUPRINTF_PREAMBLE \ char *start, *end, *bufptr, *fmtstart; \ if((start = getNextPrintfBufPtr()) == NULL) return 0; \ end = start + CUPRINTF_MAX_LEN; \ bufptr = start + sizeof(cuPrintfHeader); // Posting an argument is easy #define CUPRINTF_ARG(argname) \ bufptr = copyArg(bufptr, argname, end); // After args are done, record start-of-fmt and write the fmt and header #define CUPRINTF_POSTAMBLE \ fmtstart = bufptr; \ end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \ writePrintfHeader(start, end ? fmtstart : NULL); \ return end ? 
(int)(end - start) : 0; __device__ int cuPrintf(const char *fmt) { CUPRINTF_PREAMBLE; CUPRINTF_POSTAMBLE; } template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_ARG(arg10); CUPRINTF_POSTAMBLE; } #undef CUPRINTF_PREAMBLE #undef CUPRINTF_ARG #undef CUPRINTF_POSTAMBLE // // cuPrintfRestrict // // Called to restrict output to a given thread/block. 
// We store the info in "restrictRules", which is set up at // init time by the host. It's not the cleanest way to do this // because it means restrictions will last between // invocations, but given the output-pointer continuity, // I feel this is reasonable. // __device__ void cuPrintfRestrict(int threadid, int blockid) { int thread_count = blockDim.x * blockDim.y * blockDim.z; if(((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED)) restrictRules.threadid = threadid; int block_count = gridDim.x * gridDim.y; if(((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED)) restrictRules.blockid = blockid; } /////////////////////////////////////////////////////////////////////////////// // HOST SIDE #include <stdio.h> static FILE *printf_fp; static char *printfbuf_start=NULL; static char *printfbuf_device=NULL; static int printfbuf_len=0; // // outputPrintfData // // Our own internal function, which takes a pointer to a data buffer // and passes it through libc's printf for output. // // We receive the formate string and a pointer to where the data is // held. We then run through and print it out. // // Returns 0 on failure, 1 on success // static int outputPrintfData(char *fmt, char *data) { // Format string is prefixed by a length that we don't need fmt += CUPRINTF_ALIGN_SIZE; // Now run through it, printing everything we can. We must // run to every % character, extract only that, and use printf // to format it. char *p = strchr(fmt, '%'); while(p != NULL) { // Print up to the % character *p = '\0'; fputs(fmt, printf_fp); *p = '%'; // Put back the % // Now handle the format specifier char *format = p++; // Points to the '%' p += strcspn(p, "%cdiouxXeEfgGaAnps"); if(*p == '\0') // If no format specifier, print the whole thing { fmt = format; break; } // Cut out the format bit and use printf to print it. It's prefixed // by its length. int arglen = *(int *)data; if(arglen > CUPRINTF_MAX_LEN) { fputs("Corrupt printf buffer data - aborting\n", printf_fp); return 0; } data += CUPRINTF_ALIGN_SIZE; char specifier = *p++; char c = *p; // Store for later *p = '\0'; switch(specifier) { // These all take integer arguments case 'c': case 'd': case 'i': case 'o': case 'u': case 'x': case 'X': case 'p': fprintf(printf_fp, format, *((int *)data)); break; // These all take double arguments case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': if(arglen == 4) // Float vs. Double thing fprintf(printf_fp, format, *((float *)data)); else fprintf(printf_fp, format, *((double *)data)); break; // Strings are handled in a special way case 's': fprintf(printf_fp, format, (char *)data); break; // % is special case '%': fprintf(printf_fp, "%%"); break; // Everything else is just printed out as-is default: fprintf(printf_fp, "%s", format); break; } data += CUPRINTF_ALIGN_SIZE; // Move on to next argument *p = c; // Restore what we removed fmt = p; // Adjust fmt string to be past the specifier p = strchr(fmt, '%'); // and get the next specifier } // Print out the last of the string fputs(fmt, printf_fp); return 1; } // // doPrintfDisplay // // This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the // print function above to display them. We've got this separate from // cudaPrintfDisplay() below so we can handle the SM_10 architecture // partitioning. 
// static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr) { // Grab, piece-by-piece, each output element until we catch // up with the circular buffer end pointer int printf_count=0; char printfbuf_local[CUPRINTF_MAX_LEN+1]; printfbuf_local[CUPRINTF_MAX_LEN] = '\0'; while(bufptr != endptr) { // Wrap ourselves at the end-of-buffer if(bufptr == bufend) bufptr = bufstart; // Adjust our start pointer to within the circular buffer and copy a block. hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost); // If the magic number isn't valid, then this write hasn't gone through // yet and we'll wait until it does (or we're past the end for non-async printfs). cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local; if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN)) { //fprintf(printf_fp, "Bad magic number in printf header\n"); break; } // Extract all the info and get this printf done if(headings) fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid); if(hdr->fmtoffset == 0) fprintf(printf_fp, "printf buffer overflow\n"); else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader))) break; printf_count++; // Clear if asked if(clear) hipMemset(bufptr, 0, CUPRINTF_MAX_LEN); // Now advance our start location, because we're done, and keep copying bufptr += CUPRINTF_MAX_LEN; } return printf_count; } // // cudaPrintfInit // // Takes a buffer length to allocate, creates the memory on the device and // returns a pointer to it for when a kernel is called. It's up to the caller // to free it. // extern "C" hipError_t cudaPrintfInit(size_t bufferLen) { // Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen; if((bufferLen % CUPRINTF_MAX_LEN) > 0) bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN)); printfbuf_len = (int)bufferLen; // Allocate a print buffer on the device and zero it if(hipMalloc((void **)&printfbuf_device, printfbuf_len) != hipSuccess) return hipErrorInitializationError; hipMemset(printfbuf_device, 0, printfbuf_len); printfbuf_start = printfbuf_device; // Where we start reading from // No restrictions to begin with cuPrintfRestriction restrict; restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED; hipMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict)); // Initialise the buffer and the respective lengths/pointers. hipMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *)); hipMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *)); hipMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len)); return hipSuccess; } // // cudaPrintfEnd // // Frees up the memory which we allocated // extern "C" void cudaPrintfEnd() { if(!printfbuf_start || !printfbuf_device) return; hipFree(printfbuf_device); printfbuf_start = printfbuf_device = NULL; } // // cudaPrintfDisplay // // Each call to this function dumps the entire current contents // of the printf buffer to the pre-specified FILE pointer. The // circular "start" pointer is advanced so that subsequent calls // dumps only new stuff. // // In the case of async memory access (via streams), call this // repeatedly to keep trying to empty the buffer. If it's a sync // access, then the whole buffer should empty in one go. 
// // Arguments: // outputFP - File descriptor to output to (NULL => stdout) // showThreadID - If true, prints [block,thread] before each line // extern "C" hipError_t cudaPrintfDisplay(void *outputFP, bool showThreadID) { printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP); // For now, we force "synchronous" mode which means we're not concurrent // with kernel execution. This also means we don't need clearOnPrint. // If you're patching it for async operation, here's where you want it. bool sync_printfs = true; bool clearOnPrint = false; // Initialisation check if(!printfbuf_start || !printfbuf_device || !printf_fp) return hipErrorMissingConfiguration; // To determine which architecture we're using, we read the // first short from the buffer - it'll be the magic number // relating to the version. unsigned short magic; hipMemcpy(&magic, printfbuf_device, sizeof(unsigned short), hipMemcpyDeviceToHost); // For SM_10 architecture, we've split our buffer into one-per-thread. // That means we must do each thread block separately. It'll require // extra reading. We also, for now, don't support async printfs because // that requires tracking one start pointer per thread. if(magic == CUPRINTF_SM10_MAGIC) { sync_printfs = true; clearOnPrint = false; int blocklen = 0; char *blockptr = printfbuf_device; while(blockptr < (printfbuf_device + printfbuf_len)) { cuPrintfHeaderSM10 hdr; hipMemcpy(&hdr, blockptr, sizeof(hdr), hipMemcpyDeviceToHost); // We get our block-size-step from the very first header if(hdr.thread_buf_len != 0) blocklen = hdr.thread_buf_len; // No magic number means no printfs from this thread if(hdr.magic != CUPRINTF_SM10_MAGIC) { if(blocklen == 0) { fprintf(printf_fp, "No printf headers found at all!\n"); break; // No valid headers! } blockptr += blocklen; continue; } // "offset" is non-zero then we can print the block contents if(hdr.offset > 0) { // For synchronous printfs, we must print from endptr->bufend, then from start->end if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len); doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN); } // Move on to the next block and loop again blockptr += hdr.thread_buf_len; } } // For SM_11 and up, everything is a single buffer and it's simple else if(magic == CUPRINTF_SM11_MAGIC) { // Grab the current "end of circular buffer" pointer. char *printfbuf_end = NULL; hipMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *)); // Adjust our starting and ending pointers to within the block char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device; char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device; // For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular // buffer wrap carefully because we could miss those past "end". 
if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len); doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr); printfbuf_start = printfbuf_end; } else ;//printf("Bad magic number in cuPrintf buffer header\n"); // If we were synchronous, then we must ensure that the memory is cleared on exit // otherwise another kernel launch with a different grid size could conflict. if(sync_printfs) hipMemset(printfbuf_device, 0, printfbuf_len); return hipSuccess; } // Cleanup #undef CUPRINTF_MAX_LEN #undef CUPRINTF_ALIGN_SIZE #undef CUPRINTF_SM10_MAGIC #undef CUPRINTF_SM11_MAGIC #endif
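The non-sm_10 path of getNextPrintfBufPtr() above reserves buffer space with a single atomicAdd on a monotonically growing pointer and then folds the result back into the circular buffer with a modulo, which is why printfBufferLength must be a multiple of CUPRINTF_MAX_LEN. The following standalone CUDA sketch illustrates just that reservation scheme in isolation; RECORD_LEN, BUFFER_LEN, writeCounter and claimSlots are hypothetical names for illustration and are not part of cuPrintf.

#include <cstdio>
#include <cuda_runtime.h>

#define RECORD_LEN 256                 // stands in for CUPRINTF_MAX_LEN
#define BUFFER_LEN (RECORD_LEN * 8)    // must be a multiple of RECORD_LEN

__device__ unsigned int writeCounter = 0;   // monotonically increasing byte count

__global__ void claimSlots(char *buffer)
{
    // atomicAdd returns the pre-increment value, so every thread obtains its
    // own raw offset; the modulo wraps it into the circular buffer. Wrapped
    // offsets may land on an old slot, which simply overwrites older records,
    // just as cuPrintf overwrites output that was never displayed.
    unsigned int raw    = atomicAdd(&writeCounter, RECORD_LEN);
    unsigned int offset = raw % BUFFER_LEN;
    // Because BUFFER_LEN is an exact multiple of RECORD_LEN, a record can
    // never straddle the end of the buffer.
    buffer[offset] = (char)(1 + threadIdx.x);   // stand-in for writing a record
}

int main()
{
    char *d_buf = NULL;
    cudaMalloc(&d_buf, BUFFER_LEN);
    cudaMemset(d_buf, 0, BUFFER_LEN);
    claimSlots<<<1, 32>>>(d_buf);   // 32 claims wrap the 8-slot buffer four times
    cudaDeviceSynchronize();
    cudaFree(d_buf);
    return 0;
}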
5cd381cfa05bd3eed4529afbabe9e7652520d03f.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * cuPrintf.cu * * This is a printf command callable from within a kernel. It is set * up so that output is sent to a memory buffer, which is emptied from * the host side - but only after a cudaThreadSynchronize() on the host. * * Currently, there is a limitation of around 200 characters of output * and no more than 10 arguments to a single cuPrintf() call. Issue * multiple calls if longer format strings are required. * * It requires minimal setup, and is *NOT* optimised for performance. * For example, writes are not coalesced - this is because there is an * assumption that people will not want to printf from every single one * of thousands of threads, but only from individual threads at a time. * * Using this is simple - it requires one host-side call to initialise * everything, and then kernels can call cuPrintf at will. Sample code * is the easiest way to demonstrate: * #include "cuPrintf.cu" __global__ void testKernel(int val) { cuPrintf("Value is: %d\n", val); } int main() { cudaPrintfInit(); testKernel<<< 2, 3 >>>(10); cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); return 0; } * * See the header file, "cuPrintf.cuh" for more info, especially * arguments to cudaPrintfInit() and cudaPrintfDisplay(); */ #ifndef CUPRINTF_CU #define CUPRINTF_CU #include "cuPrintf.cuh" #if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture #include <sm_11_atomic_functions.h> #endif // This is the smallest amount of memory, per-thread, which is allowed. // It is also the largest amount of space a single printf() can take up const static int CUPRINTF_MAX_LEN = 256; // This structure is used internally to track block/thread output restrictions. typedef struct __align__(8) { int threadid; // CUPRINTF_UNRESTRICTED for unrestricted int blockid; // CUPRINTF_UNRESTRICTED for unrestricted } cuPrintfRestriction; // The main storage is in a global print buffer, which has a known // start/end/length. These are atomically updated so it works as a // circular buffer. // Since the only control primitive that can be used is atomicAdd(), // we cannot wrap the pointer as such. The actual address must be // calculated from printfBufferPtr by mod-ing with printfBufferLength. // For sm_10 architecture, we must subdivide the buffer per-thread // since we do not even have an atomic primitive. __constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host) __constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host) __device__ static cuPrintfRestriction restrictRules; // Output restrictions __device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset // This is the header preceeding all printf entries. 
// NOTE: It *must* be size-aligned to the maximum entity size (size_t) typedef struct __align__(8) { unsigned short magic; // Magic number says we're valid unsigned short fmtoffset; // Offset of fmt string into buffer unsigned short blockid; // Block ID of author unsigned short threadid; // Thread ID of author } cuPrintfHeader; // Special header for sm_10 architecture #define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character typedef struct __align__(16) { unsigned short magic; // sm_10 specific magic number unsigned short unused; unsigned int thread_index; // thread ID for this buffer unsigned int thread_buf_len; // per-thread buffer length unsigned int offset; // most recent printf's offset } cuPrintfHeaderSM10; // Because we can't write an element which is not aligned to its bit-size, // we have to align all sizes and variables on maximum-size boundaries. // That means sizeof(double) in this case, but we'll use (long long) for // better arch<1.3 support #define CUPRINTF_ALIGN_SIZE sizeof(long long) // All our headers are prefixed with a magic number so we know they're ready #define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character // // getNextPrintfBufPtr // // Grabs a block of space in the general circular buffer, using an // atomic function to ensure that it's ours. We handle wrapping // around the circular buffer and return a pointer to a place which // can be written to. // // Important notes: // 1. We always grab CUPRINTF_MAX_LEN bytes // 2. Because of 1, we never worry about wrapping around the end // 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN // // This returns a pointer to the place where we own. // __device__ static char *getNextPrintfBufPtr() { // Initialisation check if(!printfBufferPtr) return NULL; // Thread/block restriction check if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y))) return NULL; if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z))) return NULL; // Conditional section, dependent on architecture #if __CUDA_ARCH__ == 100 // For sm_10 architectures, we have no atomic add - this means we must split the // entire available buffer into per-thread blocks. Inefficient, but what can you do. int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z); int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z + (blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z); // Find our own block of data and go to it. Make sure the per-thread length // is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and // alignment issues! We must round down, of course. unsigned int thread_buf_len = printfBufferLength / thread_count; thread_buf_len &= ~(CUPRINTF_MAX_LEN-1); // We *must* have a thread buffer length able to fit at least two printfs (one header, one real) if(thread_buf_len < (CUPRINTF_MAX_LEN * 2)) return NULL; // Now address our section of the buffer. The first item is a header. char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index); cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer; if(hdr.magic != CUPRINTF_SM10_MAGIC) { // If our header is not set up, initialise it hdr.magic = CUPRINTF_SM10_MAGIC; hdr.thread_index = thread_index; hdr.thread_buf_len = thread_buf_len; hdr.offset = 0; // Note we start at 0! 
We pre-increment below. *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header // For initial setup purposes, we might need to init thread0's header too // (so that cudaPrintfDisplay() below will work). This is only run once. cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer; tophdr->thread_buf_len = thread_buf_len; } // Adjust the offset by the right amount, and wrap it if need be unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN; if(offset >= hdr.thread_buf_len) offset = CUPRINTF_MAX_LEN; // Write back the new offset for next time and return a pointer to it ((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset; return myPrintfBuffer + offset; #else // Much easier with an atomic operation! size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer; offset %= printfBufferLength; return globalPrintfBuffer + offset; #endif } // // writePrintfHeader // // Inserts the header for containing our UID, fmt position and // block/thread number. We generate it dynamically to avoid // issues arising from requiring pre-initialisation. // __device__ static void writePrintfHeader(char *ptr, char *fmtptr) { if(ptr) { cuPrintfHeader header; header.magic = CUPRINTF_SM11_MAGIC; header.fmtoffset = (unsigned short)(fmtptr - ptr); header.blockid = blockIdx.x + gridDim.x*blockIdx.y; header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z; *(cuPrintfHeader *)(void *)ptr = header; } } // // cuPrintfStrncpy // // This special strncpy outputs an aligned length value, followed by the // string. It then zero-pads the rest of the string until a 64-aligned // boundary. The length *includes* the padding. A pointer to the byte // just after the \0 is returned. // // This function could overflow CUPRINTF_MAX_LEN characters in our buffer. // To avoid it, we must count as we output and truncate where necessary. // __device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end) { // Initialisation and overflow check if(!dest || !src || (dest >= end)) return NULL; // Prepare to write the length specifier. We're guaranteed to have // at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in // chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE. int *lenptr = (int *)(void *)dest; int len = 0; dest += CUPRINTF_ALIGN_SIZE; // Now copy the string while(n--) { if(dest >= end) // Overflow check break; len++; *dest++ = *src; if(*src++ == '\0') break; } // Now write out the padding bytes, and we have our length. while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0)) { len++; *dest++ = 0; } *lenptr = len; return (dest < end) ? dest : NULL; // Overflow means return NULL } // // copyArg // // This copies a length specifier and then the argument out to the // data buffer. Templates let the compiler figure all this out at // compile-time, making life much simpler from the programming // point of view. I'm assuimg all (const char *) is a string, and // everything else is the variable it points at. I'd love to see // a better way of doing it, but aside from parsing the format // string I can't think of one. // // The length of the data type is inserted at the beginning (so that // the display can distinguish between float and double), and the // pointer to the end of the entry is returned. 
// __device__ static char *copyArg(char *ptr, const char *arg, char *end) { // Initialisation check if(!ptr || !arg) return NULL; // strncpy does all our work. We just terminate. if((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL) *ptr = 0; return ptr; } template <typename T> __device__ static char *copyArg(char *ptr, T &arg, char *end) { // Initisalisation and overflow check. Alignment rules mean that // we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need // to check that one offset. if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end)) return NULL; // Write the length and argument *(int *)(void *)ptr = sizeof(arg); ptr += CUPRINTF_ALIGN_SIZE; *(T *)(void *)ptr = arg; ptr += CUPRINTF_ALIGN_SIZE; *ptr = 0; return ptr; } // // cuPrintf // // Templated printf functions to handle multiple arguments. // Note we return the total amount of data copied, not the number // of characters output. But then again, who ever looks at the // return from printf() anyway? // // The format is to grab a block of circular buffer space, the // start of which will hold a header and a pointer to the format // string. We then write in all the arguments, and finally the // format string itself. This is to make it easy to prevent // overflow of our buffer (we support up to 10 arguments, each of // which can be 12 bytes in length - that means that only the // format string (or a %s) can actually overflow; so the overflow // check need only be in the strcpy function. // // The header is written at the very last because that's what // makes it look like we're done. // // Errors, which are basically lack-of-initialisation, are ignored // in the called functions because NULL pointers are passed around // // All printf variants basically do the same thing, setting up the // buffer, writing all arguments, then finalising the header. For // clarity, we'll pack the code into some big macros. #define CUPRINTF_PREAMBLE \ char *start, *end, *bufptr, *fmtstart; \ if((start = getNextPrintfBufPtr()) == NULL) return 0; \ end = start + CUPRINTF_MAX_LEN; \ bufptr = start + sizeof(cuPrintfHeader); // Posting an argument is easy #define CUPRINTF_ARG(argname) \ bufptr = copyArg(bufptr, argname, end); // After args are done, record start-of-fmt and write the fmt and header #define CUPRINTF_POSTAMBLE \ fmtstart = bufptr; \ end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \ writePrintfHeader(start, end ? fmtstart : NULL); \ return end ? 
(int)(end - start) : 0; __device__ int cuPrintf(const char *fmt) { CUPRINTF_PREAMBLE; CUPRINTF_POSTAMBLE; } template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_ARG(arg10); CUPRINTF_POSTAMBLE; } #undef CUPRINTF_PREAMBLE #undef CUPRINTF_ARG #undef CUPRINTF_POSTAMBLE // // cuPrintfRestrict // // Called to restrict output to a given thread/block. 
// We store the info in "restrictRules", which is set up at // init time by the host. It's not the cleanest way to do this // because it means restrictions will last between // invocations, but given the output-pointer continuity, // I feel this is reasonable. // __device__ void cuPrintfRestrict(int threadid, int blockid) { int thread_count = blockDim.x * blockDim.y * blockDim.z; if(((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED)) restrictRules.threadid = threadid; int block_count = gridDim.x * gridDim.y; if(((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED)) restrictRules.blockid = blockid; } /////////////////////////////////////////////////////////////////////////////// // HOST SIDE #include <stdio.h> static FILE *printf_fp; static char *printfbuf_start=NULL; static char *printfbuf_device=NULL; static int printfbuf_len=0; // // outputPrintfData // // Our own internal function, which takes a pointer to a data buffer // and passes it through libc's printf for output. // // We receive the formate string and a pointer to where the data is // held. We then run through and print it out. // // Returns 0 on failure, 1 on success // static int outputPrintfData(char *fmt, char *data) { // Format string is prefixed by a length that we don't need fmt += CUPRINTF_ALIGN_SIZE; // Now run through it, printing everything we can. We must // run to every % character, extract only that, and use printf // to format it. char *p = strchr(fmt, '%'); while(p != NULL) { // Print up to the % character *p = '\0'; fputs(fmt, printf_fp); *p = '%'; // Put back the % // Now handle the format specifier char *format = p++; // Points to the '%' p += strcspn(p, "%cdiouxXeEfgGaAnps"); if(*p == '\0') // If no format specifier, print the whole thing { fmt = format; break; } // Cut out the format bit and use printf to print it. It's prefixed // by its length. int arglen = *(int *)data; if(arglen > CUPRINTF_MAX_LEN) { fputs("Corrupt printf buffer data - aborting\n", printf_fp); return 0; } data += CUPRINTF_ALIGN_SIZE; char specifier = *p++; char c = *p; // Store for later *p = '\0'; switch(specifier) { // These all take integer arguments case 'c': case 'd': case 'i': case 'o': case 'u': case 'x': case 'X': case 'p': fprintf(printf_fp, format, *((int *)data)); break; // These all take double arguments case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': if(arglen == 4) // Float vs. Double thing fprintf(printf_fp, format, *((float *)data)); else fprintf(printf_fp, format, *((double *)data)); break; // Strings are handled in a special way case 's': fprintf(printf_fp, format, (char *)data); break; // % is special case '%': fprintf(printf_fp, "%%"); break; // Everything else is just printed out as-is default: fprintf(printf_fp, "%s", format); break; } data += CUPRINTF_ALIGN_SIZE; // Move on to next argument *p = c; // Restore what we removed fmt = p; // Adjust fmt string to be past the specifier p = strchr(fmt, '%'); // and get the next specifier } // Print out the last of the string fputs(fmt, printf_fp); return 1; } // // doPrintfDisplay // // This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the // print function above to display them. We've got this separate from // cudaPrintfDisplay() below so we can handle the SM_10 architecture // partitioning. 
// static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr) { // Grab, piece-by-piece, each output element until we catch // up with the circular buffer end pointer int printf_count=0; char printfbuf_local[CUPRINTF_MAX_LEN+1]; printfbuf_local[CUPRINTF_MAX_LEN] = '\0'; while(bufptr != endptr) { // Wrap ourselves at the end-of-buffer if(bufptr == bufend) bufptr = bufstart; // Adjust our start pointer to within the circular buffer and copy a block. cudaMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, cudaMemcpyDeviceToHost); // If the magic number isn't valid, then this write hasn't gone through // yet and we'll wait until it does (or we're past the end for non-async printfs). cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local; if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN)) { //fprintf(printf_fp, "Bad magic number in printf header\n"); break; } // Extract all the info and get this printf done if(headings) fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid); if(hdr->fmtoffset == 0) fprintf(printf_fp, "printf buffer overflow\n"); else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader))) break; printf_count++; // Clear if asked if(clear) cudaMemset(bufptr, 0, CUPRINTF_MAX_LEN); // Now advance our start location, because we're done, and keep copying bufptr += CUPRINTF_MAX_LEN; } return printf_count; } // // cudaPrintfInit // // Takes a buffer length to allocate, creates the memory on the device and // returns a pointer to it for when a kernel is called. It's up to the caller // to free it. // extern "C" cudaError_t cudaPrintfInit(size_t bufferLen) { // Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen; if((bufferLen % CUPRINTF_MAX_LEN) > 0) bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN)); printfbuf_len = (int)bufferLen; // Allocate a print buffer on the device and zero it if(cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess) return cudaErrorInitializationError; cudaMemset(printfbuf_device, 0, printfbuf_len); printfbuf_start = printfbuf_device; // Where we start reading from // No restrictions to begin with cuPrintfRestriction restrict; restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED; cudaMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict)); // Initialise the buffer and the respective lengths/pointers. cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *)); cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *)); cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len)); return cudaSuccess; } // // cudaPrintfEnd // // Frees up the memory which we allocated // extern "C" void cudaPrintfEnd() { if(!printfbuf_start || !printfbuf_device) return; cudaFree(printfbuf_device); printfbuf_start = printfbuf_device = NULL; } // // cudaPrintfDisplay // // Each call to this function dumps the entire current contents // of the printf buffer to the pre-specified FILE pointer. The // circular "start" pointer is advanced so that subsequent calls // dumps only new stuff. // // In the case of async memory access (via streams), call this // repeatedly to keep trying to empty the buffer. If it's a sync // access, then the whole buffer should empty in one go. 
// // Arguments: // outputFP - File descriptor to output to (NULL => stdout) // showThreadID - If true, prints [block,thread] before each line // extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID) { printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP); // For now, we force "synchronous" mode which means we're not concurrent // with kernel execution. This also means we don't need clearOnPrint. // If you're patching it for async operation, here's where you want it. bool sync_printfs = true; bool clearOnPrint = false; // Initialisation check if(!printfbuf_start || !printfbuf_device || !printf_fp) return cudaErrorMissingConfiguration; // To determine which architecture we're using, we read the // first short from the buffer - it'll be the magic number // relating to the version. unsigned short magic; cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost); // For SM_10 architecture, we've split our buffer into one-per-thread. // That means we must do each thread block separately. It'll require // extra reading. We also, for now, don't support async printfs because // that requires tracking one start pointer per thread. if(magic == CUPRINTF_SM10_MAGIC) { sync_printfs = true; clearOnPrint = false; int blocklen = 0; char *blockptr = printfbuf_device; while(blockptr < (printfbuf_device + printfbuf_len)) { cuPrintfHeaderSM10 hdr; cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost); // We get our block-size-step from the very first header if(hdr.thread_buf_len != 0) blocklen = hdr.thread_buf_len; // No magic number means no printfs from this thread if(hdr.magic != CUPRINTF_SM10_MAGIC) { if(blocklen == 0) { fprintf(printf_fp, "No printf headers found at all!\n"); break; // No valid headers! } blockptr += blocklen; continue; } // "offset" is non-zero then we can print the block contents if(hdr.offset > 0) { // For synchronous printfs, we must print from endptr->bufend, then from start->end if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len); doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN); } // Move on to the next block and loop again blockptr += hdr.thread_buf_len; } } // For SM_11 and up, everything is a single buffer and it's simple else if(magic == CUPRINTF_SM11_MAGIC) { // Grab the current "end of circular buffer" pointer. char *printfbuf_end = NULL; cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *)); // Adjust our starting and ending pointers to within the block char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device; char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device; // For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular // buffer wrap carefully because we could miss those past "end". 
if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len); doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr); printfbuf_start = printfbuf_end; } else ;//printf("Bad magic number in cuPrintf buffer header\n"); // If we were synchronous, then we must ensure that the memory is cleared on exit // otherwise another kernel launch with a different grid size could conflict. if(sync_printfs) cudaMemset(printfbuf_device, 0, printfbuf_len); return cudaSuccess; } // Cleanup #undef CUPRINTF_MAX_LEN #undef CUPRINTF_ALIGN_SIZE #undef CUPRINTF_SM10_MAGIC #undef CUPRINTF_SM11_MAGIC #endif
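copyArg() above writes each non-string argument as an aligned length word followed by the value, each occupying one CUPRINTF_ALIGN_SIZE slot, and outputPrintfData() later uses that stored length to tell a 4-byte float from an 8-byte double. The host-only sketch below walks through that record layout; packArg is a hypothetical helper, and the direct pointer casts simply mirror the ones the original code uses.

#include <cstdio>
#include <cstring>

#define ALIGN_SIZE sizeof(long long)   // mirrors CUPRINTF_ALIGN_SIZE

// Pack one argument as [length word][value], each in its own aligned slot.
static char *packArg(char *ptr, const void *val, int len)
{
    *(int *)ptr = len;                   // length word first
    memcpy(ptr + ALIGN_SIZE, val, len);  // then the value in the next slot
    return ptr + 2 * ALIGN_SIZE;
}

int main()
{
    char buf[64] = {0};
    float  f = 3.5f;
    double d = 2.25;

    char *end = buf;
    end = packArg(end, &f, sizeof(f));
    end = packArg(end, &d, sizeof(d));

    // Walk the records the same way the display code does: the stored
    // length decides whether the slot holds a float or a double.
    for (char *r = buf; r < end; r += 2 * ALIGN_SIZE) {
        int arglen = *(int *)r;
        if (arglen == sizeof(float))
            printf("float argument:  %f\n", *(float  *)(r + ALIGN_SIZE));
        else
            printf("double argument: %f\n", *(double *)(r + ALIGN_SIZE));
    }
    return 0;
}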
03c5f2ba4f31db2c72d643a811c32918ba60412c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. const int2 thread_2D_pos = make_int2(blockIdx.x*blockDim.x+threadIdx.x, blockIdx.y*blockDim.y+threadIdx.y); if(thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; const int thread_1D_pos = thread_2D_pos.y*numCols + thread_2D_pos.x; float sum = 0.0; for(int f_y = 0; f_y < filterWidth; f_y++){ for(int f_x = 0; f_x < filterWidth; f_x++){ int pix_x = thread_2D_pos.x + f_x - filterWidth/2; int pix_y = thread_2D_pos.y + f_y - filterWidth/2; pix_x = min(max(0, pix_x), numCols -1); pix_y = min(max(0, pix_y), numRows -1); float filter_val = filter[f_y*filterWidth + f_x]; unsigned char img_val = inputChannel[pix_y*numCols + pix_x]; sum += (filter_val*img_val); } } outputChannel[thread_1D_pos] = sum; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 thread_2D_pos = make_int2( blockIdx.x*blockDim.x+threadIdx.x, blockIdx.y*blockDim.y+threadIdx.y); const int thread_1D_pos = thread_2D_pos.y*numCols + thread_2D_pos.x; if(thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows){ return; } unsigned char r = inputImageRGBA[thread_1D_pos].x; unsigned char g = inputImageRGBA[thread_1D_pos].y; unsigned char b = inputImageRGBA[thread_1D_pos].z; redChannel[thread_1D_pos] = r; greenChannel[thread_1D_pos] = g; blueChannel[thread_1D_pos] = b; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float)*filterWidth*filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! 
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float)*filterWidth*filterWidth, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const int gridWidth = 16; const int blockWidth = (numCols + gridWidth -1 )/gridWidth; const int blockHeight = (numRows + gridWidth - 1)/gridWidth; const dim3 blockSize(blockWidth,blockHeight,1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize(gridWidth, gridWidth, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols,d_filter,filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols,d_filter,filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols,d_filter,filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
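The your_gaussian_blur() above answers the "compute correct grid size from the image size and block size" TODO by fixing a 16x16 grid and deriving the block dimensions from the image, which only stays within the hardware's per-block thread limit for relatively small images. A minimal sketch of the more common inverse arrangement follows: fix the block at 16x16 threads and round the grid up to cover the image, relying on the kernels' existing bounds checks to discard out-of-range threads. computeLaunchDims and the example image size are hypothetical, not part of the homework scaffold.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical helper: fixed 16x16 block, grid rounded up to cover the image.
static void computeLaunchDims(size_t numRows, size_t numCols,
                              dim3 &gridSize, dim3 &blockSize)
{
    blockSize = dim3(16, 16, 1);   // 256 threads per block, well under the limit
    gridSize  = dim3((unsigned int)((numCols + blockSize.x - 1) / blockSize.x),
                     (unsigned int)((numRows + blockSize.y - 1) / blockSize.y),
                     1);
}

int main()
{
    dim3 grid, block;
    computeLaunchDims(313, 557, grid, block);   // example image size
    printf("grid %u x %u, block %u x %u\n", grid.x, grid.y, block.x, block.y);
    return 0;
}

With this arrangement the same kernels can be launched as, for example, separateChannels<<<grid, block>>>(...) for any image size, since every pixel is covered and the excess threads return early at the bounds checks.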
03c5f2ba4f31db2c72d643a811c32918ba60412c.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. const int2 thread_2D_pos = make_int2(blockIdx.x*blockDim.x+threadIdx.x, blockIdx.y*blockDim.y+threadIdx.y); if(thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; const int thread_1D_pos = thread_2D_pos.y*numCols + thread_2D_pos.x; float sum = 0.0; for(int f_y = 0; f_y < filterWidth; f_y++){ for(int f_x = 0; f_x < filterWidth; f_x++){ int pix_x = thread_2D_pos.x + f_x - filterWidth/2; int pix_y = thread_2D_pos.y + f_y - filterWidth/2; pix_x = min(max(0, pix_x), numCols -1); pix_y = min(max(0, pix_y), numRows -1); float filter_val = filter[f_y*filterWidth + f_x]; unsigned char img_val = inputChannel[pix_y*numCols + pix_x]; sum += (filter_val*img_val); } } outputChannel[thread_1D_pos] = sum; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 thread_2D_pos = make_int2( blockIdx.x*blockDim.x+threadIdx.x, blockIdx.y*blockDim.y+threadIdx.y); const int thread_1D_pos = thread_2D_pos.y*numCols + thread_2D_pos.x; if(thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows){ return; } unsigned char r = inputImageRGBA[thread_1D_pos].x; unsigned char g = inputImageRGBA[thread_1D_pos].y; unsigned char b = inputImageRGBA[thread_1D_pos].z; redChannel[thread_1D_pos] = r; greenChannel[thread_1D_pos] = g; blueChannel[thread_1D_pos] = b; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float)*filterWidth*filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! 
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float)*filterWidth*filterWidth, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const int gridWidth = 16; const int blockWidth = (numCols + gridWidth -1 )/gridWidth; const int blockHeight = (numRows + gridWidth - 1)/gridWidth; const dim3 blockSize(blockWidth,blockHeight,1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize(gridWidth, gridWidth, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize,blockSize>>>(d_red, d_redBlurred, numRows, numCols,d_filter,filterWidth); gaussian_blur<<<gridSize,blockSize>>>(d_green, d_greenBlurred, numRows, numCols,d_filter,filterWidth); gaussian_blur<<<gridSize,blockSize>>>(d_blue, d_blueBlurred, numRows, numCols,d_filter,filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
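The homework text in the entry above leans on a checkCudaErrors() helper that lives in utils.h and is not included in this pair. A minimal sketch of that error-checking pattern, assuming a macro built on cudaGetErrorString (an illustration, not the course's actual helper):

    #include <cstdio>
    #include <cstdlib>
    #include <cuda_runtime.h>

    // Minimal stand-in for the checkCudaErrors() idea described above:
    // report the failing call, file and line, then abort.
    #define CHECK_CUDA(call)                                              \
      do {                                                                \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
          fprintf(stderr, "CUDA error '%s' at %s:%d\n",                   \
                  cudaGetErrorString(err_), __FILE__, __LINE__);          \
          exit(EXIT_FAILURE);                                             \
        }                                                                 \
      } while (0)

    int main() {
      unsigned char* d_red = nullptr;
      const size_t numRows = 480, numCols = 640;  // hypothetical image size
      CHECK_CUDA(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
      CHECK_CUDA(cudaFree(d_red));
      return 0;
    }

Wrapping every allocation and copy this way surfaces a failure at the call site instead of letting later kernels silently do nothing, which is exactly the failure mode the comments above warn about.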
8f1254ecfb1cbcf23b6fb4df8ba27e2a836d87be.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cumlHandle.hpp> #include <cuml/common/logger.hpp> #include <cuml/neighbors/knn_sparse.hpp> #include <sparse/knn.cuh> #include <cusparse_v2.h> namespace ML { namespace Sparse { void brute_force_knn(raft::handle_t &handle, const int *idx_indptr, const int *idx_indices, const float *idx_data, size_t idx_nnz, int n_idx_rows, int n_idx_cols, const int *query_indptr, const int *query_indices, const float *query_data, size_t query_nnz, int n_query_rows, int n_query_cols, int *output_indices, float *output_dists, int k, size_t batch_size_index, // approx 1M size_t batch_size_query, ML::MetricType metric, float metricArg, bool expanded_form) { auto d_alloc = handle.get_device_allocator(); hipsparseHandle_t cusparse_handle = handle.get_cusparse_handle(); hipStream_t stream = handle.get_stream(); MLCommon::Sparse::Selection::brute_force_knn( idx_indptr, idx_indices, idx_data, idx_nnz, n_idx_rows, n_idx_cols, query_indptr, query_indices, query_data, query_nnz, n_query_rows, n_query_cols, output_indices, output_dists, k, cusparse_handle, d_alloc, stream, batch_size_index, batch_size_query, metric, metricArg, expanded_form); } }; // namespace Sparse }; // namespace ML
8f1254ecfb1cbcf23b6fb4df8ba27e2a836d87be.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cumlHandle.hpp> #include <cuml/common/logger.hpp> #include <cuml/neighbors/knn_sparse.hpp> #include <sparse/knn.cuh> #include <cusparse_v2.h> namespace ML { namespace Sparse { void brute_force_knn(raft::handle_t &handle, const int *idx_indptr, const int *idx_indices, const float *idx_data, size_t idx_nnz, int n_idx_rows, int n_idx_cols, const int *query_indptr, const int *query_indices, const float *query_data, size_t query_nnz, int n_query_rows, int n_query_cols, int *output_indices, float *output_dists, int k, size_t batch_size_index, // approx 1M size_t batch_size_query, ML::MetricType metric, float metricArg, bool expanded_form) { auto d_alloc = handle.get_device_allocator(); cusparseHandle_t cusparse_handle = handle.get_cusparse_handle(); cudaStream_t stream = handle.get_stream(); MLCommon::Sparse::Selection::brute_force_knn( idx_indptr, idx_indices, idx_data, idx_nnz, n_idx_rows, n_idx_cols, query_indptr, query_indices, query_data, query_nnz, n_query_rows, n_query_cols, output_indices, output_dists, k, cusparse_handle, d_alloc, stream, batch_size_index, batch_size_query, metric, metricArg, expanded_form); } }; // namespace Sparse }; // namespace ML
eb28844dcda27ce471a79cb29c207e4db5c3e4ac.hip
// !!! This is a file automatically generated by hipify!!! #include "Renderer.cuh" #include "kernel_hip.cuh" #include <time.h> int main(int argc, char** argv) { Window* window = new Window("Peter's Window", false, 1920, 1080); Renderer renderer; renderer.init(window, 1); renderer.setupScene(); clock_t start, stop; while (!glfwWindowShouldClose(renderer.window->windowHandle)) { start = clock(); renderer.render(launchRender); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cerr << "took " << timer_seconds << " seconds.\n"; } renderer.cleanup(); }
eb28844dcda27ce471a79cb29c207e4db5c3e4ac.cu
#include "Renderer.cuh" #include "kernel.cuh" #include <time.h> int main(int argc, char** argv) { Window* window = new Window("Peter's Window", false, 1920, 1080); Renderer renderer; renderer.init(window, 1); renderer.setupScene(); clock_t start, stop; while (!glfwWindowShouldClose(renderer.window->windowHandle)) { start = clock(); renderer.render(launchRender); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cerr << "took " << timer_seconds << " seconds.\n"; } renderer.cleanup(); }
0bc70fb8c67a9789dea2963808de998032dff243.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2018-2019 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector)); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("linear_train_param"), &tparam_); FromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = ToJson(tparam_); out["coordinate_param"] = ToJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (learner_param_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); auto page = batch.GetView(); if (IsEmpty()) { return; } dh::safe_cuda(hipSetDevice(learner_param_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = page[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back( std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } data_.resize(row_ptr_.back()); gpair_.resize(num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = page[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(hipMemcpy( data_.data().get() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), hipMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (learner_param_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, 
coord_param_.top_k); monitor_.Start("UpdateFeature"); for (auto group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->Bias()[group_idx] += dbias; // Update residual if (learner_param_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (learner_param_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(hipSetDevice(learner_param_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(perm, num_row_); } // This needs to be public because of the __device__ lambda. void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = dh::ToSpan(gpair_); dh::LaunchN(num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(hipSetDevice(learner_param_->gpu_id)); common::Span<xgboost::Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. 
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); common::Span<Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(hipMemcpyAsync( gpair_.data().get(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), hipMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; std::vector<size_t> row_ptr_; dh::device_vector<xgboost::Entry> data_; dh::caching_device_vector<GradientPair> gpair_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
0bc70fb8c67a9789dea2963808de998032dff243.cu
/*! * Copyright 2018-2019 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector)); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("linear_train_param"), &tparam_); FromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = ToJson(tparam_); out["coordinate_param"] = ToJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (learner_param_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); auto page = batch.GetView(); if (IsEmpty()) { return; } dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = page[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back( std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } data_.resize(row_ptr_.back()); gpair_.resize(num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = page[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(cudaMemcpy( data_.data().get() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), cudaMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (learner_param_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); monitor_.Start("UpdateFeature"); for (auto 
group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->Bias()[group_idx] += dbias; // Update residual if (learner_param_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (learner_param_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(perm, num_row_); } // This needs to be public because of the __device__ lambda. void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = dh::ToSpan(gpair_); dh::LaunchN(num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id)); common::Span<xgboost::Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. 
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); common::Span<Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(cudaMemcpyAsync( gpair_.data().get(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), cudaMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; std::vector<size_t> row_ptr_; dh::device_vector<xgboost::Entry> data_; dh::caching_device_vector<GradientPair> gpair_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
787a4d291946a884d7f1346f1ed2b9a91c2ffa54.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <arbor/fvm_types.hpp> #include "cuda_atomic.hpp" #include "cuda_common.hpp" #include "stimulus.hpp" namespace arb { namespace gpu { namespace kernel { __global__ void stimulus_current_impl(int n, stimulus_pp pp) { auto i = threadIdx.x + blockDim.x*blockIdx.x; if (i<n) { auto t = pp.vec_t_[pp.vec_ci_[i]]; if (t>=pp.delay[i] && t<pp.delay[i]+pp.duration[i]) { // use subtraction because the electrode currents are specified // in terms of current into the compartment cuda_atomic_add(pp.vec_i_+pp.node_index_[i], -pp.weight_[i]*pp.amplitude[i]); } } } } // namespace kernel void stimulus_current_impl(int n, const stimulus_pp& pp) { constexpr unsigned block_dim = 128; const unsigned grid_dim = impl::block_count(n, block_dim); hipLaunchKernelGGL(( kernel::stimulus_current_impl), dim3(grid_dim), dim3(block_dim), 0, 0, n, pp); } } // namespace gpu } // namespace arb
787a4d291946a884d7f1346f1ed2b9a91c2ffa54.cu
#include <arbor/fvm_types.hpp> #include "cuda_atomic.hpp" #include "cuda_common.hpp" #include "stimulus.hpp" namespace arb { namespace gpu { namespace kernel { __global__ void stimulus_current_impl(int n, stimulus_pp pp) { auto i = threadIdx.x + blockDim.x*blockIdx.x; if (i<n) { auto t = pp.vec_t_[pp.vec_ci_[i]]; if (t>=pp.delay[i] && t<pp.delay[i]+pp.duration[i]) { // use subtraction because the electrode currents are specified // in terms of current into the compartment cuda_atomic_add(pp.vec_i_+pp.node_index_[i], -pp.weight_[i]*pp.amplitude[i]); } } } } // namespace kernel void stimulus_current_impl(int n, const stimulus_pp& pp) { constexpr unsigned block_dim = 128; const unsigned grid_dim = impl::block_count(n, block_dim); kernel::stimulus_current_impl<<<grid_dim, block_dim>>>(n, pp); } } // namespace gpu } // namespace arb
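The launch wrapper in the pair above sizes its grid with impl::block_count(n, block_dim), whose definition is not part of this file pair. It is assumed here to be a plain ceiling division; a stand-alone sketch (the name block_count is reused for illustration only):

    #include <cstdio>

    // Assumed behaviour of impl::block_count: the smallest number of blocks
    // of size block_dim that covers n work items (ceiling division).
    constexpr unsigned block_count(unsigned n, unsigned block_dim) {
      return (n + block_dim - 1) / block_dim;
    }

    int main() {
      // 1000 stimuli with block_dim = 128 need 8 blocks (7 * 128 = 896 < 1000).
      printf("%u\n", block_count(1000, 128));
      return 0;
    }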
485ff0b7ff99f93e44d17a26add52797e51d9f52.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hipfft.h> __global__ void print_kernel() { printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x); } int main() { hipLaunchKernelGGL(( print_kernel), dim3(10), dim3(10), 0, 0, ); hipDeviceSynchronize(); }
485ff0b7ff99f93e44d17a26add52797e51d9f52.cu
#include <stdio.h> #include <cuda_runtime.h> #include <cufft.h> __global__ void print_kernel() { printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x); } int main() { print_kernel<<<10, 10>>>(); cudaDeviceSynchronize(); }
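The pair above shows the launch translation for a kernel with no arguments, which is why the hipified call ends in a bare trailing comma. For a kernel that does take arguments, the same rewrite threads them through after the shared-memory and stream slots; a small sketch with a made-up scale kernel (not part of the dataset), written as CUDA with the hipified form in a comment:

    #include <cuda_runtime.h>

    __global__ void scale(float* data, float factor, int n) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) data[i] *= factor;
    }

    int main() {
      const int n = 1 << 20;
      float* d_data = nullptr;
      cudaMalloc(&d_data, n * sizeof(float));
      cudaMemset(d_data, 0, n * sizeof(float));
      // CUDA launch: grid and block inside <<< >>>, kernel arguments after.
      scale<<<(n + 255) / 256, 256>>>(d_data, 2.0f, n);
      // hipify rewrites the line above to roughly:
      //   hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0,
      //                      d_data, 2.0f, n);
      // i.e. shared-memory bytes and stream become explicit arguments (0, 0)
      // and the kernel arguments follow, as in the pairs throughout this file.
      cudaDeviceSynchronize();
      cudaFree(d_data);
      return 0;
    }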
3eb9f9e48b00dd1d3e9332d11e98870f578aa47f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "orttraining/training_ops/rocm/tensor/gather_grad_impl.h" #include "core/providers/rocm/cu_inc/common.cuh" #include "core/providers/rocm/shared_inc/rocm_call.h" #include <hipcub/hipcub.hpp> namespace onnxruntime { namespace rocm { template <typename T> __global__ void _Iota( hipcub::CountingInputIterator<T> input, size_t length, T* output) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(idx, length); output[idx] = input[idx]; } template <typename T, typename Tin, int NumElementsPerThread> __global__ void _GatherGradImpl( const Tin* input, const Tin* indices, const T* grad_output, T* grad_weight, int64_t numel, int64_t input_numel, int64_t param_itrs, int64_t stride) { int idx = blockIdx.x * 4 + threadIdx.y; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) { do { for (int itr = 0; itr < param_itrs; ++itr) { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * NumElementsPerThread; const int weight_row = itr * input_numel + ((int)input[idx]) * stride; //the offset of the input const int grad_row = (itr * numel + ((int)indices[idx])) * stride; //the offset of the gradient float gradient[NumElementsPerThread]; float weight[NumElementsPerThread]; #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<float>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<float>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { weight[ii] += gradient[ii]; } #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<T>(weight[ii]); } } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } // Special optimization for the case which the gather is on axis=0 template <typename T, typename Tin, int NumElementsPerThread> __global__ void _GatherAxis0GradImpl( const Tin* input, const Tin* indices, const T* grad_output, T* grad_weight, int64_t numel, int64_t input_numel, int64_t stride) { int idx = blockIdx.x * 4 + threadIdx.y; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * NumElementsPerThread; const int weight_row = ((int)input[idx]) * stride; //the offset of the input float weight[NumElementsPerThread]; for (int ii = 0; ii < NumElementsPerThread; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE/4; if (feature_dim < stride) weight[ii] = static_cast<float>(grad_weight[weight_row + feature_dim]); } do { const int grad_row = ((int)indices[idx]) * stride; //the offset of the gradient float gradient[NumElementsPerThread]; #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE/4; if (feature_dim < stride) { gradient[ii] = static_cast<float>(grad_output[grad_row + feature_dim]); weight[ii] += gradient[ii]; } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE/4; if (feature_dim < stride) grad_weight[weight_row + feature_dim] = static_cast<T>(weight[ii]); } } } template <typename T, 
typename Tin> void GatherGradImpl( const RocmKernel& rocm_kernel, const T* grad_data, const Tin* indices_data, const int64_t num_indices, const int64_t num_weights, const int64_t stride, T* output_data, const int64_t num_inputs, //The number of input elements starting from the gathering dimension const int64_t param_itrs //The size of dimensions of the data before gathering dimension ) { // allocate intermediate buffers auto original_indices = rocm_kernel.template GetScratchBuffer<Tin>(num_indices); // initialize original_indices with [0, num_indices) { const auto blocks_per_grid = CeilDiv(num_indices, GridDim::maxThreadsPerBlock); hipcub::CountingInputIterator<Tin> counting_input(Tin{}); hipLaunchKernelGGL(_Iota, dim3(blocks_per_grid), dim3(GridDim::maxThreadsPerBlock), 0, 0, counting_input, num_indices, original_indices.get()); } auto indices_data_sorted = rocm_kernel.template GetScratchBuffer<Tin>(num_indices); auto original_indices_sorted = rocm_kernel.template GetScratchBuffer<Tin>(num_indices); // sort indices and original indices size_t sort_temp_storage_size_bytes = 0; HIP_CALL_THROW(hipcub::DeviceRadixSort::SortPairs( nullptr, sort_temp_storage_size_bytes, indices_data, indices_data_sorted.get(), original_indices.get(), original_indices_sorted.get(), num_indices)); auto sort_temp_storage = rocm_kernel.GetScratchBuffer<void>(sort_temp_storage_size_bytes); HIP_CALL_THROW(hipcub::DeviceRadixSort::SortPairs( sort_temp_storage.get(), sort_temp_storage_size_bytes, indices_data, indices_data_sorted.get(), original_indices.get(), original_indices_sorted.get(), num_indices)); dim3 block(GPU_WARP_SIZE, 4); dim3 grid(CeilDiv(num_indices, 4), CeilDiv(stride, GridDim::maxElementsPerThread * GPU_WARP_SIZE)); if (param_itrs == 1) { hipLaunchKernelGGL(HIP_KERNEL_NAME(_GatherAxis0GradImpl<T, Tin, GridDim::maxElementsPerThread>), dim3(grid), dim3(block), 0, 0, indices_data_sorted.get(), original_indices_sorted.get(), grad_data, output_data, num_indices, num_inputs, stride); } else { hipLaunchKernelGGL(HIP_KERNEL_NAME(_GatherGradImpl<T, Tin, GridDim::maxElementsPerThread>), dim3(grid), dim3(block), 0, 0, indices_data_sorted.get(), original_indices_sorted.get(), grad_data, output_data, num_indices, num_inputs, param_itrs, stride); } } #define SPECIALIZED_GRAD_IMPL2(T) \ template void GatherGradImpl<T, int64_t>( \ const RocmKernel& rocm_kernel, \ const T* grad_data, \ const int64_t* indices_data, \ const int64_t num_indices, \ const int64_t num_weights, \ const int64_t stride, \ T* output_data, \ const int64_t num_inputs, \ const int64_t param_itrs); \ template void GatherGradImpl<T, int32_t>( \ const RocmKernel& rocm_kernel, \ const T* grad_data, \ const int32_t* indices_data, \ const int64_t num_indices, \ const int64_t num_weights, \ const int64_t stride, \ T* output_data, \ const int64_t num_inputs, \ const int64_t param_itrs); SPECIALIZED_GRAD_IMPL2(float) SPECIALIZED_GRAD_IMPL2(half) } // namespace rocm } // namespace onnxruntime
3eb9f9e48b00dd1d3e9332d11e98870f578aa47f.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "orttraining/training_ops/rocm/tensor/gather_grad_impl.h" #include "core/providers/rocm/cu_inc/common.cuh" #include "core/providers/rocm/shared_inc/rocm_call.h" #include <hipcub/hipcub.hpp> namespace onnxruntime { namespace rocm { template <typename T> __global__ void _Iota( hipcub::CountingInputIterator<T> input, size_t length, T* output) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(idx, length); output[idx] = input[idx]; } template <typename T, typename Tin, int NumElementsPerThread> __global__ void _GatherGradImpl( const Tin* input, const Tin* indices, const T* grad_output, T* grad_weight, int64_t numel, int64_t input_numel, int64_t param_itrs, int64_t stride) { int idx = blockIdx.x * 4 + threadIdx.y; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) { do { for (int itr = 0; itr < param_itrs; ++itr) { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * NumElementsPerThread; const int weight_row = itr * input_numel + ((int)input[idx]) * stride; //the offset of the input const int grad_row = (itr * numel + ((int)indices[idx])) * stride; //the offset of the gradient float gradient[NumElementsPerThread]; float weight[NumElementsPerThread]; #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<float>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<float>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { weight[ii] += gradient[ii]; } #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<T>(weight[ii]); } } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } // Special optimization for the case which the gather is on axis=0 template <typename T, typename Tin, int NumElementsPerThread> __global__ void _GatherAxis0GradImpl( const Tin* input, const Tin* indices, const T* grad_output, T* grad_weight, int64_t numel, int64_t input_numel, int64_t stride) { int idx = blockIdx.x * 4 + threadIdx.y; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * NumElementsPerThread; const int weight_row = ((int)input[idx]) * stride; //the offset of the input float weight[NumElementsPerThread]; for (int ii = 0; ii < NumElementsPerThread; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE/4; if (feature_dim < stride) weight[ii] = static_cast<float>(grad_weight[weight_row + feature_dim]); } do { const int grad_row = ((int)indices[idx]) * stride; //the offset of the gradient float gradient[NumElementsPerThread]; #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE/4; if (feature_dim < stride) { gradient[ii] = static_cast<float>(grad_output[grad_row + feature_dim]); weight[ii] += gradient[ii]; } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE/4; if (feature_dim < stride) grad_weight[weight_row + feature_dim] = static_cast<T>(weight[ii]); } } } template <typename T, typename Tin> void GatherGradImpl( const RocmKernel& rocm_kernel, const T* grad_data, const 
Tin* indices_data, const int64_t num_indices, const int64_t num_weights, const int64_t stride, T* output_data, const int64_t num_inputs, //The number of input elements starting from the gathering dimension const int64_t param_itrs //The size of dimensions of the data before gathering dimension ) { // allocate intermediate buffers auto original_indices = rocm_kernel.template GetScratchBuffer<Tin>(num_indices); // initialize original_indices with [0, num_indices) { const auto blocks_per_grid = CeilDiv(num_indices, GridDim::maxThreadsPerBlock); hipcub::CountingInputIterator<Tin> counting_input(Tin{}); hipLaunchKernelGGL(_Iota, dim3(blocks_per_grid), dim3(GridDim::maxThreadsPerBlock), 0, 0, counting_input, num_indices, original_indices.get()); } auto indices_data_sorted = rocm_kernel.template GetScratchBuffer<Tin>(num_indices); auto original_indices_sorted = rocm_kernel.template GetScratchBuffer<Tin>(num_indices); // sort indices and original indices size_t sort_temp_storage_size_bytes = 0; HIP_CALL_THROW(hipcub::DeviceRadixSort::SortPairs( nullptr, sort_temp_storage_size_bytes, indices_data, indices_data_sorted.get(), original_indices.get(), original_indices_sorted.get(), num_indices)); auto sort_temp_storage = rocm_kernel.GetScratchBuffer<void>(sort_temp_storage_size_bytes); HIP_CALL_THROW(hipcub::DeviceRadixSort::SortPairs( sort_temp_storage.get(), sort_temp_storage_size_bytes, indices_data, indices_data_sorted.get(), original_indices.get(), original_indices_sorted.get(), num_indices)); dim3 block(GPU_WARP_SIZE, 4); dim3 grid(CeilDiv(num_indices, 4), CeilDiv(stride, GridDim::maxElementsPerThread * GPU_WARP_SIZE)); if (param_itrs == 1) { hipLaunchKernelGGL(HIP_KERNEL_NAME(_GatherAxis0GradImpl<T, Tin, GridDim::maxElementsPerThread>), dim3(grid), dim3(block), 0, 0, indices_data_sorted.get(), original_indices_sorted.get(), grad_data, output_data, num_indices, num_inputs, stride); } else { hipLaunchKernelGGL(HIP_KERNEL_NAME(_GatherGradImpl<T, Tin, GridDim::maxElementsPerThread>), dim3(grid), dim3(block), 0, 0, indices_data_sorted.get(), original_indices_sorted.get(), grad_data, output_data, num_indices, num_inputs, param_itrs, stride); } } #define SPECIALIZED_GRAD_IMPL2(T) \ template void GatherGradImpl<T, int64_t>( \ const RocmKernel& rocm_kernel, \ const T* grad_data, \ const int64_t* indices_data, \ const int64_t num_indices, \ const int64_t num_weights, \ const int64_t stride, \ T* output_data, \ const int64_t num_inputs, \ const int64_t param_itrs); \ template void GatherGradImpl<T, int32_t>( \ const RocmKernel& rocm_kernel, \ const T* grad_data, \ const int32_t* indices_data, \ const int64_t num_indices, \ const int64_t num_weights, \ const int64_t stride, \ T* output_data, \ const int64_t num_inputs, \ const int64_t param_itrs); SPECIALIZED_GRAD_IMPL2(float) SPECIALIZED_GRAD_IMPL2(half) } // namespace rocm } // namespace onnxruntime
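Stated on the host, the gradient the two kernels above accumulate for the axis-0, param_itrs == 1 case is simply grad_weight[indices[i]] += grad_output[i] row by row; the GPU path sorts the indices with hipcub radix sort first so that each run of equal indices is handled serially by one thread row rather than through colliding atomics. A host reference of that accumulation, as a sketch for clarity (the function name and the tiny example in main are illustrative only):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Row-by-row accumulation of the gathered gradient on the host.
    void gather_grad_reference(const std::vector<int64_t>& indices,
                               const std::vector<float>& grad_output,  // [num_indices, stride]
                               std::vector<float>& grad_weight,        // [num_weights, stride]
                               int64_t stride) {
      for (size_t i = 0; i < indices.size(); ++i) {
        const int64_t row = indices[i];
        for (int64_t f = 0; f < stride; ++f)
          grad_weight[row * stride + f] += grad_output[i * stride + f];
      }
    }

    int main() {
      std::vector<int64_t> idx = {2, 0, 2};               // index 2 appears twice
      std::vector<float> grad_out = {1, 1, 2, 2, 3, 3};   // 3 rows, stride 2
      std::vector<float> grad_w(3 * 2, 0.0f);             // 3 weight rows
      gather_grad_reference(idx, grad_out, grad_w, 2);
      printf("row 2: %g %g\n", grad_w[4], grad_w[5]);     // accumulates rows 0 and 2 of grad_out
      return 0;
    }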
855af85fad0ee22f446eec332c9760371afc8f6d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cassert> #include <iostream> #include <cuda/api_wrappers.h> #include "HeterogeneousCore/CUDAUtilities/interface/cudastdAlgorithm.h" #include "HeterogeneousCore/CUDAUtilities/interface/exitSansCUDADevices.h" __global__ void testBinaryFind() { int data[] = { 1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 6 }; auto lower = cuda_std::lower_bound(data, data+13, 4); auto upper = cuda_std::upper_bound(data, data+12, 4); assert(3 == upper-lower); // classic binary search, returning a value only if it is present constexpr int data2[] = { 1, 2, 4, 6, 9, 10 }; assert(data2+2 == cuda_std::binary_find(data2, data2+6, 4)); assert(data2+6 == cuda_std::binary_find(data2, data2+6, 5)); } void wrapper() { if (cuda::device::count() == 0) { std::cerr << "No CUDA devices on this system" << "\n"; exit(EXIT_FAILURE); } auto current_device = cuda::device::current::get(); cuda::launch(testBinaryFind, { 32, 64 }); } int main() { exitSansCUDADevices(); wrapper(); }
855af85fad0ee22f446eec332c9760371afc8f6d.cu
#include <cassert> #include <iostream> #include <cuda/api_wrappers.h> #include "HeterogeneousCore/CUDAUtilities/interface/cudastdAlgorithm.h" #include "HeterogeneousCore/CUDAUtilities/interface/exitSansCUDADevices.h" __global__ void testBinaryFind() { int data[] = { 1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 6 }; auto lower = cuda_std::lower_bound(data, data+13, 4); auto upper = cuda_std::upper_bound(data, data+12, 4); assert(3 == upper-lower); // classic binary search, returning a value only if it is present constexpr int data2[] = { 1, 2, 4, 6, 9, 10 }; assert(data2+2 == cuda_std::binary_find(data2, data2+6, 4)); assert(data2+6 == cuda_std::binary_find(data2, data2+6, 5)); } void wrapper() { if (cuda::device::count() == 0) { std::cerr << "No CUDA devices on this system" << "\n"; exit(EXIT_FAILURE); } auto current_device = cuda::device::current::get(); cuda::launch(testBinaryFind, { 32, 64 }); } int main() { exitSansCUDADevices(); wrapper(); }
040d4accfbbe0704ca9438dbb4f0e1f5875e44c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "func.h" #include "func_hip.cuh" __global__ void Zero(int* dst, int h, int w) { int index = blockIdx.x *blockDim.x + threadIdx.x; if ( index >= h*w ) { return; } dst[index] = 0; } __global__ void Hough_compute(int* buffer, uchar* oldimg, int h, int w, int r) { int index = blockIdx.x *blockDim.x + threadIdx.x; int* dst = buffer; if ( index >= h*w ) { return; } int i = index % w; int j = index / w; #define ONE_STEP 10 if ((oldimg[index] > 0) && (i % ONE_STEP == 0) && (j % ONE_STEP == 0) ) { dst[index] = 190; //int a = 0; int x, y; for (int t = 0; t < 500; t++) { x = i + r * cos(3.1415926 / 250 * t); y = j + r * sin(3.1415926 / 250 * t); if (x >= 0 && x < w && y >= 0 && y < h) { atomicAdd((int*)&dst[x + y * w], 30); } } } } void Hough(bitmap &img, bitmap &oldimg, int r, uchar* &d_buffer) { hipError_t err; uchar* pixel; int* buffer = (int*)d_buffer; pixel = img.pixel; /* Not support full cuda err = hipMalloc(&pixel, sizeof(uchar)*img.w*img.h); CHECK_ERROR( err) err = hipMalloc(&oldpixel, sizeof(uchar)*img.w*img.h); CHECK_ERROR( err) err= hipMemcpy(pixel, img.pixel, sizeof(uchar)*img.w*img.h, hipMemcpyHostToDevice); CHECK_ERROR( err) memcpy( oldimg.pixel, img.pixel, (std::size_t) sizeof(uchar)*img.w * img.h ); err= hipMemcpy(oldpixel, oldimg.pixel, sizeof(uchar)*img.w*img.h, hipMemcpyHostToDevice); CHECK_ERROR( err) */ // err= hipMemcpy(oldpixel, pixel, sizeof(uchar)*img.w*img.h, hipMemcpyDeviceToDevice); // CHECK_ERROR( err) hipLaunchKernelGGL(( Zero), dim3((img.w * img.h + BLOCK_SIZE) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, buffer, img.h, img.w); CHECK_LAST_ERROR hipLaunchKernelGGL(( Hough_compute), dim3((img.w * img.h + BLOCK_SIZE) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, buffer, pixel, img.h, img.w, r); CHECK_LAST_ERROR hipLaunchKernelGGL(( copy_back), dim3((img.w * img.h + BLOCK_SIZE) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, pixel, buffer, img.h, img.w, 1); CHECK_LAST_ERROR /* err= hipMemcpy(img.pixel, pixel, sizeof(uchar)*img.w*img.h, hipMemcpyDeviceToHost); CHECK_ERROR( err) err= hipMemcpy(oldimg.pixel, oldpixel, sizeof(uchar)*img.w*img.h, hipMemcpyDeviceToHost); CHECK_ERROR( err) err= hipFree(pixel); CHECK_ERROR( err) err= hipFree(oldpixel); CHECK_ERROR( err) */ }
040d4accfbbe0704ca9438dbb4f0e1f5875e44c4.cu
#include "func.h" #include "func.cuh" __global__ void Zero(int* dst, int h, int w) { int index = blockIdx.x *blockDim.x + threadIdx.x; if ( index >= h*w ) { return; } dst[index] = 0; } __global__ void Hough_compute(int* buffer, uchar* oldimg, int h, int w, int r) { int index = blockIdx.x *blockDim.x + threadIdx.x; int* dst = buffer; if ( index >= h*w ) { return; } int i = index % w; int j = index / w; #define ONE_STEP 10 if ((oldimg[index] > 0) && (i % ONE_STEP == 0) && (j % ONE_STEP == 0) ) { dst[index] = 190; //int a = 0; int x, y; for (int t = 0; t < 500; t++) { x = i + r * cos(3.1415926 / 250 * t); y = j + r * sin(3.1415926 / 250 * t); if (x >= 0 && x < w && y >= 0 && y < h) { atomicAdd((int*)&dst[x + y * w], 30); } } } } void Hough(bitmap &img, bitmap &oldimg, int r, uchar* &d_buffer) { cudaError err; uchar* pixel; int* buffer = (int*)d_buffer; pixel = img.pixel; /* Not support full cuda err = cudaMalloc(&pixel, sizeof(uchar)*img.w*img.h); CHECK_ERROR( err) err = cudaMalloc(&oldpixel, sizeof(uchar)*img.w*img.h); CHECK_ERROR( err) err= cudaMemcpy(pixel, img.pixel, sizeof(uchar)*img.w*img.h, cudaMemcpyHostToDevice); CHECK_ERROR( err) memcpy( oldimg.pixel, img.pixel, (std::size_t) sizeof(uchar)*img.w * img.h ); err= cudaMemcpy(oldpixel, oldimg.pixel, sizeof(uchar)*img.w*img.h, cudaMemcpyHostToDevice); CHECK_ERROR( err) */ // err= cudaMemcpy(oldpixel, pixel, sizeof(uchar)*img.w*img.h, cudaMemcpyDeviceToDevice); // CHECK_ERROR( err) Zero<<<(img.w * img.h + BLOCK_SIZE) / BLOCK_SIZE, BLOCK_SIZE>>>(buffer, img.h, img.w); CHECK_LAST_ERROR Hough_compute<<<(img.w * img.h + BLOCK_SIZE) / BLOCK_SIZE, BLOCK_SIZE>>>(buffer, pixel, img.h, img.w, r); CHECK_LAST_ERROR copy_back<<<(img.w * img.h + BLOCK_SIZE) / BLOCK_SIZE, BLOCK_SIZE>>>(pixel, buffer, img.h, img.w, 1); CHECK_LAST_ERROR /* err= cudaMemcpy(img.pixel, pixel, sizeof(uchar)*img.w*img.h, cudaMemcpyDeviceToHost); CHECK_ERROR( err) err= cudaMemcpy(oldimg.pixel, oldpixel, sizeof(uchar)*img.w*img.h, cudaMemcpyDeviceToHost); CHECK_ERROR( err) err= cudaFree(pixel); CHECK_ERROR( err) err= cudaFree(oldpixel); CHECK_ERROR( err) */ }
71d1d7bfb7f1de179aa116b058a8ee6065feb3b9.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cstdio> #include <iostream> #include <agency/agency.hpp> #include <agency/cuda.hpp> #include "annotating_executor.hpp" #include "async_copy.hpp" #include "bulk_invoke.hpp" #include "bulk_async.hpp" #include "for_each.hpp" void init_host_data( int n, double * x ) { auto policy = annotate(agency::cuda::par, "init_host_data", blue); for(int i=0; i<n; ++i) { x[i] = i; } } void init_data(int n, double* x, double* x_d, double* y_d) { auto policy = annotate(agency::cuda::par, "init_data", yellow); agency::cuda::future<void> copy_finished = experimental::async_copy(policy, x, x + n, x_d); agency::cuda::future<void> init_finished = experimental::bulk_async(policy(n), [=] __device__ (agency::parallel_agent& self) { y_d[self.index()] = n - self.index(); }); copy_finished.wait(); init_finished.wait(); } void daxpy(int n, double a, double* x, double* y) { auto policy = annotate(agency::cuda::par, "daxpy", magenta); experimental::bulk_invoke(policy(n), [=] __device__ (agency::parallel_agent& self) { int i = self.index(); y[i] = a*x[i] + y[i]; }); } void check_results(int n, double correctvalue, double* x_d) { auto policy = annotate(agency::cuda::par, "check_results", cyan); experimental::for_each(policy, x_d, x_d + n, [=] __host__ __device__ (double value) { if(value != correctvalue) { printf("ERROR, expected = %f, actual: %f\n", correctvalue, value); } }); } void run_test(int n) { { auto policy = annotate(agency::cuda::par, "run_test", green); double* x_d; double* y_d; hipSetDevice(0); std::vector<double> x(n); hipMalloc((void**)&x_d,n*sizeof(double)); hipMalloc((void**)&y_d,n*sizeof(double)); init_host_data(n, x.data()); init_data(n,x.data(),x_d,y_d); daxpy(n,1.0,x_d,y_d); check_results(n, n, y_d); hipFree(y_d); hipFree(x_d); hipDeviceSynchronize(); } std::cout << "OK" << std::endl; } int main() { int n = 1<<22; run_test(n); return 0; }
71d1d7bfb7f1de179aa116b058a8ee6065feb3b9.cu
/* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cstdio> #include <iostream> #include <agency/agency.hpp> #include <agency/cuda.hpp> #include "annotating_executor.hpp" #include "async_copy.hpp" #include "bulk_invoke.hpp" #include "bulk_async.hpp" #include "for_each.hpp" void init_host_data( int n, double * x ) { auto policy = annotate(agency::cuda::par, "init_host_data", blue); for(int i=0; i<n; ++i) { x[i] = i; } } void init_data(int n, double* x, double* x_d, double* y_d) { auto policy = annotate(agency::cuda::par, "init_data", yellow); agency::cuda::future<void> copy_finished = experimental::async_copy(policy, x, x + n, x_d); agency::cuda::future<void> init_finished = experimental::bulk_async(policy(n), [=] __device__ (agency::parallel_agent& self) { y_d[self.index()] = n - self.index(); }); copy_finished.wait(); init_finished.wait(); } void daxpy(int n, double a, double* x, double* y) { auto policy = annotate(agency::cuda::par, "daxpy", magenta); experimental::bulk_invoke(policy(n), [=] __device__ (agency::parallel_agent& self) { int i = self.index(); y[i] = a*x[i] + y[i]; }); } void check_results(int n, double correctvalue, double* x_d) { auto policy = annotate(agency::cuda::par, "check_results", cyan); experimental::for_each(policy, x_d, x_d + n, [=] __host__ __device__ (double value) { if(value != correctvalue) { printf("ERROR, expected = %f, actual: %f\n", correctvalue, value); } }); } void run_test(int n) { { auto policy = annotate(agency::cuda::par, "run_test", green); double* x_d; double* y_d; cudaSetDevice(0); std::vector<double> x(n); cudaMalloc((void**)&x_d,n*sizeof(double)); cudaMalloc((void**)&y_d,n*sizeof(double)); init_host_data(n, x.data()); init_data(n,x.data(),x_d,y_d); daxpy(n,1.0,x_d,y_d); check_results(n, n, y_d); cudaFree(y_d); cudaFree(x_d); cudaDeviceSynchronize(); } std::cout << "OK" << std::endl; } int main() { int n = 1<<22; run_test(n); return 0; }
921f4d9c9205ac9196d312702fae7db160d8a7fd.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <iostream> #include <vector> #define SIZE 1024 #define SHMEM_SIZE 1024*sizeof(int) #define GRAPH_LAUNCH_ITERATIONS 300 using std::cout; using std::endl; __global__ void sum_reduction(int* v, int* v_r) { __shared__ int partial_sum[SHMEM_SIZE]; int tid = blockIdx.x * blockDim.x + threadIdx.x; partial_sum[threadIdx.x] = v[tid]; __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { partial_sum[threadIdx.x] += partial_sum[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { v_r[blockIdx.x] = partial_sum[0]; } } void initialize_vector(int* v, int n) { for (int i = 0; i < n; i++) { v[i] = rand() % 100; } } void verify_result(int* a, int* r, const int N) { int sum = 0; for (int i = 0; i < N; i++) { sum += a[i]; } if (sum != r[0]) { cout << "NOT SUCCESSFUL" << endl; exit(0); } } void cudaGraphAPIsumReduction(int* h_v, int* h_v_r, int* d_v, int* d_v_r, int TB_SIZE, int GRID_SIZE, int n) { hipStream_t streamForGraph; hipGraph_t graph; std::vector<hipGraphNode_t> nodeDependencies; hipGraphNode_t memcpyNode, kernelNode, memsetNode; double result_h = 0.0; hipStreamCreateWithFlags(&streamForGraph, hipStreamNonBlocking); cudaKernelNodeParams kernelNodeParams = { 0 }; hipMemcpy3DParms memcpyParams = { 0 }; cudaMemsetParams memsetParams = { 0 }; memcpyParams.srcArray = NULL; memcpyParams.srcPos = make_hipPos(0, 0, 0); memcpyParams.srcPtr = make_hipPitchedPtr(h_v, sizeof(int) * n, n, 1); memcpyParams.dstArray = NULL; memcpyParams.dstPos = make_hipPos(0, 0, 0); memcpyParams.dstPtr = make_hipPitchedPtr(d_v, sizeof(int) * n, n, 1); memcpyParams.extent = make_hipExtent(sizeof(int) * n, 1, 1); memcpyParams.kind = hipMemcpyHostToDevice; //Adding memsetParams node memsetParams.dst = (void*)d_v_r; memsetParams.value = 0; memsetParams.pitch = 0; memsetParams.elementSize = sizeof(int); memsetParams.width = n; memsetParams.height = 1; cudaGraphCreate(&graph, 0); cudaGraphAddMemcpyNode(&memcpyNode, graph, NULL, 0, &memcpyParams); cudaGraphAddMemsetNode(&memsetNode, graph, NULL, 0, &memsetParams); nodeDependencies.push_back(memsetNode); nodeDependencies.push_back(memcpyNode); void* kernelArgs[2] = { (void*)&d_v, (void*)&d_v_r }; kernelNodeParams.func = (void*)sum_reduction; kernelNodeParams.gridDim = dim3(GRID_SIZE, 1, 1); kernelNodeParams.blockDim = dim3(TB_SIZE, 1, 1); kernelNodeParams.sharedMemBytes = 0; kernelNodeParams.kernelParams = (void**)kernelArgs; kernelNodeParams.extra = NULL; cudaGraphAddKernelNode(&kernelNode, graph, nodeDependencies.data(), nodeDependencies.size(), &kernelNodeParams); nodeDependencies.clear(); nodeDependencies.push_back(kernelNode); memset(&kernelNodeParams, 0, sizeof(kernelNodeParams)); void* kernelArgs2[2] = { (void*)&d_v_r, (void*)&d_v_r }; kernelNodeParams.func = (void*)sum_reduction; kernelNodeParams.gridDim = dim3(1, 1, 1); kernelNodeParams.blockDim = dim3(TB_SIZE, 1, 1); kernelNodeParams.sharedMemBytes = 0; kernelNodeParams.kernelParams = (void**)kernelArgs2; kernelNodeParams.extra = NULL; cudaGraphAddKernelNode(&kernelNode, graph, nodeDependencies.data(), nodeDependencies.size(), &kernelNodeParams); nodeDependencies.clear(); nodeDependencies.push_back(kernelNode); memset(&memcpyParams, 0, sizeof(memcpyParams)); memcpyParams.srcArray = NULL; memcpyParams.srcPos = make_hipPos(0, 0, 0); memcpyParams.srcPtr = make_hipPitchedPtr(d_v_r, sizeof(int) * n, n, 1); memcpyParams.dstArray = NULL; memcpyParams.dstPos = 
make_hipPos(0, 0, 0); memcpyParams.dstPtr = make_hipPitchedPtr(h_v_r, sizeof(int) * n, n, 1); memcpyParams.extent = make_hipExtent(sizeof(int) * n, 1, 1); memcpyParams.kind = hipMemcpyDeviceToHost; cudaGraphAddMemcpyNode(&memcpyNode, graph, nodeDependencies.data(), nodeDependencies.size(), &memcpyParams); nodeDependencies.clear(); nodeDependencies.push_back(memcpyNode); hipGraphNode_t* nodes = NULL; size_t numNodes = 0; hipGraphGetNodes(graph, nodes, &numNodes); cout << "Num of nodes in the graph created manually = " << numNodes << endl; hipGraphExec_t graphExec; hipGraphInstantiate(&graphExec, graph, NULL, NULL, 0); hipGraph_t clonedGraph; hipGraphExec_t clonedGraphExec; cudaGraphClone(&clonedGraph, graph); hipGraphInstantiate(&clonedGraphExec, clonedGraph, NULL, NULL, 0); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); for (int i = 0; i < GRAPH_LAUNCH_ITERATIONS; i++){ hipGraphLaunch(graphExec, streamForGraph); hipStreamSynchronize(streamForGraph); verify_result(h_v, h_v_r, n); } hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); cout << "Verifying Cloned Graph ..." << endl; for (int i = 0; i < GRAPH_LAUNCH_ITERATIONS; i++){ hipGraphLaunch(clonedGraphExec, streamForGraph); hipStreamSynchronize(streamForGraph); verify_result(h_v, h_v_r, n); } cout << "Done! Verifyied successfully" << endl; cout << "\nTime taken by using CUDA GRAPH in ms : " << milliseconds / GRAPH_LAUNCH_ITERATIONS << endl; hipGraphExecDestroy(graphExec); hipGraphExecDestroy(clonedGraphExec); hipGraphDestroy(graph); hipGraphDestroy(clonedGraph); hipStreamDestroy(streamForGraph); hipEventDestroy(start); hipEventDestroy(stop); } void sumReduction(int* h_v, int* h_v_r, int* d_v, int* d_v_r, int TB_SIZE, int GRID_SIZE, int n) { hipMemcpy(d_v, h_v, n * sizeof(int), hipMemcpyHostToDevice); sum_reduction << <GRID_SIZE, TB_SIZE >> > (d_v, d_v_r); sum_reduction << <1, TB_SIZE >> > (d_v_r, d_v_r); hipMemcpy(h_v_r, d_v_r, n * sizeof(int), hipMemcpyDeviceToHost); verify_result(h_v, h_v_r, n); } int main() { int n = 1 << 20; size_t bytes = n * sizeof(int); int* h_v, * h_v_r; int* d_v, * d_v_r; h_v = (int*)malloc(bytes); h_v_r = (int*)malloc(bytes); hipMalloc(&d_v, bytes); hipMalloc(&d_v_r, bytes); initialize_vector(h_v, n); int TB_SIZE = SIZE; int GRID_SIZE = (n + TB_SIZE - 1) / TB_SIZE; cout << "Normal Sum Reduction\n" << endl; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); for (int i = 0; i < GRAPH_LAUNCH_ITERATIONS; i++) { sumReduction(h_v, h_v_r, d_v, d_v_r, TB_SIZE, GRID_SIZE, n); hipStreamSynchronize(0); } hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); cout << "Time taken without CUDA GRAPH in ms : " << milliseconds/ GRAPH_LAUNCH_ITERATIONS << endl; cout << "----------------------------------------------------\n" << endl; cout << "Sum Reduction using CUDA GRAPHS (Graph API)\n" << endl; cudaGraphAPIsumReduction(h_v, h_v_r, d_v, d_v_r, TB_SIZE, GRID_SIZE, n); cout << "----------------------------------------------------\n" << endl; cout << "\nThe time is the average time of all the kernel launchs. The total kernel launches are " << GRAPH_LAUNCH_ITERATIONS << endl; hipFree(d_v); hipFree(d_v_r); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
921f4d9c9205ac9196d312702fae7db160d8a7fd.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include <iostream> #include <vector> #define SIZE 1024 #define SHMEM_SIZE 1024*sizeof(int) #define GRAPH_LAUNCH_ITERATIONS 300 using std::cout; using std::endl; __global__ void sum_reduction(int* v, int* v_r) { __shared__ int partial_sum[SHMEM_SIZE]; int tid = blockIdx.x * blockDim.x + threadIdx.x; partial_sum[threadIdx.x] = v[tid]; __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { partial_sum[threadIdx.x] += partial_sum[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { v_r[blockIdx.x] = partial_sum[0]; } } void initialize_vector(int* v, int n) { for (int i = 0; i < n; i++) { v[i] = rand() % 100; } } void verify_result(int* a, int* r, const int N) { int sum = 0; for (int i = 0; i < N; i++) { sum += a[i]; } if (sum != r[0]) { cout << "NOT SUCCESSFUL" << endl; exit(0); } } void cudaGraphAPIsumReduction(int* h_v, int* h_v_r, int* d_v, int* d_v_r, int TB_SIZE, int GRID_SIZE, int n) { cudaStream_t streamForGraph; cudaGraph_t graph; std::vector<cudaGraphNode_t> nodeDependencies; cudaGraphNode_t memcpyNode, kernelNode, memsetNode; double result_h = 0.0; cudaStreamCreateWithFlags(&streamForGraph, cudaStreamNonBlocking); cudaKernelNodeParams kernelNodeParams = { 0 }; cudaMemcpy3DParms memcpyParams = { 0 }; cudaMemsetParams memsetParams = { 0 }; memcpyParams.srcArray = NULL; memcpyParams.srcPos = make_cudaPos(0, 0, 0); memcpyParams.srcPtr = make_cudaPitchedPtr(h_v, sizeof(int) * n, n, 1); memcpyParams.dstArray = NULL; memcpyParams.dstPos = make_cudaPos(0, 0, 0); memcpyParams.dstPtr = make_cudaPitchedPtr(d_v, sizeof(int) * n, n, 1); memcpyParams.extent = make_cudaExtent(sizeof(int) * n, 1, 1); memcpyParams.kind = cudaMemcpyHostToDevice; //Adding memsetParams node memsetParams.dst = (void*)d_v_r; memsetParams.value = 0; memsetParams.pitch = 0; memsetParams.elementSize = sizeof(int); memsetParams.width = n; memsetParams.height = 1; cudaGraphCreate(&graph, 0); cudaGraphAddMemcpyNode(&memcpyNode, graph, NULL, 0, &memcpyParams); cudaGraphAddMemsetNode(&memsetNode, graph, NULL, 0, &memsetParams); nodeDependencies.push_back(memsetNode); nodeDependencies.push_back(memcpyNode); void* kernelArgs[2] = { (void*)&d_v, (void*)&d_v_r }; kernelNodeParams.func = (void*)sum_reduction; kernelNodeParams.gridDim = dim3(GRID_SIZE, 1, 1); kernelNodeParams.blockDim = dim3(TB_SIZE, 1, 1); kernelNodeParams.sharedMemBytes = 0; kernelNodeParams.kernelParams = (void**)kernelArgs; kernelNodeParams.extra = NULL; cudaGraphAddKernelNode(&kernelNode, graph, nodeDependencies.data(), nodeDependencies.size(), &kernelNodeParams); nodeDependencies.clear(); nodeDependencies.push_back(kernelNode); memset(&kernelNodeParams, 0, sizeof(kernelNodeParams)); void* kernelArgs2[2] = { (void*)&d_v_r, (void*)&d_v_r }; kernelNodeParams.func = (void*)sum_reduction; kernelNodeParams.gridDim = dim3(1, 1, 1); kernelNodeParams.blockDim = dim3(TB_SIZE, 1, 1); kernelNodeParams.sharedMemBytes = 0; kernelNodeParams.kernelParams = (void**)kernelArgs2; kernelNodeParams.extra = NULL; cudaGraphAddKernelNode(&kernelNode, graph, nodeDependencies.data(), nodeDependencies.size(), &kernelNodeParams); nodeDependencies.clear(); nodeDependencies.push_back(kernelNode); memset(&memcpyParams, 0, sizeof(memcpyParams)); memcpyParams.srcArray = NULL; memcpyParams.srcPos = make_cudaPos(0, 0, 0); memcpyParams.srcPtr = make_cudaPitchedPtr(d_v_r, sizeof(int) * n, n, 1); memcpyParams.dstArray = NULL; memcpyParams.dstPos = make_cudaPos(0, 0, 0); memcpyParams.dstPtr = 
make_cudaPitchedPtr(h_v_r, sizeof(int) * n, n, 1); memcpyParams.extent = make_cudaExtent(sizeof(int) * n, 1, 1); memcpyParams.kind = cudaMemcpyDeviceToHost; cudaGraphAddMemcpyNode(&memcpyNode, graph, nodeDependencies.data(), nodeDependencies.size(), &memcpyParams); nodeDependencies.clear(); nodeDependencies.push_back(memcpyNode); cudaGraphNode_t* nodes = NULL; size_t numNodes = 0; cudaGraphGetNodes(graph, nodes, &numNodes); cout << "Num of nodes in the graph created manually = " << numNodes << endl; cudaGraphExec_t graphExec; cudaGraphInstantiate(&graphExec, graph, NULL, NULL, 0); cudaGraph_t clonedGraph; cudaGraphExec_t clonedGraphExec; cudaGraphClone(&clonedGraph, graph); cudaGraphInstantiate(&clonedGraphExec, clonedGraph, NULL, NULL, 0); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); for (int i = 0; i < GRAPH_LAUNCH_ITERATIONS; i++){ cudaGraphLaunch(graphExec, streamForGraph); cudaStreamSynchronize(streamForGraph); verify_result(h_v, h_v_r, n); } cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cout << "Verifying Cloned Graph ..." << endl; for (int i = 0; i < GRAPH_LAUNCH_ITERATIONS; i++){ cudaGraphLaunch(clonedGraphExec, streamForGraph); cudaStreamSynchronize(streamForGraph); verify_result(h_v, h_v_r, n); } cout << "Done! Verifyied successfully" << endl; cout << "\nTime taken by using CUDA GRAPH in ms : " << milliseconds / GRAPH_LAUNCH_ITERATIONS << endl; cudaGraphExecDestroy(graphExec); cudaGraphExecDestroy(clonedGraphExec); cudaGraphDestroy(graph); cudaGraphDestroy(clonedGraph); cudaStreamDestroy(streamForGraph); cudaEventDestroy(start); cudaEventDestroy(stop); } void sumReduction(int* h_v, int* h_v_r, int* d_v, int* d_v_r, int TB_SIZE, int GRID_SIZE, int n) { cudaMemcpy(d_v, h_v, n * sizeof(int), cudaMemcpyHostToDevice); sum_reduction << <GRID_SIZE, TB_SIZE >> > (d_v, d_v_r); sum_reduction << <1, TB_SIZE >> > (d_v_r, d_v_r); cudaMemcpy(h_v_r, d_v_r, n * sizeof(int), cudaMemcpyDeviceToHost); verify_result(h_v, h_v_r, n); } int main() { int n = 1 << 20; size_t bytes = n * sizeof(int); int* h_v, * h_v_r; int* d_v, * d_v_r; h_v = (int*)malloc(bytes); h_v_r = (int*)malloc(bytes); cudaMalloc(&d_v, bytes); cudaMalloc(&d_v_r, bytes); initialize_vector(h_v, n); int TB_SIZE = SIZE; int GRID_SIZE = (n + TB_SIZE - 1) / TB_SIZE; cout << "Normal Sum Reduction\n" << endl; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); for (int i = 0; i < GRAPH_LAUNCH_ITERATIONS; i++) { sumReduction(h_v, h_v_r, d_v, d_v_r, TB_SIZE, GRID_SIZE, n); cudaStreamSynchronize(0); } cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cout << "Time taken without CUDA GRAPH in ms : " << milliseconds/ GRAPH_LAUNCH_ITERATIONS << endl; cout << "----------------------------------------------------\n" << endl; cout << "Sum Reduction using CUDA GRAPHS (Graph API)\n" << endl; cudaGraphAPIsumReduction(h_v, h_v_r, d_v, d_v_r, TB_SIZE, GRID_SIZE, n); cout << "----------------------------------------------------\n" << endl; cout << "\nThe time is the average time of all the kernel launchs. The total kernel launches are " << GRAPH_LAUNCH_ITERATIONS << endl; cudaFree(d_v); cudaFree(d_v_r); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
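// A hedged alternative sketch, not part of the dataset: the pair above builds its graph with the
// explicit cudaGraphAdd*Node API (note that the .hip row leaves the graph calls and the
// cudaKernelNodeParams/cudaMemsetParams structs untranslated). The same reduction pipeline can
// usually be recorded via stream capture instead. This assumes sum_reduction, d_v, d_v_r, h_v,
// h_v_r, n, TB_SIZE and GRID_SIZE are set up exactly as in the file above; capture_and_run is a
// hypothetical helper name.
#include <cuda_runtime.h>

__global__ void sum_reduction(int* v, int* v_r);  // defined in the file above

void capture_and_run(int* d_v, int* d_v_r, int* h_v, int* h_v_r, int n, int tb, int grid_sz)
{
  cudaStream_t stream;
  cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);

  // Record the whole copy -> memset -> two-kernel -> copy-back pipeline into a graph.
  cudaGraph_t graph;
  cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
  cudaMemcpyAsync(d_v, h_v, n * sizeof(int), cudaMemcpyHostToDevice, stream);
  cudaMemsetAsync(d_v_r, 0, n * sizeof(int), stream);
  sum_reduction<<<grid_sz, tb, 0, stream>>>(d_v, d_v_r);
  sum_reduction<<<1, tb, 0, stream>>>(d_v_r, d_v_r);
  cudaMemcpyAsync(h_v_r, d_v_r, n * sizeof(int), cudaMemcpyDeviceToHost, stream);
  cudaStreamEndCapture(stream, &graph);

  // Instantiate and launch once; repeated launches would reuse graphExec as above.
  cudaGraphExec_t graphExec;
  cudaGraphInstantiate(&graphExec, graph, NULL, NULL, 0);
  cudaGraphLaunch(graphExec, stream);
  cudaStreamSynchronize(stream);

  cudaGraphExecDestroy(graphExec);
  cudaGraphDestroy(graph);
  cudaStreamDestroy(stream);
}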
c6cbd231cc7fd0b9915224cfd540287348a5f298.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "testcuda.h"

__global__ void checkIndex(void){
    int pos = blockDim.x * blockIdx.x + threadIdx.x;
    printf("pos = %d - threadIdx: (%d, %d, %d) blockIdx: (%d, %d, %d) blockDim:(%d,%d,%d) gridDim(%d, %d, %d)\n",
           pos, threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z);
    /*
    double a = 0.0;
    int z = threadIdx.z+blockDim.z*blockIdx.z;
    int xy = gridDim.x*blockDim.x*gridDim.y*blockDim.y;
    int y = threadIdx.y+blockDim.y*blockIdx.y;
    int xx = gridDim.x*blockDim.x;
    int x_X = threadIdx.x+blockDim.x*blockIdx.x;
    int id = z*xy + y*xx + x_X;
    int id2 = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
    for(int i = 0; i < 10000000; i++){
        a += i*0.000053123;
    }
    printf("a = %f... id=%d, id2=%d \n", a, id, id2);
    */
}

__host__ void cudaProgram(void){
    int nElem = 10;
    hipSetDevice(0);
    dim3 block(3);
    dim3 grid(3);
    printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
    printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
    hipLaunchKernelGGL(( checkIndex), dim3(grid), dim3(block), 0, 0, );
    printf("SU?");
    hipDeviceSynchronize();
    printf("SU?2");
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err));
    hipDeviceReset();
}
c6cbd231cc7fd0b9915224cfd540287348a5f298.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include "testcuda.h"

__global__ void checkIndex(void){
    int pos = blockDim.x * blockIdx.x + threadIdx.x;
    printf("pos = %d - threadIdx: (%d, %d, %d) blockIdx: (%d, %d, %d) blockDim:(%d,%d,%d) gridDim(%d, %d, %d)\n",
           pos, threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z);
    /*
    double a = 0.0;
    int z = threadIdx.z+blockDim.z*blockIdx.z;
    int xy = gridDim.x*blockDim.x*gridDim.y*blockDim.y;
    int y = threadIdx.y+blockDim.y*blockIdx.y;
    int xx = gridDim.x*blockDim.x;
    int x_X = threadIdx.x+blockDim.x*blockIdx.x;
    int id = z*xy + y*xx + x_X;
    int id2 = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
    for(int i = 0; i < 10000000; i++){
        a += i*0.000053123;
    }
    printf("a = %f... id=%d, id2=%d \n", a, id, id2);
    */
}

__host__ void cudaProgram(void){
    int nElem = 10;
    cudaSetDevice(0);
    dim3 block(3);
    dim3 grid(3);
    printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
    printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
    checkIndex<<<grid, block>>>();
    printf("SU?");
    cudaDeviceSynchronize();
    printf("SU?2");
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err));
    cudaDeviceReset();
}
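// A small hedged sketch, not part of the pair above: the commented-out block inside checkIndex()
// computes a flattened 3-D global thread index. Written out as a hypothetical device helper, the
// same arithmetic is:
__device__ int globalLinearId()
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    int width  = gridDim.x * blockDim.x;  // total threads along x (the "xx" in the comment)
    int height = gridDim.y * blockDim.y;
    return (z * height + y) * width + x;  // id = z*xy + y*xx + x, with xy = width*height
}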
89d5351a639a24387d0c66951763f23db161db36.hip
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=512 --gridDim=512

#include <hip/hip_runtime.h>

//////////////////////////////////////////////////////////////////////////////
//// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
//// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
//// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
//// PARTICULAR PURPOSE.
////
//// Copyright (c) Microsoft Corporation. All rights reserved
//////////////////////////////////////////////////////////////////////////////

// Original kernels are templated. We will check the float case.
#define _type float

//----------------------------------------------------------------------------
// File: BitonicSort.cpp
//
// Implements Bitonic sort in C++ AMP
// Supports only int, unsigned, long and unsigned long
//----------------------------------------------------------------------------

#define BITONIC_TILE_SIZE 512

// Should be a square matrix
#define NUM_ELEMENTS (BITONIC_TILE_SIZE * BITONIC_TILE_SIZE)
#define MATRIX_WIDTH BITONIC_TILE_SIZE
#define MATRIX_HEIGHT BITONIC_TILE_SIZE

// Should be divisible by MATRIX_WIDTH and MATRIX_HEIGHT
// else parallel_for_each will crash
#define TRANSPOSE_TILE_SIZE 16

//----------------------------------------------------------------------------
// Kernel implements partial sorting on accelerator, BITONIC_TILE_SIZE at a time
//----------------------------------------------------------------------------
__global__ void bitonic_sort_kernel(_type* data, unsigned ulevel, unsigned ulevelmask)
{
    __shared__ _type sh_data[BITONIC_TILE_SIZE];

    int local_idx = threadIdx.x;
    int global_idx = blockIdx.x*blockDim.x + threadIdx.x;

    // Cooperatively load data - each thread will load data from global memory
    // into tile_static
    sh_data[local_idx] = data[global_idx];

    // Wait till all threads have loaded their portion of data
#ifndef MUTATION
    /* BUGINJECT: REMOVE_BARRIER, DOWN */
    __syncthreads();
#endif

    // Sort data in tile_static memory
    for (unsigned int j = ulevel >> 1 ; j > 0 ; j >>= 1)
    {
        _type result = ((sh_data[local_idx & ~j] <= sh_data[local_idx | j]) == (bool)(ulevelmask & global_idx)) ?
                         sh_data[local_idx ^ j] : sh_data[local_idx];
        __syncthreads();

        sh_data[local_idx] = result;
        __syncthreads();
    }

    // Store shared data
    data[global_idx] = sh_data[local_idx];
}
89d5351a639a24387d0c66951763f23db161db36.cu
//pass
//--blockDim=512 --gridDim=512

#include <cuda.h>

//////////////////////////////////////////////////////////////////////////////
//// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
//// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
//// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
//// PARTICULAR PURPOSE.
////
//// Copyright (c) Microsoft Corporation. All rights reserved
//////////////////////////////////////////////////////////////////////////////

// Original kernels are templated. We will check the float case.
#define _type float

//----------------------------------------------------------------------------
// File: BitonicSort.cpp
//
// Implements Bitonic sort in C++ AMP
// Supports only int, unsigned, long and unsigned long
//----------------------------------------------------------------------------

#define BITONIC_TILE_SIZE 512

// Should be a square matrix
#define NUM_ELEMENTS (BITONIC_TILE_SIZE * BITONIC_TILE_SIZE)
#define MATRIX_WIDTH BITONIC_TILE_SIZE
#define MATRIX_HEIGHT BITONIC_TILE_SIZE

// Should be divisible by MATRIX_WIDTH and MATRIX_HEIGHT
// else parallel_for_each will crash
#define TRANSPOSE_TILE_SIZE 16

//----------------------------------------------------------------------------
// Kernel implements partial sorting on accelerator, BITONIC_TILE_SIZE at a time
//----------------------------------------------------------------------------
__global__ void bitonic_sort_kernel(_type* data, unsigned ulevel, unsigned ulevelmask)
{
    __shared__ _type sh_data[BITONIC_TILE_SIZE];

    int local_idx = threadIdx.x;
    int global_idx = blockIdx.x*blockDim.x + threadIdx.x;

    // Cooperatively load data - each thread will load data from global memory
    // into tile_static
    sh_data[local_idx] = data[global_idx];

    // Wait till all threads have loaded their portion of data
#ifndef MUTATION
    /* BUGINJECT: REMOVE_BARRIER, DOWN */
    __syncthreads();
#endif

    // Sort data in tile_static memory
    for (unsigned int j = ulevel >> 1 ; j > 0 ; j >>= 1)
    {
        _type result = ((sh_data[local_idx & ~j] <= sh_data[local_idx | j]) == (bool)(ulevelmask & global_idx)) ?
                         sh_data[local_idx ^ j] : sh_data[local_idx];
        __syncthreads();

        sh_data[local_idx] = result;
        __syncthreads();
    }

    // Store shared data
    data[global_idx] = sh_data[local_idx];
}
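// A hedged host-side fragment, not part of the dataset: in the original BitonicSort sample this
// kernel is dispatched once per level to pre-sort each BITONIC_TILE_SIZE-element tile in shared
// memory; the later transpose phase that handles levels larger than one tile is omitted here.
// d_data is a hypothetical device buffer holding NUM_ELEMENTS values of _type.
for (unsigned level = 2; level <= BITONIC_TILE_SIZE; level <<= 1)
{
    bitonic_sort_kernel<<<NUM_ELEMENTS / BITONIC_TILE_SIZE, BITONIC_TILE_SIZE>>>(d_data, level, level);
}
cudaDeviceSynchronize();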
5aa93684a3cd71757c1db15c6888220ec455bf7f.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_fp16.h> #include <hipcub/hipcub.hpp> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/fused/attention_layer_norm.h" #include "paddle/fluid/operators/fused/attn_gemm.h" #include "paddle/fluid/operators/fused/fmha_ref.h" #include "paddle/fluid/operators/fused/fused_dropout_helper.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/phi/kernels/funcs/broadcast_function.h" #include "paddle/phi/kernels/funcs/elementwise_functor.h" #include "paddle/phi/kernels/funcs/math_function.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> static void AllReduce(framework::Tensor &tensor, // NOLINT const int ring_id, const platform::CUDADeviceContext &ctx) { if (ring_id == -1) return; #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto dtype = platform::ToNCCLDataType(framework::TransToProtoVarType(tensor.dtype())); int64_t numel = tensor.numel(); const void *sendbuff = tensor.data<T>(); auto place = ctx.GetPlace(); void *recvbuff = tensor.mutable_data<T>(place); auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place); auto stream = ctx.stream(); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream)); #else PADDLE_THROW(platform::errors::Unimplemented( "PaddlePaddle should compile with NCCL or RCCL when used tensor model " "parallel op.")); #endif } template <typename T> class FusedAttentionOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; auto *input_x = ctx.Input<Tensor>("X"); const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); auto *ln_scale = ctx.Input<Tensor>("LnScale"); auto *ln_bias = ctx.Input<Tensor>("LnBias"); auto *ln_mean = ctx.Output<Tensor>("LnMean"); auto *ln_var = ctx.Output<Tensor>("LnVariance"); auto *ln_out = ctx.Output<Tensor>("LnOut"); // x: qkv's input [batch_size, seq_len, dim_embed] // y: qkv's weight: [3, num_head, dim_head, dim_embed] auto *qkv_weight = ctx.Input<Tensor>("QKVW"); auto *qkv_bias = ctx.Input<Tensor>("QKVBias"); auto *qkv_out = ctx.Output<Tensor>("QKVOut"); auto *qkv_bias_out = ctx.Output<Tensor>("QKVBiasOut"); auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto *transpose_out_2 = ctx.Output<Tensor>("TransposeOut2"); auto *cache_kv = ctx.Input<Tensor>("CacheKV"); auto *cache_kv_out = ctx.Output<Tensor>("CacheKVOut"); auto *qk_out = ctx.Output<Tensor>("QKOut"); auto *qktv_out = 
ctx.Output<Tensor>("QKTVOut"); auto *softmax_out = ctx.Output<Tensor>("SoftmaxOut"); auto *attn_dropout_mask_out = ctx.Output<Tensor>("AttnDropoutMaskOut"); auto *attn_dropout_out = ctx.Output<Tensor>("AttnDropoutOut"); auto *src_mask_out = ctx.Output<Tensor>("SrcMaskOut"); auto *fmha_out = ctx.Output<Tensor>("FMHAOut"); auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW"); auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias"); auto *out_linear_out = ctx.Output<Tensor>("OutLinearOut"); auto *ln_scale_2 = ctx.Input<Tensor>("Ln2Scale"); auto *ln_bias_2 = ctx.Input<Tensor>("Ln2Bias"); auto *dropout_mask_out = ctx.Output<Tensor>("DropoutMaskOut"); auto *bias_dropout_residual_out = ctx.Output<Tensor>("BiasDropoutResidualOut"); auto *ln_mean_2 = ctx.Output<Tensor>("Ln2Mean"); auto *ln_var_2 = ctx.Output<Tensor>("Ln2Variance"); const float ln_epsilon = ctx.Attr<float>("ln_epsilon"); float attn_dropout_rate = ctx.Attr<float>("attn_dropout_rate"); bool is_test_1 = ctx.Attr<bool>("is_test"); auto &dropout_implementation_1 = ctx.Attr<std::string>("attn_dropout_implementation"); bool is_upscale_in_train_1 = (dropout_implementation_1 == "upscale_in_train"); auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr; bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed"); int seed_val_1 = ctx.Attr<int>("attn_dropout_seed"); int ring_id = ctx.Attr<int>("ring_id"); // final output. auto *out = ctx.Output<Tensor>("Y"); // get data ptr for qkv part. const auto input_x_dims = input_x->dims(); const auto qkv_w_dims = qkv_weight->dims(); auto *x_data = input_x->data<T>(); auto *qkv_weight_data = qkv_weight->data<T>(); auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>(); auto *qkv_out_data = qkv_out->mutable_data<T>(ctx.GetPlace()); auto *qkv_bias_out_data = (qkv_bias == nullptr) ? nullptr : qkv_bias_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for FMHA. auto *transpose_out_2_data = transpose_out_2->mutable_data<T>(ctx.GetPlace()); auto *cache_kv_out_data = (cache_kv_out == nullptr) ? nullptr : cache_kv_out->mutable_data<T>(ctx.GetPlace()); auto *qk_out_data = qk_out->mutable_data<T>(ctx.GetPlace()); auto *qktv_out_data = qktv_out->mutable_data<T>(ctx.GetPlace()); auto *src_mask_out_data = (src_mask == nullptr) ? nullptr : src_mask_out->mutable_data<T>(ctx.GetPlace()); auto *softmax_out_data = softmax_out->mutable_data<T>(ctx.GetPlace()); auto *attn_dropout_mask_out_data = attn_dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace()); auto *attn_dropout_out_data = attn_dropout_out->mutable_data<T>(ctx.GetPlace()); auto *fmha_out_data = fmha_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for out_linear. auto *out_linear_weight_data = out_linear_weight->data<T>(); auto *out_linear_bias_data = (out_linear_bias == nullptr) ? 
nullptr : out_linear_bias->data<T>(); auto *out_linear_out_data = out_linear_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for bias+dropout+residual+layernorm auto *dropout_mask_out_data = dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace()); auto *final_out_data = out->mutable_data<T>(ctx.GetPlace()); int batch_size = input_x_dims[0]; int max_seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int num_head = qkv_w_dims[1]; int dim_head = qkv_w_dims[2]; int bsz_seq = batch_size * max_seq_len; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; auto layer_norm_compute = AttnLayerNorm<T>(ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed); bool compute_bias = true; if (qkv_bias == nullptr) { compute_bias = false; } // (transA, transB, compute_bias) = (false, true, true) auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(), false, true, bsz_seq, output_size, input_size, compute_bias); AttnDropoutParam attn_dropout_param( is_test_1, dropout_implementation_1, attn_dropout_rate, is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1); auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head, dim_head, attn_dropout_param); output_size = hidden_size; // (transA, transB, compute_bias) = (false, false, false) // NOTE(Yuang Liu): For general input size == output size, change the // position won't have effects. For mp, the output size is mp_head * dkey // which is actually the input size. While the input size is hidden size, // which is actually the output size. So for out linear, switch the // input size and output size. auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(), false, false, bsz_seq, input_size, output_size, false); DropoutParam dropout_param2(ctx, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2, ln_epsilon); if (pre_layer_norm) { auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>()); auto *ln_bias_data = (ln_bias == nullptr ? 
nullptr : ln_bias->data<U>()); auto *ln_mean_data = ln_mean->mutable_data<U>(ctx.GetPlace()); auto *ln_var_data = ln_var->mutable_data<U>(ctx.GetPlace()); auto *ln_out_data = ln_out->mutable_data<T>(ctx.GetPlace()); layer_norm_compute.ComputeForward(x_data, ln_scale_data, ln_bias_data, ln_out_data, ln_mean_data, ln_var_data); qkv_compute.ComputeForward(qkv_weight, ln_out, qkv_bias, qkv_out, qkv_bias_out); } else { qkv_compute.ComputeForward(qkv_weight, input_x, qkv_bias, qkv_out, qkv_bias_out); } if (qkv_bias == nullptr) { fmha_ref_compute.ComputeForward( *qkv_out, cache_kv, src_mask, transpose_out_2, cache_kv_out, qk_out, src_mask_out, softmax_out, attn_dropout_mask_out, attn_dropout_out, qktv_out, fmha_out); } else { fmha_ref_compute.ComputeForward( *qkv_bias_out, cache_kv, src_mask, transpose_out_2, cache_kv_out, qk_out, src_mask_out, softmax_out, attn_dropout_mask_out, attn_dropout_out, qktv_out, fmha_out); } // fmha_out: [batch_size, seq_len, num_head, head_dim] // weight: [embed_dim, embed_dim] // out_linear_out: [batch_size, seq_len, embed_dim] out_linear_compute.ComputeForward(out_linear_weight, fmha_out, nullptr, out_linear_out, nullptr); // tensor model parallel AllReduce<T>(*out_linear_out, ring_id, ctx.cuda_device_context()); if (pre_layer_norm) { // output = (residual + dropout(input + bias)) fused_dropout_layernorm_helper.ResidualDropoutBias( ctx.cuda_device_context(), out_linear_out_data, x_data, out_linear_bias_data, final_out_data, dropout_mask_out_data); } else { auto *ln_scale_2_data = (ln_scale_2 == nullptr ? nullptr : ln_scale_2->data<U>()); auto *ln_bias_2_data = (ln_bias_2 == nullptr ? nullptr : ln_bias_2->data<U>()); auto *bias_dropout_residual_out_data = bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace()); auto *ln_mean_2_data = ln_mean_2->mutable_data<U>(ctx.GetPlace()); auto *ln_var_2_data = ln_var_2->mutable_data<U>(ctx.GetPlace()); // output = layernorm(residual + dropout(input + bias)) fused_dropout_layernorm_helper.LayernormResidualDropoutBias( ctx.cuda_device_context(), out_linear_out_data, x_data, out_linear_bias_data, ln_scale_2_data, ln_bias_2_data, bias_dropout_residual_out_data, dropout_mask_out_data, final_out_data, ln_mean_2_data, ln_var_2_data); } } }; template <typename T> class FusedAttentionGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); const float ln2epsilon = ctx.Attr<float>("ln_epsilon"); float attn_dropout_prob = ctx.Attr<float>("attn_dropout_rate"); bool is_test_1 = ctx.Attr<bool>("is_test"); auto &dropout_implementation_1 = ctx.Attr<std::string>("attn_dropout_implementation"); bool is_upscale_in_train_1 = (dropout_implementation_1 == "upscale_in_train"); auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr; bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed"); int seed_val_1 = ctx.Attr<int>("attn_dropout_seed"); int ring_id = ctx.Attr<int>("ring_id"); // get inputs. auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); auto *d_y_data = d_y->data<T>(); // fw input auto *input_x = ctx.Input<Tensor>("X"); auto *ln_scale = ctx.Input<Tensor>("LnScale"); auto *ln_2_scale = ctx.Input<Tensor>("Ln2Scale"); auto *x_data = input_x->data<T>(); auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>()); auto *ln_2_scale_data = (ln_2_scale == nullptr ? 
nullptr : ln_2_scale->data<U>()); // fw parameters. auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto *qkv_weight = ctx.Input<Tensor>("QKVW"); auto *qkv_bias = ctx.Input<Tensor>("QKVBias"); auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW"); auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias"); auto *src_mask_data = (src_mask == nullptr ? nullptr : src_mask->data<T>()); auto *qkv_weight_data = qkv_weight->data<T>(); auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>(); auto *out_linear_weight_data = out_linear_weight->data<T>(); auto *out_linear_bias_data = (out_linear_bias == nullptr) ? nullptr : out_linear_bias->data<T>(); // fw output auto *fmha_out = ctx.Input<Tensor>("FMHAOut"); auto *transpose_out_2 = ctx.Input<Tensor>("TransposeOut2"); auto *qk_out = ctx.Input<Tensor>("QKOut"); auto *qktv_out = ctx.Input<Tensor>("QKTVOut"); auto *softmax_out = ctx.Input<Tensor>("SoftmaxOut"); auto *attn_dropout_mask_out = ctx.Input<Tensor>("AttnDropoutMaskOut"); auto *attn_dropout_out = ctx.Input<Tensor>("AttnDropoutOut"); auto *src_mask_out = ctx.Input<Tensor>("SrcMaskOut"); auto *out_linear_out = ctx.Input<Tensor>("OutLinearOut"); auto *ln_2_mean = ctx.Input<Tensor>("Ln2Mean"); auto *ln_2_var = ctx.Input<Tensor>("Ln2Variance"); auto *dropout_mask_out = ctx.Input<Tensor>("DropoutMaskOut"); auto *bias_dropout_residual_out = ctx.Input<Tensor>("BiasDropoutResidualOut"); auto *fmha_out_data = fmha_out->data<T>(); auto *transpose_out_2_data = transpose_out_2->data<T>(); auto *qk_out_data = qk_out->data<T>(); auto *qktv_out_data = qktv_out->data<T>(); auto *softmax_out_data = softmax_out->data<T>(); auto *src_mask_out_data = (src_mask == nullptr) ? nullptr : src_mask_out->data<T>(); auto *out_linear_out_data = out_linear_out->data<T>(); auto *dropout_mask_out_data = dropout_mask_out->data<uint8_t>(); // output's grad auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_qkv_out = ctx.Output<Tensor>(framework::GradVarName("QKVOut")); auto *d_qkv_bias_out = ctx.Output<Tensor>(framework::GradVarName("QKVBiasOut")); auto *d_qktv_out = ctx.Output<Tensor>(framework::GradVarName("QKTVOut")); auto *d_transpose_out_2 = ctx.Output<Tensor>(framework::GradVarName("TransposeOut2")); auto *d_qk_out = ctx.Output<Tensor>(framework::GradVarName("QKOut")); auto *d_softmax_out = ctx.Output<Tensor>(framework::GradVarName("SoftmaxOut")); auto *d_attn_dropout_out = ctx.Output<Tensor>(framework::GradVarName("AttnDropoutOut")); auto *d_src_mask_out = ctx.Output<Tensor>(framework::GradVarName("SrcMaskOut")); auto *d_fmha_out = ctx.Output<Tensor>(framework::GradVarName("FMHAOut")); auto *d_out_linear_out = ctx.Output<Tensor>(framework::GradVarName("OutLinearOut")); auto *d_bias_dropout_residual_out = ctx.Output<Tensor>(framework::GradVarName("BiasDropoutResidualOut")); auto *d_x_data = d_x->mutable_data<T>(ctx.GetPlace()); // when qkv_bias is not nullptr, d_qkv_out is equals to d_qkv_bias_out, the // space can be reused. auto *d_qkv_out_data = (d_qkv_bias_out != nullptr) ? nullptr : d_qkv_out->mutable_data<T>(ctx.GetPlace()); auto *d_qkv_bias_out_data = (d_qkv_bias_out == nullptr) ? 
nullptr : d_qkv_bias_out->mutable_data<T>(ctx.GetPlace()); auto *d_qktv_out_data = d_qktv_out->mutable_data<T>(ctx.GetPlace()); auto *d_transpose_out_2_data = d_transpose_out_2->mutable_data<T>(ctx.GetPlace()); auto *d_qk_out_data = d_qk_out->mutable_data<T>(ctx.GetPlace()); auto *d_softmax_out_data = d_softmax_out->mutable_data<T>(ctx.GetPlace()); auto *d_attn_dropout_out_data = d_attn_dropout_out->mutable_data<T>(ctx.GetPlace()); auto *d_src_mask_out_data = (src_mask == nullptr) ? nullptr : d_src_mask_out->mutable_data<T>(ctx.GetPlace()); auto *d_fmha_out_data = d_fmha_out->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_out_data = d_out_linear_out->mutable_data<T>(ctx.GetPlace()); // parameter grad auto *d_qkv_weight = ctx.Output<Tensor>(framework::GradVarName("QKVW")); auto *d_qkv_bias = ctx.Output<Tensor>(framework::GradVarName("QKVBias")); auto *d_out_linear_weight = ctx.Output<Tensor>(framework::GradVarName("OutLinearW")); auto *d_out_linear_bias = ctx.Output<Tensor>(framework::GradVarName("OutLinearBias")); auto *d_ln_2_scale = ctx.Output<Tensor>(framework::GradVarName("Ln2Scale")); auto *d_ln_2_bias = ctx.Output<Tensor>(framework::GradVarName("Ln2Bias")); auto *d_qkv_weight_data = d_qkv_weight->mutable_data<T>(ctx.GetPlace()); auto *d_qkv_bias_data = (d_qkv_bias == nullptr) ? nullptr : d_qkv_bias->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_weight_data = d_out_linear_weight->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_bias_data = (d_out_linear_bias == nullptr) ? nullptr : d_out_linear_bias->mutable_data<T>(ctx.GetPlace()); const auto input_x_dims = input_x->dims(); const auto qkv_w_dims = qkv_weight->dims(); int batch_size = input_x_dims[0]; int max_seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int num_head = qkv_w_dims[1]; int dim_head = qkv_w_dims[2]; int bsz_seq = batch_size * max_seq_len; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; Tensor d_residual; d_residual.Resize(input_x_dims); T *d_residual_data = d_residual.mutable_data<T>(ctx.GetPlace()); bool transA = false; bool transB = true; bool compute_qkv_bias = true; if (qkv_bias == nullptr) { compute_qkv_bias = false; } auto layer_norm_compute = AttnLayerNorm<T>(ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed); auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq, output_size, input_size, compute_qkv_bias); AttnDropoutParam attn_dropout_param( is_test_1, dropout_implementation_1, attn_dropout_prob, is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1); auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head, dim_head, attn_dropout_param); output_size = hidden_size; transA = false; transB = false; bool compute_bias = false; // (b*s, num_head * dim_head) * (num_head * dim_head, dim_embed) auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq, input_size, output_size, compute_bias); DropoutParam dropout_param2(ctx, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2, ln2epsilon); if (pre_layer_norm) { fused_dropout_layernorm_helper.ResidualDropoutBiasGrad( ctx.cuda_device_context(), d_y_data, dropout_mask_out_data, d_out_linear_out_data, d_residual_data, d_out_linear_bias_data); } else { auto *ln_2_mean_data = ln_2_mean->data<U>(); auto *ln_2_var_data = ln_2_var->data<U>(); auto *bias_dropout_residual_out_data = 
bias_dropout_residual_out->data<T>(); auto *d_ln_2_scale_data = (d_ln_2_scale == nullptr ? nullptr : d_ln_2_scale->mutable_data<U>(ctx.GetPlace())); auto *d_ln_2_bias_data = (d_ln_2_bias == nullptr ? nullptr : d_ln_2_bias->mutable_data<U>(ctx.GetPlace())); auto *d_bias_dropout_residual_out_data = d_bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace()); fused_dropout_layernorm_helper.LayernormResidualDropoutBiasGrad( ctx.cuda_device_context(), d_y_data, bias_dropout_residual_out_data, dropout_mask_out_data, ln_2_scale_data, ln_2_mean_data, ln_2_var_data, d_bias_dropout_residual_out_data, d_ln_2_scale_data, d_ln_2_bias_data, d_out_linear_out_data, d_out_linear_bias_data, d_residual_data); } out_linear_compute.ComputeBackward(fmha_out, out_linear_weight, d_out_linear_out, d_fmha_out, d_out_linear_weight, nullptr); if (qkv_bias != nullptr) { fmha_ref_compute.ComputeBackward( *transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out, *attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out, d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out, d_transpose_out_2, nullptr, d_qkv_bias_out); } else { fmha_ref_compute.ComputeBackward( *transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out, *attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out, d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out, d_transpose_out_2, nullptr, d_qkv_out); } if (pre_layer_norm) { auto *ln_mean = ctx.Input<Tensor>("LnMean"); auto *ln_var = ctx.Input<Tensor>("LnVariance"); auto *ln_out = ctx.Input<Tensor>("LnOut"); auto *ln_mean_data = ln_mean->data<U>(); auto *ln_var_data = ln_var->data<U>(); auto *ln_out_data = ln_out->data<T>(); auto *d_ln_out = ctx.Output<Tensor>(framework::GradVarName("LnOut")); auto *d_ln_scale = ctx.Output<Tensor>(framework::GradVarName("LnScale")); auto *d_ln_bias = ctx.Output<Tensor>(framework::GradVarName("LnBias")); auto *d_ln_out_data = d_ln_out->mutable_data<T>(ctx.GetPlace()); auto *d_ln_scale_data = (d_ln_scale == nullptr ? nullptr : d_ln_scale->mutable_data<U>(ctx.GetPlace())); auto *d_ln_bias_data = (d_ln_bias == nullptr ? 
nullptr : d_ln_bias->mutable_data<U>(ctx.GetPlace())); if (qkv_bias != nullptr) { qkv_compute.ComputeBackward(ln_out, qkv_weight, d_qkv_bias_out, d_ln_out, d_qkv_weight, d_qkv_bias); } else { qkv_compute.ComputeBackward(ln_out, qkv_weight, d_qkv_out, d_ln_out, d_qkv_weight, d_qkv_bias); } // tensor model parallel AllReduce<T>(*d_ln_out, ring_id, ctx.cuda_device_context()); layer_norm_compute.ComputeBackward(x_data, d_ln_out_data, ln_scale_data, ln_mean_data, ln_var_data, d_x_data, d_ln_scale_data, d_ln_bias_data); } else { if (qkv_bias != nullptr) { qkv_compute.ComputeBackward(input_x, qkv_weight, d_qkv_bias_out, d_x, d_qkv_weight, d_qkv_bias); } else { qkv_compute.ComputeBackward(input_x, qkv_weight, d_qkv_out, d_x, d_qkv_weight, d_qkv_bias); } // tensor model parallel AllReduce<T>(*d_x, ring_id, ctx.cuda_device_context()); } // gradient accumulation std::vector<const Tensor *> ins; std::vector<Tensor *> outs; ins.emplace_back(&d_residual); ins.emplace_back(d_x); outs.emplace_back(d_x); int elewise_add_axis = -1; phi::funcs::BroadcastKernel<phi::ElementwiseType::kBinary, T, T>( ctx.cuda_device_context(), ins, &outs, elewise_add_axis, phi::funcs::AddFunctor<T>()); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(fused_attention, ops::FusedAttentionOpKernel<float>, ops::FusedAttentionOpKernel<double>, ops::FusedAttentionOpKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(fused_attention_grad, ops::FusedAttentionGradKernel<float>, ops::FusedAttentionGradKernel<double>, ops::FusedAttentionGradKernel<plat::float16>);
5aa93684a3cd71757c1db15c6888220ec455bf7f.cu
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cuda_fp16.h> #include <cub/cub.cuh> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/fused/attention_layer_norm.h" #include "paddle/fluid/operators/fused/attn_gemm.h" #include "paddle/fluid/operators/fused/fmha_ref.h" #include "paddle/fluid/operators/fused/fused_dropout_helper.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/phi/kernels/funcs/broadcast_function.h" #include "paddle/phi/kernels/funcs/elementwise_functor.h" #include "paddle/phi/kernels/funcs/math_function.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> static void AllReduce(framework::Tensor &tensor, // NOLINT const int ring_id, const platform::CUDADeviceContext &ctx) { if (ring_id == -1) return; #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto dtype = platform::ToNCCLDataType(framework::TransToProtoVarType(tensor.dtype())); int64_t numel = tensor.numel(); const void *sendbuff = tensor.data<T>(); auto place = ctx.GetPlace(); void *recvbuff = tensor.mutable_data<T>(place); auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place); auto stream = ctx.stream(); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream)); #else PADDLE_THROW(platform::errors::Unimplemented( "PaddlePaddle should compile with NCCL or RCCL when used tensor model " "parallel op.")); #endif } template <typename T> class FusedAttentionOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; auto *input_x = ctx.Input<Tensor>("X"); const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); auto *ln_scale = ctx.Input<Tensor>("LnScale"); auto *ln_bias = ctx.Input<Tensor>("LnBias"); auto *ln_mean = ctx.Output<Tensor>("LnMean"); auto *ln_var = ctx.Output<Tensor>("LnVariance"); auto *ln_out = ctx.Output<Tensor>("LnOut"); // x: qkv's input [batch_size, seq_len, dim_embed] // y: qkv's weight: [3, num_head, dim_head, dim_embed] auto *qkv_weight = ctx.Input<Tensor>("QKVW"); auto *qkv_bias = ctx.Input<Tensor>("QKVBias"); auto *qkv_out = ctx.Output<Tensor>("QKVOut"); auto *qkv_bias_out = ctx.Output<Tensor>("QKVBiasOut"); auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto *transpose_out_2 = ctx.Output<Tensor>("TransposeOut2"); auto *cache_kv = ctx.Input<Tensor>("CacheKV"); auto *cache_kv_out = ctx.Output<Tensor>("CacheKVOut"); auto *qk_out = ctx.Output<Tensor>("QKOut"); auto *qktv_out = ctx.Output<Tensor>("QKTVOut"); auto *softmax_out = 
ctx.Output<Tensor>("SoftmaxOut"); auto *attn_dropout_mask_out = ctx.Output<Tensor>("AttnDropoutMaskOut"); auto *attn_dropout_out = ctx.Output<Tensor>("AttnDropoutOut"); auto *src_mask_out = ctx.Output<Tensor>("SrcMaskOut"); auto *fmha_out = ctx.Output<Tensor>("FMHAOut"); auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW"); auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias"); auto *out_linear_out = ctx.Output<Tensor>("OutLinearOut"); auto *ln_scale_2 = ctx.Input<Tensor>("Ln2Scale"); auto *ln_bias_2 = ctx.Input<Tensor>("Ln2Bias"); auto *dropout_mask_out = ctx.Output<Tensor>("DropoutMaskOut"); auto *bias_dropout_residual_out = ctx.Output<Tensor>("BiasDropoutResidualOut"); auto *ln_mean_2 = ctx.Output<Tensor>("Ln2Mean"); auto *ln_var_2 = ctx.Output<Tensor>("Ln2Variance"); const float ln_epsilon = ctx.Attr<float>("ln_epsilon"); float attn_dropout_rate = ctx.Attr<float>("attn_dropout_rate"); bool is_test_1 = ctx.Attr<bool>("is_test"); auto &dropout_implementation_1 = ctx.Attr<std::string>("attn_dropout_implementation"); bool is_upscale_in_train_1 = (dropout_implementation_1 == "upscale_in_train"); auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr; bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed"); int seed_val_1 = ctx.Attr<int>("attn_dropout_seed"); int ring_id = ctx.Attr<int>("ring_id"); // final output. auto *out = ctx.Output<Tensor>("Y"); // get data ptr for qkv part. const auto input_x_dims = input_x->dims(); const auto qkv_w_dims = qkv_weight->dims(); auto *x_data = input_x->data<T>(); auto *qkv_weight_data = qkv_weight->data<T>(); auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>(); auto *qkv_out_data = qkv_out->mutable_data<T>(ctx.GetPlace()); auto *qkv_bias_out_data = (qkv_bias == nullptr) ? nullptr : qkv_bias_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for FMHA. auto *transpose_out_2_data = transpose_out_2->mutable_data<T>(ctx.GetPlace()); auto *cache_kv_out_data = (cache_kv_out == nullptr) ? nullptr : cache_kv_out->mutable_data<T>(ctx.GetPlace()); auto *qk_out_data = qk_out->mutable_data<T>(ctx.GetPlace()); auto *qktv_out_data = qktv_out->mutable_data<T>(ctx.GetPlace()); auto *src_mask_out_data = (src_mask == nullptr) ? nullptr : src_mask_out->mutable_data<T>(ctx.GetPlace()); auto *softmax_out_data = softmax_out->mutable_data<T>(ctx.GetPlace()); auto *attn_dropout_mask_out_data = attn_dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace()); auto *attn_dropout_out_data = attn_dropout_out->mutable_data<T>(ctx.GetPlace()); auto *fmha_out_data = fmha_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for out_linear. auto *out_linear_weight_data = out_linear_weight->data<T>(); auto *out_linear_bias_data = (out_linear_bias == nullptr) ? 
nullptr : out_linear_bias->data<T>(); auto *out_linear_out_data = out_linear_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for bias+dropout+residual+layernorm auto *dropout_mask_out_data = dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace()); auto *final_out_data = out->mutable_data<T>(ctx.GetPlace()); int batch_size = input_x_dims[0]; int max_seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int num_head = qkv_w_dims[1]; int dim_head = qkv_w_dims[2]; int bsz_seq = batch_size * max_seq_len; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; auto layer_norm_compute = AttnLayerNorm<T>(ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed); bool compute_bias = true; if (qkv_bias == nullptr) { compute_bias = false; } // (transA, transB, compute_bias) = (false, true, true) auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(), false, true, bsz_seq, output_size, input_size, compute_bias); AttnDropoutParam attn_dropout_param( is_test_1, dropout_implementation_1, attn_dropout_rate, is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1); auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head, dim_head, attn_dropout_param); output_size = hidden_size; // (transA, transB, compute_bias) = (false, false, false) // NOTE(Yuang Liu): For general input size == output size, change the // position won't have effects. For mp, the output size is mp_head * dkey // which is actually the input size. While the input size is hidden size, // which is actually the output size. So for out linear, switch the // input size and output size. auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(), false, false, bsz_seq, input_size, output_size, false); DropoutParam dropout_param2(ctx, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2, ln_epsilon); if (pre_layer_norm) { auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>()); auto *ln_bias_data = (ln_bias == nullptr ? 
nullptr : ln_bias->data<U>()); auto *ln_mean_data = ln_mean->mutable_data<U>(ctx.GetPlace()); auto *ln_var_data = ln_var->mutable_data<U>(ctx.GetPlace()); auto *ln_out_data = ln_out->mutable_data<T>(ctx.GetPlace()); layer_norm_compute.ComputeForward(x_data, ln_scale_data, ln_bias_data, ln_out_data, ln_mean_data, ln_var_data); qkv_compute.ComputeForward(qkv_weight, ln_out, qkv_bias, qkv_out, qkv_bias_out); } else { qkv_compute.ComputeForward(qkv_weight, input_x, qkv_bias, qkv_out, qkv_bias_out); } if (qkv_bias == nullptr) { fmha_ref_compute.ComputeForward( *qkv_out, cache_kv, src_mask, transpose_out_2, cache_kv_out, qk_out, src_mask_out, softmax_out, attn_dropout_mask_out, attn_dropout_out, qktv_out, fmha_out); } else { fmha_ref_compute.ComputeForward( *qkv_bias_out, cache_kv, src_mask, transpose_out_2, cache_kv_out, qk_out, src_mask_out, softmax_out, attn_dropout_mask_out, attn_dropout_out, qktv_out, fmha_out); } // fmha_out: [batch_size, seq_len, num_head, head_dim] // weight: [embed_dim, embed_dim] // out_linear_out: [batch_size, seq_len, embed_dim] out_linear_compute.ComputeForward(out_linear_weight, fmha_out, nullptr, out_linear_out, nullptr); // tensor model parallel AllReduce<T>(*out_linear_out, ring_id, ctx.cuda_device_context()); if (pre_layer_norm) { // output = (residual + dropout(input + bias)) fused_dropout_layernorm_helper.ResidualDropoutBias( ctx.cuda_device_context(), out_linear_out_data, x_data, out_linear_bias_data, final_out_data, dropout_mask_out_data); } else { auto *ln_scale_2_data = (ln_scale_2 == nullptr ? nullptr : ln_scale_2->data<U>()); auto *ln_bias_2_data = (ln_bias_2 == nullptr ? nullptr : ln_bias_2->data<U>()); auto *bias_dropout_residual_out_data = bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace()); auto *ln_mean_2_data = ln_mean_2->mutable_data<U>(ctx.GetPlace()); auto *ln_var_2_data = ln_var_2->mutable_data<U>(ctx.GetPlace()); // output = layernorm(residual + dropout(input + bias)) fused_dropout_layernorm_helper.LayernormResidualDropoutBias( ctx.cuda_device_context(), out_linear_out_data, x_data, out_linear_bias_data, ln_scale_2_data, ln_bias_2_data, bias_dropout_residual_out_data, dropout_mask_out_data, final_out_data, ln_mean_2_data, ln_var_2_data); } } }; template <typename T> class FusedAttentionGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); const float ln2epsilon = ctx.Attr<float>("ln_epsilon"); float attn_dropout_prob = ctx.Attr<float>("attn_dropout_rate"); bool is_test_1 = ctx.Attr<bool>("is_test"); auto &dropout_implementation_1 = ctx.Attr<std::string>("attn_dropout_implementation"); bool is_upscale_in_train_1 = (dropout_implementation_1 == "upscale_in_train"); auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr; bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed"); int seed_val_1 = ctx.Attr<int>("attn_dropout_seed"); int ring_id = ctx.Attr<int>("ring_id"); // get inputs. auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); auto *d_y_data = d_y->data<T>(); // fw input auto *input_x = ctx.Input<Tensor>("X"); auto *ln_scale = ctx.Input<Tensor>("LnScale"); auto *ln_2_scale = ctx.Input<Tensor>("Ln2Scale"); auto *x_data = input_x->data<T>(); auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>()); auto *ln_2_scale_data = (ln_2_scale == nullptr ? 
nullptr : ln_2_scale->data<U>()); // fw parameters. auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto *qkv_weight = ctx.Input<Tensor>("QKVW"); auto *qkv_bias = ctx.Input<Tensor>("QKVBias"); auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW"); auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias"); auto *src_mask_data = (src_mask == nullptr ? nullptr : src_mask->data<T>()); auto *qkv_weight_data = qkv_weight->data<T>(); auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>(); auto *out_linear_weight_data = out_linear_weight->data<T>(); auto *out_linear_bias_data = (out_linear_bias == nullptr) ? nullptr : out_linear_bias->data<T>(); // fw output auto *fmha_out = ctx.Input<Tensor>("FMHAOut"); auto *transpose_out_2 = ctx.Input<Tensor>("TransposeOut2"); auto *qk_out = ctx.Input<Tensor>("QKOut"); auto *qktv_out = ctx.Input<Tensor>("QKTVOut"); auto *softmax_out = ctx.Input<Tensor>("SoftmaxOut"); auto *attn_dropout_mask_out = ctx.Input<Tensor>("AttnDropoutMaskOut"); auto *attn_dropout_out = ctx.Input<Tensor>("AttnDropoutOut"); auto *src_mask_out = ctx.Input<Tensor>("SrcMaskOut"); auto *out_linear_out = ctx.Input<Tensor>("OutLinearOut"); auto *ln_2_mean = ctx.Input<Tensor>("Ln2Mean"); auto *ln_2_var = ctx.Input<Tensor>("Ln2Variance"); auto *dropout_mask_out = ctx.Input<Tensor>("DropoutMaskOut"); auto *bias_dropout_residual_out = ctx.Input<Tensor>("BiasDropoutResidualOut"); auto *fmha_out_data = fmha_out->data<T>(); auto *transpose_out_2_data = transpose_out_2->data<T>(); auto *qk_out_data = qk_out->data<T>(); auto *qktv_out_data = qktv_out->data<T>(); auto *softmax_out_data = softmax_out->data<T>(); auto *src_mask_out_data = (src_mask == nullptr) ? nullptr : src_mask_out->data<T>(); auto *out_linear_out_data = out_linear_out->data<T>(); auto *dropout_mask_out_data = dropout_mask_out->data<uint8_t>(); // output's grad auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_qkv_out = ctx.Output<Tensor>(framework::GradVarName("QKVOut")); auto *d_qkv_bias_out = ctx.Output<Tensor>(framework::GradVarName("QKVBiasOut")); auto *d_qktv_out = ctx.Output<Tensor>(framework::GradVarName("QKTVOut")); auto *d_transpose_out_2 = ctx.Output<Tensor>(framework::GradVarName("TransposeOut2")); auto *d_qk_out = ctx.Output<Tensor>(framework::GradVarName("QKOut")); auto *d_softmax_out = ctx.Output<Tensor>(framework::GradVarName("SoftmaxOut")); auto *d_attn_dropout_out = ctx.Output<Tensor>(framework::GradVarName("AttnDropoutOut")); auto *d_src_mask_out = ctx.Output<Tensor>(framework::GradVarName("SrcMaskOut")); auto *d_fmha_out = ctx.Output<Tensor>(framework::GradVarName("FMHAOut")); auto *d_out_linear_out = ctx.Output<Tensor>(framework::GradVarName("OutLinearOut")); auto *d_bias_dropout_residual_out = ctx.Output<Tensor>(framework::GradVarName("BiasDropoutResidualOut")); auto *d_x_data = d_x->mutable_data<T>(ctx.GetPlace()); // when qkv_bias is not nullptr, d_qkv_out is equals to d_qkv_bias_out, the // space can be reused. auto *d_qkv_out_data = (d_qkv_bias_out != nullptr) ? nullptr : d_qkv_out->mutable_data<T>(ctx.GetPlace()); auto *d_qkv_bias_out_data = (d_qkv_bias_out == nullptr) ? 
nullptr : d_qkv_bias_out->mutable_data<T>(ctx.GetPlace()); auto *d_qktv_out_data = d_qktv_out->mutable_data<T>(ctx.GetPlace()); auto *d_transpose_out_2_data = d_transpose_out_2->mutable_data<T>(ctx.GetPlace()); auto *d_qk_out_data = d_qk_out->mutable_data<T>(ctx.GetPlace()); auto *d_softmax_out_data = d_softmax_out->mutable_data<T>(ctx.GetPlace()); auto *d_attn_dropout_out_data = d_attn_dropout_out->mutable_data<T>(ctx.GetPlace()); auto *d_src_mask_out_data = (src_mask == nullptr) ? nullptr : d_src_mask_out->mutable_data<T>(ctx.GetPlace()); auto *d_fmha_out_data = d_fmha_out->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_out_data = d_out_linear_out->mutable_data<T>(ctx.GetPlace()); // parameter grad auto *d_qkv_weight = ctx.Output<Tensor>(framework::GradVarName("QKVW")); auto *d_qkv_bias = ctx.Output<Tensor>(framework::GradVarName("QKVBias")); auto *d_out_linear_weight = ctx.Output<Tensor>(framework::GradVarName("OutLinearW")); auto *d_out_linear_bias = ctx.Output<Tensor>(framework::GradVarName("OutLinearBias")); auto *d_ln_2_scale = ctx.Output<Tensor>(framework::GradVarName("Ln2Scale")); auto *d_ln_2_bias = ctx.Output<Tensor>(framework::GradVarName("Ln2Bias")); auto *d_qkv_weight_data = d_qkv_weight->mutable_data<T>(ctx.GetPlace()); auto *d_qkv_bias_data = (d_qkv_bias == nullptr) ? nullptr : d_qkv_bias->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_weight_data = d_out_linear_weight->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_bias_data = (d_out_linear_bias == nullptr) ? nullptr : d_out_linear_bias->mutable_data<T>(ctx.GetPlace()); const auto input_x_dims = input_x->dims(); const auto qkv_w_dims = qkv_weight->dims(); int batch_size = input_x_dims[0]; int max_seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int num_head = qkv_w_dims[1]; int dim_head = qkv_w_dims[2]; int bsz_seq = batch_size * max_seq_len; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; Tensor d_residual; d_residual.Resize(input_x_dims); T *d_residual_data = d_residual.mutable_data<T>(ctx.GetPlace()); bool transA = false; bool transB = true; bool compute_qkv_bias = true; if (qkv_bias == nullptr) { compute_qkv_bias = false; } auto layer_norm_compute = AttnLayerNorm<T>(ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed); auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq, output_size, input_size, compute_qkv_bias); AttnDropoutParam attn_dropout_param( is_test_1, dropout_implementation_1, attn_dropout_prob, is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1); auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head, dim_head, attn_dropout_param); output_size = hidden_size; transA = false; transB = false; bool compute_bias = false; // (b*s, num_head * dim_head) * (num_head * dim_head, dim_embed) auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq, input_size, output_size, compute_bias); DropoutParam dropout_param2(ctx, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2, ln2epsilon); if (pre_layer_norm) { fused_dropout_layernorm_helper.ResidualDropoutBiasGrad( ctx.cuda_device_context(), d_y_data, dropout_mask_out_data, d_out_linear_out_data, d_residual_data, d_out_linear_bias_data); } else { auto *ln_2_mean_data = ln_2_mean->data<U>(); auto *ln_2_var_data = ln_2_var->data<U>(); auto *bias_dropout_residual_out_data = 
bias_dropout_residual_out->data<T>(); auto *d_ln_2_scale_data = (d_ln_2_scale == nullptr ? nullptr : d_ln_2_scale->mutable_data<U>(ctx.GetPlace())); auto *d_ln_2_bias_data = (d_ln_2_bias == nullptr ? nullptr : d_ln_2_bias->mutable_data<U>(ctx.GetPlace())); auto *d_bias_dropout_residual_out_data = d_bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace()); fused_dropout_layernorm_helper.LayernormResidualDropoutBiasGrad( ctx.cuda_device_context(), d_y_data, bias_dropout_residual_out_data, dropout_mask_out_data, ln_2_scale_data, ln_2_mean_data, ln_2_var_data, d_bias_dropout_residual_out_data, d_ln_2_scale_data, d_ln_2_bias_data, d_out_linear_out_data, d_out_linear_bias_data, d_residual_data); } out_linear_compute.ComputeBackward(fmha_out, out_linear_weight, d_out_linear_out, d_fmha_out, d_out_linear_weight, nullptr); if (qkv_bias != nullptr) { fmha_ref_compute.ComputeBackward( *transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out, *attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out, d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out, d_transpose_out_2, nullptr, d_qkv_bias_out); } else { fmha_ref_compute.ComputeBackward( *transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out, *attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out, d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out, d_transpose_out_2, nullptr, d_qkv_out); } if (pre_layer_norm) { auto *ln_mean = ctx.Input<Tensor>("LnMean"); auto *ln_var = ctx.Input<Tensor>("LnVariance"); auto *ln_out = ctx.Input<Tensor>("LnOut"); auto *ln_mean_data = ln_mean->data<U>(); auto *ln_var_data = ln_var->data<U>(); auto *ln_out_data = ln_out->data<T>(); auto *d_ln_out = ctx.Output<Tensor>(framework::GradVarName("LnOut")); auto *d_ln_scale = ctx.Output<Tensor>(framework::GradVarName("LnScale")); auto *d_ln_bias = ctx.Output<Tensor>(framework::GradVarName("LnBias")); auto *d_ln_out_data = d_ln_out->mutable_data<T>(ctx.GetPlace()); auto *d_ln_scale_data = (d_ln_scale == nullptr ? nullptr : d_ln_scale->mutable_data<U>(ctx.GetPlace())); auto *d_ln_bias_data = (d_ln_bias == nullptr ? 
nullptr : d_ln_bias->mutable_data<U>(ctx.GetPlace())); if (qkv_bias != nullptr) { qkv_compute.ComputeBackward(ln_out, qkv_weight, d_qkv_bias_out, d_ln_out, d_qkv_weight, d_qkv_bias); } else { qkv_compute.ComputeBackward(ln_out, qkv_weight, d_qkv_out, d_ln_out, d_qkv_weight, d_qkv_bias); } // tensor model parallel AllReduce<T>(*d_ln_out, ring_id, ctx.cuda_device_context()); layer_norm_compute.ComputeBackward(x_data, d_ln_out_data, ln_scale_data, ln_mean_data, ln_var_data, d_x_data, d_ln_scale_data, d_ln_bias_data); } else { if (qkv_bias != nullptr) { qkv_compute.ComputeBackward(input_x, qkv_weight, d_qkv_bias_out, d_x, d_qkv_weight, d_qkv_bias); } else { qkv_compute.ComputeBackward(input_x, qkv_weight, d_qkv_out, d_x, d_qkv_weight, d_qkv_bias); } // tensor model parallel AllReduce<T>(*d_x, ring_id, ctx.cuda_device_context()); } // gradient accumulation std::vector<const Tensor *> ins; std::vector<Tensor *> outs; ins.emplace_back(&d_residual); ins.emplace_back(d_x); outs.emplace_back(d_x); int elewise_add_axis = -1; phi::funcs::BroadcastKernel<phi::ElementwiseType::kBinary, T, T>( ctx.cuda_device_context(), ins, &outs, elewise_add_axis, phi::funcs::AddFunctor<T>()); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(fused_attention, ops::FusedAttentionOpKernel<float>, ops::FusedAttentionOpKernel<double>, ops::FusedAttentionOpKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(fused_attention_grad, ops::FusedAttentionGradKernel<float>, ops::FusedAttentionGradKernel<double>, ops::FusedAttentionGradKernel<plat::float16>);
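// A minimal sketch (not part of the Paddle file above and not its FusedDropoutLayerNormHelper):
// the forward kernel composes "output = residual + dropout(input + bias)" before the optional
// post-layernorm. The elementwise core of that composition, assuming a precomputed keep mask
// and upscale_in_train scaling, looks roughly like this; every name below is illustrative only.
#include <cuda_runtime.h>
#include <stdint.h>

__global__ void residual_dropout_bias_sketch(const float* src, const float* residual,
                                             const float* bias,    // one value per column
                                             const uint8_t* mask,  // 1 = keep, 0 = drop
                                             float* out, int rows, int cols,
                                             float keep_prob) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= rows * cols) return;
    int col = idx % cols;
    float v = src[idx] + bias[col];          // bias add
    v = mask[idx] ? v / keep_prob : 0.0f;    // dropout with upscale-in-train scaling
    out[idx] = v + residual[idx];            // residual add
}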
dea4f1381cf15a9761be1b61fd8b745551c97a5c.hip
// !!! This is a file automatically generated by hipify!!!
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/generate.h>
#include<thrust/reduce.h>
#include<thrust/scan.h>
#include<device_launch_parameters.h>
#include<stdio.h>
#include<stdlib.h>
#define DATA_SIZE 10000000

int main(void)
{
    int count = 0;
    int d_scan;
    float t_scan;
    hipEvent_t start1,stop1;
    hipEventCreate(&start1);
    hipEventCreate(&stop1);
    //initialize random values on host
    thrust::host_vector<int> data(DATA_SIZE);
    thrust::host_vector<int> h(DATA_SIZE);
    thrust::generate(data.begin(), data.end(), rand);
    //CPU reference scan written into h
    thrust::exclusive_scan(data.begin(), data.end(), h.begin());
    hipEventRecord(start1,NULL);
    //copy host vector to device
    thrust::device_vector<int> gpudata = data;
    thrust::exclusive_scan(gpudata.begin(), gpudata.end(), gpudata.begin());
    //copy back to host
    thrust::copy(gpudata.begin(), gpudata.end(), data.begin());
    hipEventRecord(stop1, NULL);
    hipEventSynchronize(stop1);
    hipEventElapsedTime(&t_scan, start1, stop1);
    printf("\n Scan time is %f ms", t_scan);
    // thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
    //integer scans must match exactly, so compare for equality
    for (int i = 0; i < DATA_SIZE; i++) {
        if (h[i] == gpudata[i])
            count++;
        else
            break;
    }
    if (count < DATA_SIZE)
        printf("\n Error!!");
    else
        printf("Looks good");
}
dea4f1381cf15a9761be1b61fd8b745551c97a5c.cu
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/generate.h>
#include<thrust/reduce.h>
#include<thrust/scan.h>
#include<device_launch_parameters.h>
#include<stdio.h>
#include<stdlib.h>
#define DATA_SIZE 10000000

int main(void)
{
    int count = 0;
    int d_scan;
    float t_scan;
    cudaEvent_t start1,stop1;
    cudaEventCreate(&start1);
    cudaEventCreate(&stop1);
    //initialize random values on host
    thrust::host_vector<int> data(DATA_SIZE);
    thrust::host_vector<int> h(DATA_SIZE);
    thrust::generate(data.begin(), data.end(), rand);
    //CPU reference scan written into h
    thrust::exclusive_scan(data.begin(), data.end(), h.begin());
    cudaEventRecord(start1,NULL);
    //copy host vector to device
    thrust::device_vector<int> gpudata = data;
    thrust::exclusive_scan(gpudata.begin(), gpudata.end(), gpudata.begin());
    //copy back to host
    thrust::copy(gpudata.begin(), gpudata.end(), data.begin());
    cudaEventRecord(stop1, NULL);
    cudaEventSynchronize(stop1);
    cudaEventElapsedTime(&t_scan, start1, stop1);
    printf("\n Scan time is %f ms", t_scan);
    // thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
    //integer scans must match exactly, so compare for equality
    for (int i = 0; i < DATA_SIZE; i++) {
        if (h[i] == gpudata[i])
            count++;
        else
            break;
    }
    if (count < DATA_SIZE)
        printf("\n Error!!");
    else
        printf("Looks good");
}
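// A minimal sketch of an alternative verification for the scan benchmark above: the loop in
// main() reads gpudata[i] element by element from the host, which costs one device-to-host
// transfer per element. Copying the device result back once and comparing with thrust::equal
// avoids that; the function name check_scan below is illustrative only.
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/equal.h>
#include <cstdio>

int check_scan(const thrust::host_vector<int>& input) {
    thrust::host_vector<int> reference(input.size());
    thrust::exclusive_scan(input.begin(), input.end(), reference.begin()); // CPU reference
    thrust::device_vector<int> d = input;                                  // one host-to-device copy
    thrust::exclusive_scan(d.begin(), d.end(), d.begin());                 // GPU scan in place
    thrust::host_vector<int> result = d;                                   // one device-to-host copy
    bool ok = thrust::equal(reference.begin(), reference.end(), result.begin());
    std::printf(ok ? "Looks good\n" : "Error!!\n");
    return ok ? 0 : 1;
}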
d47e02ebdbb967a0727dd64c0f18d35b5c467592.hip
// !!! This is a file automatically generated by hipify!!!
/* Matrix Inversion
 * Group F: M. Lechner, P. Knöbel, J. Lövhall
 *
 * Tools used for debugging printing and more
 */
#include "includes.h"

/* Debug output */
void tools_gpuAssert(hipError_t code, const char *file, int line)
{
    if (code != hipSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    }
}

/* Allocates the memory and creates a ID Matrix with n x n dimension */
float * tools_create_identity_matrix(int n){
    float * out = (float *)malloc(sizeof(float)*n*n);
    int i;
    for(i = 0; i < n; i++){
        int j;
        for(j = 0;j < n; j++){
            out[i*n + j] = 0.0;
        }
        out[i*n + i] = 1.0;
    }
    return out;
}

/* Print a Matrix with with N x N dimension */
void tools_print_matrix(float * matrix, int N){
    int i;
    for(i = 0; i < N; ++i){
        int j;
        for(j = 0;j < N; ++j){
            printf("%f ",matrix[i*N + j]);
        }
        printf("\n\r");
    }
    printf("\n\r");
}

/* Print a Matrix more beautiful */
void tools_WAprint(int size_of_one_side, float * matrix){
    printf("WA output form:\n");
    printf("inverse {");
    for(int x = 0; x < size_of_one_side; x++) {
        printf("{");
        for(int y = 0; y < size_of_one_side; y++) {
            printf("%1.0f", matrix[x*size_of_one_side + y]);
            if(y != size_of_one_side-1) printf(",");
        }
        printf("}");
        if(x != size_of_one_side-1) printf(",");
    }
    printf("}\n");
}

/* checks for zero with a window of e^-5 */
int tools_zero(float f){
    if(abs(f*1e5) < 1){
        return 1;
    }
    return 0;
}

/* Reads a matrix from stin returns the size of the matrix on success, otherwise -1 */
int tools_read_matrix(float *** _matrix){
    int N;
    int r = scanf("%d",&N);
    if(r== EOF){
        printf("error reading input\n\rexiting");
        return -1;
    }else if (r == 0){
        printf("no input, please supply a matrix to stdin");
        return -1;
    }
    //we read the integer
    //create arrays to store the matrix in
    float ** matrix = (float **)malloc((sizeof(float*)*N)); //square matrix N*N
    int i;
    for(i = 0; i < N; ++i){
        matrix[i] = (float*)malloc(sizeof(float)*N);
    }
    for(i = 0; i < N; ++i){
        int j;
        for(j = 0;j < N; ++j){
            int r;
            int t = 0; //tries
            while((r = scanf("%f",&(matrix[i][j]))) == 0 && t < 100){
                t++;
            }
            if(r == EOF){
                printf("error reading input on row %d and column %d \n\rexiting",i,j);
                // free_matrix(matrix,N);
                return -1;
            }
            else if(r == 0){
                printf("failed to read input after multiple tries\n\r");
                //free_matrix(matrix,N);
                return -1;
            }
        }
    }
    *_matrix = matrix;
    return N;
}

/* simply check the bit patterns.. hope that the gpu uses the same precision as the cpu */
int tools_is_equal(float * a, float * b, int size){
    int i;
    int ret = 1;
    for(i = 0;i < size;i++){
        if(abs(a[i] - b[i]) > 0.00001){
            printf("element %d is not equal. GPU = %f, CPU = %f\n",i,a[i],b[i]);
            ret = 0;
        }
    }
    return ret;
}
d47e02ebdbb967a0727dd64c0f18d35b5c467592.cu
/* Matrix Inversion * Group F: M. Lechner, P. Knöbel, J. Lövhall * * Tools used for debugging printing and more */ #include "includes.h" /* Debug output */ void tools_gpuAssert(cudaError_t code, const char *file, int line) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); } } /* Allocates the memory and creates a ID Matrix with n x n dimension */ float * tools_create_identity_matrix(int n){ float * out = (float *)malloc(sizeof(float)*n*n); int i; for(i = 0; i < n; i++){ int j; for(j = 0;j < n; j++){ out[i*n + j] = 0.0; } out[i*n + i] = 1.0; } return out; } /* Print a Matrix with with N x N dimension */ void tools_print_matrix(float * matrix, int N){ int i; for(i = 0; i < N; ++i){ int j; for(j = 0;j < N; ++j){ printf("%f ",matrix[i*N + j]); } printf("\n\r"); } printf("\n\r"); } /* Print a Matrix more beautiful */ void tools_WAprint(int size_of_one_side, float * matrix){ printf("WA output form:\n"); printf("inverse {"); for(int x = 0; x < size_of_one_side; x++) { printf("{"); for(int y = 0; y < size_of_one_side; y++) { printf("%1.0f", matrix[x*size_of_one_side + y]); if(y != size_of_one_side-1) printf(","); } printf("}"); if(x != size_of_one_side-1) printf(","); } printf("}\n"); } /* checks for zero with a window of e^-5 */ int tools_zero(float f){ if(abs(f*1e5) < 1){ return 1; } return 0; } /* Reads a matrix from stin returns the size of the matrix on success, otherwise -1 */ int tools_read_matrix(float *** _matrix){ int N; int r = scanf("%d",&N); if(r== EOF){ printf("error reading input\n\rexiting"); return -1; }else if (r == 0){ printf("no input, please supply a matrix to stdin"); return -1; } //we read the integer //create arrays to store the matrix in float ** matrix = (float **)malloc((sizeof(float*)*N)); //square matrix N*N int i; for(i = 0; i < N; ++i){ matrix[i] = (float*)malloc(sizeof(float)*N); } for(i = 0; i < N; ++i){ int j; for(j = 0;j < N; ++j){ int r; int t = 0; //tries while((r = scanf("%f",&(matrix[i][j]))) == 0 && t < 100){ t++; } if(r == EOF){ printf("error reading input on row %d and column %d \n\rexiting",i,j); // free_matrix(matrix,N); return -1; } else if(r == 0){ printf("failed to read input after multiple tries\n\r"); //free_matrix(matrix,N); return -1; } } } *_matrix = matrix; return N; } /* simply check the bit patterns.. hope that the gpu uses the same precision as the cpu */ int tools_is_equal(float * a, float * b, int size){ int i; int ret = 1; for(i = 0;i < size;i++){ if(abs(a[i] - b[i]) > 0.00001){ printf("element %d is not equal. GPU = %f, CPU = %f\n",i,a[i],b[i]); ret = 0; } } return ret; }
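// A minimal sketch of how tools_gpuAssert above is typically driven from call sites: a wrapper
// macro passes __FILE__/__LINE__ automatically. The macro name GPU_CHECK and the example
// function are illustrative and not part of the original tools file; shown for the CUDA variant.
#include <cuda_runtime.h>
#include <stdio.h>

void tools_gpuAssert(cudaError_t code, const char *file, int line); // defined in the tools file above

#define GPU_CHECK(call) tools_gpuAssert((call), __FILE__, __LINE__)

void example_usage(float *host_buf, size_t n){
    float *dev_buf = NULL;
    GPU_CHECK(cudaMalloc((void **)&dev_buf, n * sizeof(float)));
    GPU_CHECK(cudaMemcpy(dev_buf, host_buf, n * sizeof(float), cudaMemcpyHostToDevice));
    GPU_CHECK(cudaFree(dev_buf));
}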
996bda1a8fe3b9128b87c9b89ab2adecd135c372.hip
// !!! This is a file automatically generated by hipify!!! // #pragma once #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <iostream> #include "hip/hip_runtime.h" #include <iomanip> // std::setprecision #include "device_launch_parameters.h" #include "input.h" using namespace std; #define NUM_THREADS_PER_BLOCK 512 int* create_shifts (char* pattern); int linear_horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len, int myId); __global__ void horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len); __global__ void prescan(int *g_odata, int *g_idata, int n); int determineNumBlocks(vector<string_chunk> chunks) { int numBlocks = 0; for (int i = 0; i < chunks.size(); i = i + NUM_THREADS_PER_BLOCK) { numBlocks++; } return numBlocks; } /* * Driver function * argv[0] is target pattern string * argv[1] is text path */ int main(int argc, char* argv[]) { const int TABLE_SIZ = 126; int target_len = 0; if (argc == 2 && (strcmp(argv[1], "-h") || strcmp(argv[1], "--help"))){ cout << "`match.exe` finds exact matches to a target string in text files." << endl << "Type ./main.exe {target_string} {text file path} to use the program." << endl << "Text file paths must be relative to the directory of `main.exe`." << endl; exit(0); } else if (argc == 1) { cout << "ERROR: Please pass in a target string and a file path." << endl; exit(-1); } for (int i = 1; i < argc - 1; ++i) { target_len += strlen(argv[i]); } target_len += argc - 3; Input inputObj(argv[argc - 1]); char* flatText = inputObj.flattenText(); char* testPattern = (char*)malloc(target_len * sizeof(char) + 1); string input = argv[1]; if (argc > 3) { for (int i = 2; i < argc - 1; ++i) { input = input + " " + argv[i]; } } strcpy (testPattern, input.data()); testPattern[target_len] = '\0'; int* skipTable = create_shifts(testPattern); unsigned int* numMatches = (unsigned int*)malloc(1 * sizeof(unsigned int)); *numMatches = 0; int fullTextSize = inputObj.getChunks().size() * CHUNK_SIZE * sizeof(char); int patternSize = strlen(testPattern) * sizeof(char); int skipTableSize = TABLE_SIZ * sizeof(int); char* d_fullText; char* d_testPattern; int* d_skipTable; unsigned int* d_numMatches; unsigned int* parallel_result = (unsigned int*) malloc(sizeof(unsigned int)); hipMalloc((void**)& d_fullText, fullTextSize); hipMalloc((void**)& d_testPattern, patternSize); hipMalloc((void**)& d_skipTable, skipTableSize); hipMalloc((void**)& d_numMatches, sizeof(unsigned int)); hipMemcpy(d_fullText, flatText, fullTextSize, hipMemcpyHostToDevice); hipMemcpy(d_testPattern, testPattern, patternSize, hipMemcpyHostToDevice); hipMemcpy(d_skipTable, skipTable, skipTableSize, hipMemcpyHostToDevice); hipMemcpy(d_numMatches, numMatches, sizeof(unsigned int), hipMemcpyHostToDevice); time_t start, end, start1, end1 = 0; int text_len = strlen(flatText); int pat_len = strlen(testPattern); int num_chunks = inputObj.getChunks().size(); int numBlocks = determineNumBlocks(inputObj.getChunks()); hipDeviceSynchronize(); time(&start); start = clock(); horspool_match << <numBlocks, NUM_THREADS_PER_BLOCK, NUM_THREADS_PER_BLOCK * sizeof(int) >> > (d_fullText, d_testPattern, d_skipTable, d_numMatches, CHUNK_SIZE, num_chunks, text_len, pat_len); hipDeviceSynchronize(); hipMemcpy(parallel_result, d_numMatches, sizeof(unsigned int), hipMemcpyDeviceToHost); end = clock(); start1 = clock(); 
unsigned int result = 0; for(int myId =0; myId < numBlocks * NUM_THREADS_PER_BLOCK; myId++){ result += linear_horspool_match(flatText, testPattern, skipTable, numMatches, CHUNK_SIZE, num_chunks, text_len, pat_len, myId); } end1 = clock(); hipDeviceSynchronize(); // Calculating total time taken by the program. double time_taken = double(end - start)/ CLOCKS_PER_SEC; cout << "Time taken by parallel program: " << setprecision(9) << time_taken << " secs." << endl; cout << "There are " << *parallel_result << " exact matches to string `" << input << "`" << endl << "found by parallel program in file `" << argv[argc-1] <<"`"<< endl << endl; time_taken = double(end1 - start1)/ CLOCKS_PER_SEC; cout << "Time taken by linear program: " << setprecision(9) << time_taken << " secs." << endl; cout << "There are " << result << " exact matches to string `" << input << "`" << endl << "found by linear program in file `" << argv[argc-1] <<"`"<< endl; hipFree(d_fullText); hipFree(d_testPattern); hipFree(d_skipTable); hipFree(d_numMatches); free(testPattern); free(skipTable); free(numMatches); } int linear_horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len, int myId) { const int TABLE_SIZ = 126; int count = 0; int text_length = (chunk_size * myId) + chunk_size + pat_len - 1; // don't need to check first pattern_length - 1 characters int i = (myId*chunk_size) + pat_len - 1; int k = 0; while(i < text_length) { // reset matched character count k = 0; if (i >= text_size) { // break out if i tries to step past text length break; } if (text[i] >= TABLE_SIZ || text[i] < 0) { // move to next char if unknown char (Unicode, etc.) ++i; } else { while(k <= pat_len - 1 && pattern[pat_len - 1 - k] == text[i - k]) { // increment matched character count k++; } if(k == pat_len) { // increment pattern count, text index ++count; ++i; } else { // add on shift if known char i = i + shift_table[text[i]]; } } } return count; // Add count to total matches atomically } /** * Purpose: * Boyer-Moore-Horspool pattern matching algorithm implementation * * Args: * text {char*}: Text c-string - still text * pattern {char*}: Target c-string - still pattern * shift_table {int*}: Skip table - shift table * num_matches {int}: Total match count - num_matches * chunk_size {int}: Length of chunk size * num_chunks {int}: Total number of chunks * text_size {int}: Integer text length * pat_len {int}: Integer pattern length * Returns: * None */ __global__ void horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len) { const int TABLE_SIZ = 126; int count = 0; int myId = threadIdx.x + blockDim.x * blockIdx.x; if(myId > num_chunks){ //if thread is an invalid thread return; } int text_length = (chunk_size * myId) + chunk_size + pat_len - 1; // don't need to check first pattern_length - 1 characters int i = (myId*chunk_size) + pat_len - 1; int k = 0; while(i < text_length) { // reset matched character count k = 0; if (i >= text_size) { // break out if i tries to step past text length break; } if (text[i] >= TABLE_SIZ || text[i] < 0) { // move to next char if unknown char (Unicode, etc.) 
++i; } else { while(k <= pat_len - 1 && pattern[pat_len - 1 - k] == text[i - k]) { // increment matched character count k++; } if(k == pat_len) { // increment pattern count, text index ++count; ++i; } else { // add on shift if known char i = i + shift_table[text[i]]; } } } atomicAdd(num_matches, count); } /** * Purpose: * Create shift table for Boyer-Moore-Horspool algorithm * * Args: * pattern {char*}: desired pattern c-string */ int* create_shifts (char* pattern) { // Printable ASCII chars are 32-126 inclusive, line break is 10 const int TABLE_SIZ = 126; const int FIRST_ASCII = 32; int length = strlen(pattern); int* shift_table = (int*) malloc (sizeof(int) * TABLE_SIZ); for(int i = 0; i < TABLE_SIZ; i++) { // set all entries to longest shift (pattern length) shift_table[i] = length; } for(int j = 0; j < length - 1; j++) { // set pattern characters to shortest shifts shift_table[pattern[j]] = length - 1 - j; } // assign shift of 1 for unprintable characters for (int i = 0; i < FIRST_ASCII; ++i) { shift_table[i] = 1; } return shift_table; }
996bda1a8fe3b9128b87c9b89ab2adecd135c372.cu
// #pragma once #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <iostream> #include "cuda_runtime.h" #include <iomanip> // std::setprecision #include "device_launch_parameters.h" #include "input.h" using namespace std; #define NUM_THREADS_PER_BLOCK 512 int* create_shifts (char* pattern); int linear_horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len, int myId); __global__ void horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len); __global__ void prescan(int *g_odata, int *g_idata, int n); int determineNumBlocks(vector<string_chunk> chunks) { int numBlocks = 0; for (int i = 0; i < chunks.size(); i = i + NUM_THREADS_PER_BLOCK) { numBlocks++; } return numBlocks; } /* * Driver function * argv[0] is target pattern string * argv[1] is text path */ int main(int argc, char* argv[]) { const int TABLE_SIZ = 126; int target_len = 0; if (argc == 2 && (strcmp(argv[1], "-h") || strcmp(argv[1], "--help"))){ cout << "`match.exe` finds exact matches to a target string in text files." << endl << "Type ./main.exe {target_string} {text file path} to use the program." << endl << "Text file paths must be relative to the directory of `main.exe`." << endl; exit(0); } else if (argc == 1) { cout << "ERROR: Please pass in a target string and a file path." << endl; exit(-1); } for (int i = 1; i < argc - 1; ++i) { target_len += strlen(argv[i]); } target_len += argc - 3; Input inputObj(argv[argc - 1]); char* flatText = inputObj.flattenText(); char* testPattern = (char*)malloc(target_len * sizeof(char) + 1); string input = argv[1]; if (argc > 3) { for (int i = 2; i < argc - 1; ++i) { input = input + " " + argv[i]; } } strcpy (testPattern, input.data()); testPattern[target_len] = '\0'; int* skipTable = create_shifts(testPattern); unsigned int* numMatches = (unsigned int*)malloc(1 * sizeof(unsigned int)); *numMatches = 0; int fullTextSize = inputObj.getChunks().size() * CHUNK_SIZE * sizeof(char); int patternSize = strlen(testPattern) * sizeof(char); int skipTableSize = TABLE_SIZ * sizeof(int); char* d_fullText; char* d_testPattern; int* d_skipTable; unsigned int* d_numMatches; unsigned int* parallel_result = (unsigned int*) malloc(sizeof(unsigned int)); cudaMalloc((void**)& d_fullText, fullTextSize); cudaMalloc((void**)& d_testPattern, patternSize); cudaMalloc((void**)& d_skipTable, skipTableSize); cudaMalloc((void**)& d_numMatches, sizeof(unsigned int)); cudaMemcpy(d_fullText, flatText, fullTextSize, cudaMemcpyHostToDevice); cudaMemcpy(d_testPattern, testPattern, patternSize, cudaMemcpyHostToDevice); cudaMemcpy(d_skipTable, skipTable, skipTableSize, cudaMemcpyHostToDevice); cudaMemcpy(d_numMatches, numMatches, sizeof(unsigned int), cudaMemcpyHostToDevice); time_t start, end, start1, end1 = 0; int text_len = strlen(flatText); int pat_len = strlen(testPattern); int num_chunks = inputObj.getChunks().size(); int numBlocks = determineNumBlocks(inputObj.getChunks()); cudaDeviceSynchronize(); time(&start); start = clock(); horspool_match << <numBlocks, NUM_THREADS_PER_BLOCK, NUM_THREADS_PER_BLOCK * sizeof(int) >> > (d_fullText, d_testPattern, d_skipTable, d_numMatches, CHUNK_SIZE, num_chunks, text_len, pat_len); cudaDeviceSynchronize(); cudaMemcpy(parallel_result, d_numMatches, sizeof(unsigned int), cudaMemcpyDeviceToHost); end = clock(); start1 = clock(); unsigned int result = 0; for(int myId =0; myId < 
numBlocks * NUM_THREADS_PER_BLOCK; myId++){ result += linear_horspool_match(flatText, testPattern, skipTable, numMatches, CHUNK_SIZE, num_chunks, text_len, pat_len, myId); } end1 = clock(); cudaDeviceSynchronize(); // Calculating total time taken by the program. double time_taken = double(end - start)/ CLOCKS_PER_SEC; cout << "Time taken by parallel program: " << setprecision(9) << time_taken << " secs." << endl; cout << "There are " << *parallel_result << " exact matches to string `" << input << "`" << endl << "found by parallel program in file `" << argv[argc-1] <<"`"<< endl << endl; time_taken = double(end1 - start1)/ CLOCKS_PER_SEC; cout << "Time taken by linear program: " << setprecision(9) << time_taken << " secs." << endl; cout << "There are " << result << " exact matches to string `" << input << "`" << endl << "found by linear program in file `" << argv[argc-1] <<"`"<< endl; cudaFree(d_fullText); cudaFree(d_testPattern); cudaFree(d_skipTable); cudaFree(d_numMatches); free(testPattern); free(skipTable); free(numMatches); } int linear_horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len, int myId) { const int TABLE_SIZ = 126; int count = 0; int text_length = (chunk_size * myId) + chunk_size + pat_len - 1; // don't need to check first pattern_length - 1 characters int i = (myId*chunk_size) + pat_len - 1; int k = 0; while(i < text_length) { // reset matched character count k = 0; if (i >= text_size) { // break out if i tries to step past text length break; } if (text[i] >= TABLE_SIZ || text[i] < 0) { // move to next char if unknown char (Unicode, etc.) ++i; } else { while(k <= pat_len - 1 && pattern[pat_len - 1 - k] == text[i - k]) { // increment matched character count k++; } if(k == pat_len) { // increment pattern count, text index ++count; ++i; } else { // add on shift if known char i = i + shift_table[text[i]]; } } } return count; // Add count to total matches atomically } /** * Purpose: * Boyer-Moore-Horspool pattern matching algorithm implementation * * Args: * text {char*}: Text c-string - still text * pattern {char*}: Target c-string - still pattern * shift_table {int*}: Skip table - shift table * num_matches {int}: Total match count - num_matches * chunk_size {int}: Length of chunk size * num_chunks {int}: Total number of chunks * text_size {int}: Integer text length * pat_len {int}: Integer pattern length * Returns: * None */ __global__ void horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len) { const int TABLE_SIZ = 126; int count = 0; int myId = threadIdx.x + blockDim.x * blockIdx.x; if(myId > num_chunks){ //if thread is an invalid thread return; } int text_length = (chunk_size * myId) + chunk_size + pat_len - 1; // don't need to check first pattern_length - 1 characters int i = (myId*chunk_size) + pat_len - 1; int k = 0; while(i < text_length) { // reset matched character count k = 0; if (i >= text_size) { // break out if i tries to step past text length break; } if (text[i] >= TABLE_SIZ || text[i] < 0) { // move to next char if unknown char (Unicode, etc.) 
++i; } else { while(k <= pat_len - 1 && pattern[pat_len - 1 - k] == text[i - k]) { // increment matched character count k++; } if(k == pat_len) { // increment pattern count, text index ++count; ++i; } else { // add on shift if known char i = i + shift_table[text[i]]; } } } atomicAdd(num_matches, count); } /** * Purpose: * Create shift table for Boyer-Moore-Horspool algorithm * * Args: * pattern {char*}: desired pattern c-string */ int* create_shifts (char* pattern) { // Printable ASCII chars are 32-126 inclusive, line break is 10 const int TABLE_SIZ = 126; const int FIRST_ASCII = 32; int length = strlen(pattern); int* shift_table = (int*) malloc (sizeof(int) * TABLE_SIZ); for(int i = 0; i < TABLE_SIZ; i++) { // set all entries to longest shift (pattern length) shift_table[i] = length; } for(int j = 0; j < length - 1; j++) { // set pattern characters to shortest shifts shift_table[pattern[j]] = length - 1 - j; } // assign shift of 1 for unprintable characters for (int i = 0; i < FIRST_ASCII; ++i) { shift_table[i] = 1; } return shift_table; }
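// A worked example of the shift table that create_shifts above builds: for the pattern "abcab"
// (length 5) every entry starts at 5, then the loop over the first length-1 characters assigns
// 'a' -> 4, 'b' -> 3, 'c' -> 2 and finally 'a' -> 1 (the later occurrence wins), so the finished
// table holds a=1, b=3, c=2 and 5 for every other printable character. A small host-side check
// of that reasoning (assumes create_shifts from the matcher above is linked in; the function
// name check_shift_table is illustrative):
#include <assert.h>
#include <stdlib.h>

int* create_shifts(char* pattern); // defined in the matcher above

int check_shift_table(void){
    char pattern[] = "abcab";
    int* shifts = create_shifts(pattern);
    assert(shifts['a'] == 1);
    assert(shifts['b'] == 3);
    assert(shifts['c'] == 2);
    assert(shifts['x'] == 5); // character absent from the pattern: full pattern length
    free(shifts);
    return 0;
}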
ce79878d892de2f7c40a6f80fd43e97c27d131b0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "addNccValues.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *prevData = NULL; hipMalloc(&prevData, XSIZE*YSIZE); float *result = NULL; hipMalloc(&result, XSIZE*YSIZE); int patches = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( addNccValues), dim3(gridBlock),dim3(threadBlock), 0, 0, prevData,result,patches); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( addNccValues), dim3(gridBlock),dim3(threadBlock), 0, 0, prevData,result,patches); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( addNccValues), dim3(gridBlock),dim3(threadBlock), 0, 0, prevData,result,patches); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ce79878d892de2f7c40a6f80fd43e97c27d131b0.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "addNccValues.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *prevData = NULL; cudaMalloc(&prevData, XSIZE*YSIZE); float *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); int patches = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); addNccValues<<<gridBlock,threadBlock>>>(prevData,result,patches); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { addNccValues<<<gridBlock,threadBlock>>>(prevData,result,patches); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { addNccValues<<<gridBlock,threadBlock>>>(prevData,result,patches); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
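// The benchmark above records steady_clock timestamps around 1000 asynchronous kernel launches
// without synchronizing before the end timestamp, so it largely measures enqueue overhead rather
// than kernel runtime. A sketch of the same measurement with CUDA events, which are ordered with
// the work on the stream (assumes addNccValues from the included addNccValues.cu is visible; the
// wrapper name is illustrative):
#include <cuda_runtime.h>

float time_ncc_launches(dim3 gridBlock, dim3 threadBlock,
                        const float* prevData, float* result, int patches){
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i) {
        addNccValues<<<gridBlock, threadBlock>>>(prevData, result, patches);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);  // wait for all 1000 launches to finish
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;                   // total milliseconds for 1000 kernel executions
}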
eaedf9eaaccab0fc1d415da48079d0bf587e763f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust\sort.h> #include <thrust\device_ptr.h> #include <thrust\for_each.h> #include <thrust\iterator\zip_iterator.h> #include <iostream> #include <iomanip> #include <fstream> #include "particle_simulation.h" #include "util.h" #include "cgtk\include\clock.h" #include "boundary_map.h" #include <thrust\scan.h> #include <stdexcept> #include "portable_pixmap.h" #include "arr.h" using namespace std; //----------------------------------------------------------------------------- // DEVICE CODE //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- // global device variables //----------------------------------------------------------------------------- __constant__ SimulationParameters gSimParamsDev; texture<float, hipTextureType1D, hipReadModeElementType> gParticleVertexData; texture<float, hipTextureType1D, hipReadModeElementType> gParticleSimulationData; texture<int, hipTextureType1D, hipReadModeElementType> gCellStartList; texture<int, hipTextureType1D, hipReadModeElementType> gCellEndList; texture<int, hipTextureType1D, hipReadModeElementType> gSortedParticleIdList; texture<int, hipTextureType1D, hipReadModeElementType> gParticleHashList; // information about boundary handling __constant__ float gBoundaryGridOrigin[3]; __constant__ float gBoundaryGridSpacing; __constant__ unsigned int gBoundaryGridDimensions[3]; __constant__ float gBoundaryGridLength[3]; __constant__ float gBoundaryRestDistance; texture<float, hipTextureType3D, hipReadModeElementType> gBoundaryDistances; texture<float, hipTextureType3D, hipReadModeElementType> gBoundaryDensities; texture<float, hipTextureType3D, hipReadModeElementType> gBoundaryViscosities; //----------------------------------------------------------------------------- // declaration of aux. 
functions (device) //----------------------------------------------------------------------------- __device__ inline int3 compute_grid_coordinate (float3 pos, float d); __device__ inline int3 compute_grid_coordinate_sub_particles(float3 pos, float d); __device__ inline int compute_hash_from_grid_coordinate (int i, int j, int k); __device__ inline int compute_hash_from_grid_coordinate_sub_particle (int i, int j, int k); __device__ inline float compute_distance (float3 a, float3 b); __device__ inline float compute_squared_distance (float3 a, float3 b); __device__ inline float norm (const float3& a); __device__ inline void normalize (float3& a); __device__ inline float dot_product (const float3& a, const float3& b); __device__ float compute_particle_density_cell (const float3 &pos, float* pParticleList, int* pParticleIdList, int start, int end); __device__ float compute_sub_particle_density_cell (const float3 &pos, float* particleVertexData, int* particleIdList, int start, int end); __device__ inline void compute_viscosity_pressure_forces_and_ifsurf_cell (const float3& xi, float rhoi, float pi, const float3& vi, float* particleVertexData, float* particleSimulationData, int* particleIdList, int start, int end, float3* pressureForce, float3* viscosityForce, float3* colGra, float* colLapl, float3* sumPosNeighbor, float* nNeighbors); __device__ inline void compute_sub_particle_viscosity_pressure_forces_cell (const float3& xi, float rhoi, float pi, const float3& vi, float* particleVertexData, float* particleSimulationData, int* particleIdList, int start, int end, float3* force, float3* colGra, float* colLapl); __device__ inline void project_quantities_cell (float3& acc, float& density, float& pressure, float& numNeighbors, const float3& xi, int start, int end); //----------------------------------------------------------------------------- // CUDA Kernel definitions //----------------------------------------------------------------------------- __global__ void compute_particle_hash (float* particleVertexData, int* particleIdList, int* particleHashList, unsigned int numParticles) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx >= numParticles) { return; } // calculate corresponding gridpoint int x = (int)((tex1Dfetch(gParticleVertexData, idx*VD_NUM_ELEMENTS + VD_POS_X) - gSimParamsDev.gridOrigin[0])/gSimParamsDev.gridSpacing); int y = (int)((tex1Dfetch(gParticleVertexData, idx*VD_NUM_ELEMENTS + VD_POS_Y) - gSimParamsDev.gridOrigin[1])/gSimParamsDev.gridSpacing); int z = (int)((tex1Dfetch(gParticleVertexData, idx*VD_NUM_ELEMENTS + VD_POS_Z) - gSimParamsDev.gridOrigin[2])/gSimParamsDev.gridSpacing); // wrap outer particles to grid // TODO: modulo operation using "&" is faster, requires grid dims of // power of two x = x % gSimParamsDev.gridDim[0]; y = y % gSimParamsDev.gridDim[1]; z = z % gSimParamsDev.gridDim[2]; // calculate hash, i.e. 
grid cell id int hash = gSimParamsDev.gridDim[0]*(gSimParamsDev.gridDim[1]*z + y) + x; particleIdList[idx] = idx; particleHashList[idx] = hash; } //----------------------------------------------------------------------------- __global__ void compute_sub_particle_hash (float* particleVertexData, int* particleIdList, int* particleHashList, unsigned int numParticles) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } int id = particleIdList[idx]; // calculate corresponding gridpoint int x = (int)((particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X] - gSimParamsDev.gridOrigin[0])/gSimParamsDev.gridSpacingSubParticles); int y = (int)((particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y] - gSimParamsDev.gridOrigin[1])/gSimParamsDev.gridSpacingSubParticles); int z = (int)((particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z] - gSimParamsDev.gridOrigin[2])/gSimParamsDev.gridSpacingSubParticles); // wrap outer particles to grid // TODO: modulo operation using "&" is faster, requires grid dims of // power of two x = x % gSimParamsDev.gridDimSubParticles[0]; y = y % gSimParamsDev.gridDimSubParticles[1]; z = z % gSimParamsDev.gridDimSubParticles[2]; // calculate hash, i.e. grid cell id int hash = gSimParamsDev.gridDimSubParticles[0]* (gSimParamsDev.gridDimSubParticles[1]*z + y) + x; particleHashList[idx] = hash; } //----------------------------------------------------------------------------- __global__ void compute_cell_start_end (int* particleHashList, int* cellStartList, int* cellEndList, unsigned int numParticles) { extern __shared__ int sharedHash[]; unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; int hash; if (idx < numParticles) { hash = particleHashList[idx]; sharedHash[threadIdx.x + 1] = hash; if (idx > 0 && threadIdx.x == 0) { sharedHash[0] = particleHashList[idx - 1]; } } __syncthreads(); if (idx < numParticles) { if (idx == 0 || hash != sharedHash[threadIdx.x]) { cellStartList[hash] = idx; if (idx > 0) { cellEndList[sharedHash[threadIdx.x]] = idx; } } if (idx == numParticles - 1) { cellEndList[hash] = idx + 1; } } } //----------------------------------------------------------------------------- // Compute density and pressure for each particle __global__ void compute_particle_density_pressure (float* particleVertexData, float* particleSimulationData, int* particleIdList, int* cellStartList, int* cellEndList) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx >= gSimParamsDev.numParticles) { return; } int id = particleIdList[idx]; float density = 0.0f; float pressure; float3 pos; // get particles position form vertex data pos.x = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_X); pos.y = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_Y); pos.z = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_Z); int3 c0 = compute_grid_coordinate(pos, -gSimParamsDev.compactSupport); int3 c1 = compute_grid_coordinate(pos, gSimParamsDev.compactSupport); int hash; int start; int end; // compute density contribution from neighbor particles for(int k = c0.z; k <= c1.z; k++) { for(int j = c0.y; j <= c1.y; j++) { for(int i = c0.x; i <= c1.x; i++) { hash = compute_hash_from_grid_coordinate(i, j, k); start = tex1Dfetch(gCellStartList, hash); end = tex1Dfetch(gCellEndList, hash); density += compute_particle_density_cell(pos, particleVertexData, particleIdList, start, end); } } } density *= gSimParamsDev.particleMass; // compute density contribution from the wall float u = (pos.x - gBoundaryGridOrigin[0])/gBoundaryGridLength[0]; 
float v = (pos.y - gBoundaryGridOrigin[1])/gBoundaryGridLength[1]; float w = (pos.z - gBoundaryGridOrigin[2])/gBoundaryGridLength[2]; float densWall = tex3D(gBoundaryDensities, u, v, w); density += densWall; pressure = gSimParamsDev.gasStiffness*(density - gSimParamsDev.restDensity); // set density and pressure particleSimulationData[id*SD_NUM_ELEMENTS + SD_DENSITY] = density; particleSimulationData[id*SD_NUM_ELEMENTS + SD_PRESSURE] = pressure; } //----------------------------------------------------------------------------- // Compute density and pressure for each sub particle __global__ void compute_sub_particle_density_pressure (float* subParticleVertexData, float* subParticleSimulationData, int* particleIdList, int* particleSortedIdList, int* cellStartList, int* cellEndList, unsigned int numParticles) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx >= numParticles) { return; } int id = particleIdList[idx]; float density = 0.0f; float pressure; float3 pos; // get particles position form vertex data pos.x = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; pos.y = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y]; pos.z = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z]; int3 c0 = compute_grid_coordinate_sub_particles(pos, -gSimParamsDev.compactSupportSub); int3 c1 = compute_grid_coordinate_sub_particles(pos, gSimParamsDev.compactSupportSub); int hash; int start; int end; for(int k = c0.z; k <= c1.z; k++) { for(int j = c0.y; j <= c1.y; j++) { for(int i = c0.x; i <= c1.x; i++) { hash = compute_hash_from_grid_coordinate_sub_particle(i, j, k); start = cellStartList[hash]; end = cellEndList[hash]; density += compute_sub_particle_density_cell(pos, subParticleVertexData, particleSortedIdList, start, end); } } } density *= gSimParamsDev.subParticleMass; pressure = gSimParamsDev.gasStiffness*(density - gSimParamsDev.restDensity); // set density and pressure subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_DENSITY] = density; subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_PRESSURE] = pressure; } //----------------------------------------------------------------------------- __global__ void compute_particle_acceleration_ifsurf (float* particleVertexData, float* particleSimulationData, int* particleIdList, int* cellStartList, int* cellEndList, int* isSurfaceParticle) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } int id = tex1Dfetch(gSortedParticleIdList, idx); float density = tex1Dfetch(gParticleSimulationData, id*SD_NUM_ELEMENTS + SD_DENSITY); float pressure = tex1Dfetch(gParticleSimulationData, id*SD_NUM_ELEMENTS + SD_PRESSURE); float tenCoeff = gSimParamsDev.tensionCoefficient; float3 pos; pos.x = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_X); pos.y = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_Y); pos.z = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_Z); float3 vel; vel.x = tex1Dfetch(gParticleSimulationData, id*SD_NUM_ELEMENTS + SD_VEL0_X); vel.y = tex1Dfetch(gParticleSimulationData, id*SD_NUM_ELEMENTS + SD_VEL0_Y); vel.z = tex1Dfetch(gParticleSimulationData, id*SD_NUM_ELEMENTS + SD_VEL0_Z); int3 c0 = compute_grid_coordinate(pos, -gSimParamsDev.compactSupport); int3 c1 = compute_grid_coordinate(pos, gSimParamsDev.compactSupport); float3 force; force.x = 0.0f; force.y = 0.0f; force.z = 0.0f; float3 pressureForce; pressureForce.x = 0.0f; pressureForce.y = 0.0f; pressureForce.z = 0.0f; float3 viscosityForce; viscosityForce.x = 0.0f; viscosityForce.y = 0.0f; 
viscosityForce.z = 0.0f; float3 colGra; colGra.x = 0.0f; colGra.y = 0.0f; colGra.z = 0.0f; // [sumPosNeigbor] and [nNeigbors] are used to computed the center of mass // of the neighborhood of this particle (this also includes the particle // itself float3 sumPosNeighbor; sumPosNeighbor.x = pos.x; sumPosNeighbor.x = pos.y; sumPosNeighbor.x = pos.z; float nNeighbors = 1.0f; float colLapl; float colGraNorm; float grav = gSimParamsDev.gravity; int hash; int start; int end; // compute viscosity and pressure forces for(int k = c0.z; k <= c1.z; k++) { for(int j = c0.y; j <= c1.y; j++) { for(int i = c0.x; i <= c1.x; i++) { hash = compute_hash_from_grid_coordinate(i, j, k); start = tex1Dfetch(gCellStartList, hash); end = tex1Dfetch(gCellEndList, hash); compute_viscosity_pressure_forces_and_ifsurf_cell(pos, density, pressure, vel, particleVertexData, particleSimulationData, particleIdList, start, end, &pressureForce, &viscosityForce, &colGra, &colLapl, &sumPosNeighbor, &nNeighbors); } } } // compute distance to wall float u = (pos.x - gBoundaryGridOrigin[0])/gBoundaryGridLength[0]; float v = (pos.y - gBoundaryGridOrigin[1])/gBoundaryGridLength[1]; float w = (pos.z - gBoundaryGridOrigin[2])/gBoundaryGridLength[2]; float distWall = -tex3D(gBoundaryDistances, u, v, w); // add viscosity force force.x += viscosityForce.x; force.y += viscosityForce.y; force.z += viscosityForce.z; // add pressure force force.x += pressureForce.x; force.y += pressureForce.y; force.z += pressureForce.z; float coeff = density/ (gSimParamsDev.timeStep*gSimParamsDev.timeStep)* (gBoundaryRestDistance - distWall); if (distWall < gBoundaryRestDistance) { float dX = gBoundaryGridSpacing/gBoundaryGridLength[0]; float dY = gBoundaryGridSpacing/gBoundaryGridLength[1]; float dZ = gBoundaryGridSpacing/gBoundaryGridLength[2]; float3 graN; graN.x = (tex3D(gBoundaryDistances, u + dX, v, w) - tex3D(gBoundaryDistances, u - dX, v, w))/(2*dX); graN.y = (tex3D(gBoundaryDistances, u, v + dY, w) - tex3D(gBoundaryDistances, u, v - dY, w))/(2*dY); graN.z = (tex3D(gBoundaryDistances, u, v, w + dZ) - tex3D(gBoundaryDistances, u, v, w - dZ))/(2*dZ); //normalize(graN); // in boundary handling case just, add the pressure force to the force force.x -= coeff*graN.x; force.y -= coeff*graN.y; force.z -= coeff*graN.z; //// viscosity contribution of the wall float visWallCoeff = tex3D(gBoundaryViscosities, u, v, w); force.x -= vel.x*visWallCoeff; force.y -= vel.y*visWallCoeff; force.z -= vel.z*visWallCoeff; } else { // add surface tension force colGraNorm = sqrtf(colGra.x*colGra.x + colGra.y*colGra.y + colGra.z*colGra.z); float fCoeff = tenCoeff*colLapl/colGraNorm; if(colGraNorm > gSimParamsDev.normThresh) { force.x -= fCoeff*colGra.x; force.y -= fCoeff*colGra.y; force.z -= fCoeff*colGra.z; } } // store the actual acceleration particleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_X] = force.x/density; particleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_Y] = force.y/density - grav; particleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_Z] = force.z/density; } //----------------------------------------------------------------------------- __global__ void compute_sub_particle_acceleration (float* subParticleVertexData, float* subParticleSimulationData, int* subParticleIdList, int* subParticleSortedIdList, int* cellStartList, int* cellEndList, unsigned int numParticles) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } int id = subParticleIdList[idx]; float density = subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_DENSITY]; 
float pressure = subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_PRESSURE]; float tenCoeff = gSimParamsDev.tensionCoefficient; float3 pos; pos.x = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; pos.y = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y]; pos.z = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z]; float3 vel; vel.x = subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_X]; vel.y = subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_Y]; vel.z = subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_Z]; int3 c0 = compute_grid_coordinate_sub_particles(pos, -gSimParamsDev.compactSupportSub); int3 c1 = compute_grid_coordinate_sub_particles(pos, gSimParamsDev.compactSupportSub); float3 force; force.x = 0.0f; force.y = 0.0f; force.z = 0.0f; float3 colGra; colGra.x = 0.0f; colGra.y = 0.0f; colGra.z = 0.0f; float colLapl; float colGraNorm; float grav = gSimParamsDev.gravity; int hash; int start; int end; // compute viscosity and pressure forces for (int k = c0.z; k <= c1.z; k++) { for (int j = c0.y; j <= c1.y; j++) { for (int i = c0.x; i <= c1.x; i++) { hash = compute_hash_from_grid_coordinate_sub_particle(i, j, k); start = cellStartList[hash]; end = cellEndList[hash]; compute_sub_particle_viscosity_pressure_forces_cell(pos, density, pressure, vel, subParticleVertexData, subParticleSimulationData, subParticleSortedIdList, start, end, &force, &colGra, &colLapl); } } } // surface tension colGraNorm = sqrtf(colGra.x*colGra.x + colGra.y*colGra.y + colGra.z*colGra.z); float fCoeff = tenCoeff*colLapl/colGraNorm; if (colGraNorm > gSimParamsDev.normThresh) { force.x -= fCoeff*colGra.x; force.y -= fCoeff*colGra.y; force.z -= fCoeff*colGra.z; } // store the actual acceleration subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_X] = force.x/density; subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_Y] = force.y/density - grav; subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_Z] = force.z/density; } //---------------------------------------------------------------------------- __global__ void project_quantities (float* subParticleVertexData, float* subParticleSimulationData, float* particleVertexData, float* particleSimulationData, int* subParticleIds, unsigned int numParticles, unsigned int offset) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } int id = subParticleIds[idx + offset]; float3 pos; pos.x = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; pos.y = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y]; pos.z = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z]; int3 c0 = compute_grid_coordinate(pos, -gSimParamsDev.compactSupportSub); int3 c1 = compute_grid_coordinate(pos, gSimParamsDev.compactSupportSub); float3 acc; acc.x = 0.0f; acc.y = 0.0f; acc.z = 0.0f; float density = 0.0f; float pressure = 0.0f; float numNeighbours = 0.0f; int hash; int start; int end; // compute viscosity and pressure forces for(int k = c0.z; k <= c1.z; k++) { for(int j = c0.y; j <= c1.y; j++) { for(int i = c0.x; i <= c1.x; i++) { hash = compute_hash_from_grid_coordinate(i, j, k); start = tex1Dfetch(gCellStartList, hash); end = tex1Dfetch(gCellEndList, hash); project_quantities_cell(acc, density, pressure, numNeighbours, pos, start, end); } } } subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_DENSITY] = density/numNeighbours; subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_PRESSURE] = pressure/numNeighbours; } //---------------------------------------------------------------------------- /*__global__ void 
compute_sub_particle_acceleration (float* particleVertexData, float* particleSimulationData, int* particleIdList, int* cellStartList, int* cellEndList, int* isSurfaceParticle) { }*/ //----------------------------------------------------------------------------- __global__ void integrate_euler (float* particleVertexData, float* particleSimulationData) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } unsigned int idVert = idx*VD_NUM_ELEMENTS; unsigned int idSim = idx*SD_NUM_ELEMENTS; float dt = gSimParamsDev.timeStep; particleSimulationData[idSim + SD_VEL0_X] += dt*particleSimulationData[idSim + SD_ACC_X]; particleSimulationData[idSim + SD_VEL0_Y] += dt*particleSimulationData[idSim + SD_ACC_Y]; particleSimulationData[idSim + SD_VEL0_Z] += dt*particleSimulationData[idSim + SD_ACC_Z]; particleVertexData[idVert + VD_POS_X] += dt*particleSimulationData[idSim + SD_VEL0_X]; particleVertexData[idVert + VD_POS_Y] += dt*particleSimulationData[idSim + SD_VEL0_Y]; particleVertexData[idVert + VD_POS_Z] += dt*particleSimulationData[idSim + SD_VEL0_Z]; // compute density contribution from the wall /*float u = (pos.x - gBoundaryGridOrigin[0])/gBoundaryGridLength[0]; float v = (pos.y - gBoundaryGridOrigin[1])/gBoundaryGridLength[1]; float w = (pos.z - gBoundaryGridOrigin[2])/gBoundaryGridLength[2]; float distWall = tex3D(gBoundaryDistances, u, v, w);*/ } //----------------------------------------------------------------------------- __global__ void integrate_sub_particles_euler (float* subParticleVertexData, float* subParticleSimulationData, int* subParticleIds, unsigned int nSubParticles, unsigned int offset) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= nSubParticles) { return; } int id = subParticleIds[idx + offset]; unsigned int idVert = id*VD_NUM_ELEMENTS; unsigned int idSim = id*SD_NUM_ELEMENTS; float dt = gSimParamsDev.timeStepSubParticles; subParticleSimulationData[idSim + SD_VEL0_X] += dt*subParticleSimulationData[idSim + SD_ACC_X]; subParticleSimulationData[idSim + SD_VEL0_Y] += dt*subParticleSimulationData[idSim + SD_ACC_Y]; subParticleSimulationData[idSim + SD_VEL0_Z] += dt*subParticleSimulationData[idSim + SD_ACC_Z]; subParticleVertexData[idVert + VD_POS_X] += dt*subParticleSimulationData[idSim + SD_VEL0_X]; subParticleVertexData[idVert + VD_POS_Y] += dt*subParticleSimulationData[idSim + SD_VEL0_Y]; subParticleVertexData[idVert + VD_POS_Z] += dt*subParticleSimulationData[idSim + SD_VEL0_Z]; } //----------------------------------------------------------------------------- __global__ void integrate_boundary_sub_particles_euler (float* subParticleVertexData, float* subParticleSimulationData, int* subParticleIds, unsigned int nSubParticles, unsigned int offset) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= nSubParticles) { return; } int id = subParticleIds[idx + offset]; unsigned int idVert = id*VD_NUM_ELEMENTS; unsigned int idSim = id*SD_NUM_ELEMENTS; float dt = gSimParamsDev.timeStep; /* subParticleSimulationData[idSim + SD_VEL0_X] += dt*subParticleSimulationData[idSim + SD_ACC_X]; subParticleSimulationData[idSim + SD_VEL0_Y] += dt*subParticleSimulationData[idSim + SD_ACC_Y]; subParticleSimulationData[idSim + SD_VEL0_Z] += dt*subParticleSimulationData[idSim + SD_ACC_Z]; */ subParticleVertexData[idVert + VD_POS_X] += dt*subParticleSimulationData[idSim + SD_VEL0_X]; subParticleVertexData[idVert + VD_POS_Y] += dt*subParticleSimulationData[idSim + SD_VEL0_Y]; subParticleVertexData[idVert + 
VD_POS_Z] += dt*subParticleSimulationData[idSim + SD_VEL0_Z]; } //----------------------------------------------------------------------------- __global__ void shift_state (char* particleState) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } particleState[idx] = (particleState[idx] << 2); } //----------------------------------------------------------------------------- __global__ void collision_handling (float* particleVertexData, float* particleSimulationData) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } unsigned int idVert = idx*VD_NUM_ELEMENTS; unsigned int idSim = idx*SD_NUM_ELEMENTS; float3 pos; float3 vel; pos.x = tex1Dfetch(gParticleVertexData, idVert + VD_POS_X); pos.y = tex1Dfetch(gParticleVertexData, idVert + VD_POS_Y); pos.z = tex1Dfetch(gParticleVertexData, idVert + VD_POS_Z); vel.x = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_X); vel.y = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_Y); vel.z = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_Z); float3 local; float3 diff; float3 nrm; float dist; float depth; // compute "distance" to box, if positive the particle // is outside the box. // compute local position of the particle to the box local.x = pos.x - gSimParamsDev.boxCen[0]; local.y = pos.y - gSimParamsDev.boxCen[1]; local.z = pos.z - gSimParamsDev.boxCen[2]; // project local pos to the upper right quadrand and // compute difference to the boxDim vec diff.x = abs(local.x) - gSimParamsDev.boxDim[0]; diff.y = abs(local.y) - gSimParamsDev.boxDim[1]; diff.z = abs(local.z) - gSimParamsDev.boxDim[2]; dist = max(diff.x, diff.y); dist = max(dist, diff.z); // if the particle lies outside the box, the collision must be handled float3 contact; if (dist > 0.0f) { // contact point in "box space" contact.x = min(gSimParamsDev.boxDim[0], max(-gSimParamsDev.boxDim[0], local.x)); contact.y = min(gSimParamsDev.boxDim[1], max(-gSimParamsDev.boxDim[1], local.y)); contact.z = min(gSimParamsDev.boxDim[2], max(-gSimParamsDev.boxDim[2], local.z)); // translate to worldspace contact.x += gSimParamsDev.boxCen[0]; contact.y += gSimParamsDev.boxCen[1]; contact.z += gSimParamsDev.boxCen[2]; // compute penetration depth depth = compute_distance(contact, pos); // compute normal nrm.x = pos.x - contact.x; nrm.y = pos.y - contact.y; nrm.z = pos.z - contact.z; normalize(nrm); float velNorm = norm(vel); float dp = dot_product(nrm, vel); float coeff = (1 + gSimParamsDev.restitution*depth/ (gSimParamsDev.timeStep*velNorm))*dp; vel.x -= coeff*nrm.x; vel.y -= coeff*nrm.y; vel.z -= coeff*nrm.z; particleVertexData[idVert + VD_POS_X] = contact.x; particleVertexData[idVert + VD_POS_Y] = contact.y; particleVertexData[idVert + VD_POS_Z] = contact.z; particleSimulationData[idSim + SD_VEL0_X] = vel.x; particleSimulationData[idSim + SD_VEL0_Y] = vel.y; particleSimulationData[idSim + SD_VEL0_Z] = vel.z; } } //----------------------------------------------------------------------------- __global__ void collision_handling_sub_particles (float* subParticleVertexData, float* subParticleSimulationData, int* subParticleIds, unsigned int numParticles, unsigned int offset) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } int id = subParticleIds[idx + offset]; unsigned int idVert = id*VD_NUM_ELEMENTS; unsigned int idSim = id*SD_NUM_ELEMENTS; float3 pos; float3 vel; pos.x = subParticleVertexData[idVert + VD_POS_X]; pos.y = 
subParticleVertexData[idVert + VD_POS_Y]; pos.z = subParticleVertexData[idVert + VD_POS_Z]; vel.x = subParticleSimulationData[idSim + SD_VEL0_X]; vel.y = subParticleSimulationData[idSim + SD_VEL0_Y]; vel.z = subParticleSimulationData[idSim + SD_VEL0_Z]; float3 local; float3 diff; float3 nrm; float dist; float depth; // compute "distance" to box, if positive the particle // is outside the box. // compute local position of the particle to the box local.x = pos.x - gSimParamsDev.boxCen[0]; local.y = pos.y - gSimParamsDev.boxCen[1]; local.z = pos.z - gSimParamsDev.boxCen[2]; // project local pos to the upper right quadrand and // compute difference to the boxDim vec diff.x = abs(local.x) - gSimParamsDev.boxDim[0]; diff.y = abs(local.y) - gSimParamsDev.boxDim[1]; diff.z = abs(local.z) - gSimParamsDev.boxDim[2]; dist = max(diff.x, diff.y); dist = max(dist, diff.z); // if the particle lies outside the box, the collision must be handled float3 contact; if (dist > 0.0f) { // contact point in "box space" contact.x = min(gSimParamsDev.boxDim[0], max(-gSimParamsDev.boxDim[0], local.x)); contact.y = min(gSimParamsDev.boxDim[1], max(-gSimParamsDev.boxDim[1], local.y)); contact.z = min(gSimParamsDev.boxDim[2], max(-gSimParamsDev.boxDim[2], local.z)); // translate to worldspace contact.x += gSimParamsDev.boxCen[0]; contact.y += gSimParamsDev.boxCen[1]; contact.z += gSimParamsDev.boxCen[2]; // compute penetration depth depth = compute_distance(contact, pos); // compute normal nrm.x = pos.x - contact.x; nrm.y = pos.y - contact.y; nrm.z = pos.z - contact.z; normalize(nrm); float velNorm = norm(vel); float dp = dot_product(nrm, vel); float coeff = (1 + gSimParamsDev.restitution*depth/ (gSimParamsDev.timeStep*velNorm))*dp; vel.x -= coeff*nrm.x; vel.y -= coeff*nrm.y; vel.z -= coeff*nrm.z; subParticleVertexData[idVert + VD_POS_X] = contact.x; subParticleVertexData[idVert + VD_POS_Y] = contact.y; subParticleVertexData[idVert + VD_POS_Z] = contact.z; subParticleSimulationData[idSim + SD_VEL0_X] = vel.x; subParticleSimulationData[idSim + SD_VEL0_Y] = vel.y; subParticleSimulationData[idSim + SD_VEL0_Z] = vel.z; } } //----------------------------------------------------------------------------- __global__ void find_split_particles (float* particleVertexData, char* particleState, int* particleIdList, int* cellStartList, int* cellEndList) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } unsigned int id = particleIdList[idx]; float3 pos; pos.x = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; if (pos.x >= 0.2f && pos.x <= 0.5f) { particleState[id] |= 1; } } //----------------------------------------------------------------------------- __global__ void find_boundary_particles (float* particleVertexData, char* particleState, int* particleIdList, int* cellStartList, int* cellEndList) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } unsigned int id = particleIdList[idx]; unsigned int state = particleState[id] & 3; float3 pos; float3 xj; float3 r; float rn; if (state == 1) { // distribute information to neigbors pos.x = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; pos.y = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y]; pos.z = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z]; int3 c0 = compute_grid_coordinate(pos, -gSimParamsDev.compactSupport); int3 c1 = compute_grid_coordinate(pos, gSimParamsDev.compactSupport); int hash; int start; int end; for(int k = c0.z; k <= c1.z; k++) { for(int j = 
c0.y; j <= c1.y; j++) { for(int i = c0.x; i <= c1.x; i++) { hash = compute_hash_from_grid_coordinate(i, j, k); start = cellStartList[hash]; end = cellEndList[hash]; for (int u = start; u < end; u++) { int v = particleIdList[u]; xj.x = particleVertexData[v*VD_NUM_ELEMENTS + VD_POS_X]; xj.y = particleVertexData[v*VD_NUM_ELEMENTS + VD_POS_Y]; xj.z = particleVertexData[v*VD_NUM_ELEMENTS + VD_POS_Z]; r.x = pos.x - xj.x; r.y = pos.y - xj.y; r.z = pos.z - xj.z; rn = r.x*r.x + r.y*r.y + r.z*r.z; if (rn <= gSimParamsDev.compactSupport* gSimParamsDev.compactSupport) { particleState[v] |= 2; } } } } } } } //----------------------------------------------------------------------------- // sets the sub particle vertex & simulation data for each particle, that has // changed its state from "default" to "boundary" or "split". // This kernel is called for particles that were split and for boundary // particles. [numParticles] refers to the total number of those particles. __global__ void initialize_sub_particles (float* subParticleVertexData, float* subParticleSimulationData, int* particleIds, float* particleVertexData, float* particleSimulationData, char* particleStates, unsigned int numParticles) { #define SQRT3INV 0.577350269 // = 1/sqrt{3} // directions to seed the new sub particles const float directions[] = { SQRT3INV, SQRT3INV, SQRT3INV, SQRT3INV, SQRT3INV, -SQRT3INV, SQRT3INV, -SQRT3INV, SQRT3INV, SQRT3INV, -SQRT3INV, -SQRT3INV, -SQRT3INV, SQRT3INV, SQRT3INV, -SQRT3INV, SQRT3INV, -SQRT3INV, -SQRT3INV, -SQRT3INV, SQRT3INV, -SQRT3INV, -SQRT3INV, -SQRT3INV }; unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } unsigned int id = particleIds[idx]; char state = particleStates[id] & 15; // if parent particle makes transition from "default" -> "split" (3) // "default" -> "boundary" (2), "split" -> "boundary" (14) the sub particle needs // to be reinitialized // if (state == 2 || state == 3 || state == 14 || state == 11) { float density = particleSimulationData[id*SD_NUM_ELEMENTS + SD_DENSITY]; float radicand = 3.0f*gSimParamsDev.particleMass/(4.0f*M_PI*density); float radius = pow(radicand, 1.0f/3.0f); float3 pos; pos.x = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; pos.y = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y]; pos.z = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z]; // ... 
initialize initial position and velocity of the corresponding // sub particles for (unsigned int i = 0; i < 8; i++) { // update velocity int index = (8*id + i)*SD_NUM_ELEMENTS; subParticleSimulationData[index + SD_VEL0_X] = particleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_X]; subParticleSimulationData[index + SD_VEL0_Y] = particleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_Y]; subParticleSimulationData[index + SD_VEL0_Z] = particleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_Z]; // update position index = (8*id + i)*VD_NUM_ELEMENTS; subParticleVertexData[index + VD_POS_X] = pos.x + directions[3*i + 0]*radius; subParticleVertexData[index + VD_POS_Y] = pos.y + directions[3*i + 1]*radius; subParticleVertexData[index + VD_POS_Z] = pos.z + directions[3*i + 2]*radius; } } #undef SQRT3INV } //----------------------------------------------------------------------------- __global__ void check_split_boundary_default (char* particleState, int* isSplit, int* isBoundary, int* isDefault) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; char state = particleState[idx] & 3; if (idx >= gSimParamsDev.numParticles) { return; } if (state == 0) { isDefault[idx] = 1; } else if (state == 2) { isBoundary[idx] = 1; } else { isSplit[idx] = 1; } } //----------------------------------------------------------------------------- __global__ void collect_ids (int* subParticleIdList, int* sortedSubParticleIdList,int* splitParticleIdList, int* boundaryParticleIdList, int* defaultParticleIdList, int* isSplit, int* isBoundary, int* isDefault, int* splitPrefixSum, int* boundaryPrefixSum, int* defaultPrefixSum, unsigned int numParticlesSplit) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } if (isSplit[idx] == 1) { int splitPreSum = splitPrefixSum[idx]; for (unsigned int i = 0; i < 8; i++) { subParticleIdList[8*splitPreSum + i] = 8*idx + i; sortedSubParticleIdList[8*splitPreSum + i] = 8*idx + i; } splitParticleIdList[splitPreSum] = idx; } else if (isBoundary[idx] == 1) { int boundaryPreSum = boundaryPrefixSum[idx]; for (unsigned int i = 0; i < 8; i++) { subParticleIdList[8*(numParticlesSplit + boundaryPreSum) + i] = 8*idx + i; sortedSubParticleIdList[8*(numParticlesSplit + boundaryPreSum) + i] = 8*idx + i; } boundaryParticleIdList[boundaryPreSum] = idx; } else { defaultParticleIdList[defaultPrefixSum[idx]] = idx; } } //__global__ void collision_handling(float* particleVertexData, // float* particleSimulationData) //{ // /*unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; // // if (idx >= gSimParamsDev.numParticles) // { // return; // } // // unsigned int idVert = idx*VD_NUM_ELEMENTS; // unsigned int idSim = idx*SD_NUM_ELEMENTS; // // float3 pos; // float3 vel; // // pos.x = tex1Dfetch(gParticleVertexData, idVert + VD_POS_X); // pos.y = tex1Dfetch(gParticleVertexData, idVert + VD_POS_Y); // pos.z = tex1Dfetch(gParticleVertexData, idVert + VD_POS_Z); // // vel.x = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_X); // vel.y = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_Y); // vel.z = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_Z); // // // // unsigned int i,j,k; // i = (unsigned int)((pos.x - gBoundaryOrigin[0])/gDx); // j = (unsigned int)((pos.y - gBoundaryOrigin[1])/gDx); // k = (unsigned int)((pos.z - gBoundaryOrigin[2])/gDx); // unsigned int idx2 = i + gnBoundarySamples[0]*(j + gnBoundarySamples[1]*k); // unsigned int nodeIdx = tex1Dfetch(gIndexMap, idx2); // float dist = tex1Dfetch(gNodeTable, NC_NUM_ELEMENTS*nodeIdx + 
NC_DISTANCE); // // float3 bNorm; // // bNorm.x = tex1Dfetch(gNodeTable, NC_NUM_ELEMENTS*nodeIdx + NC_NORMAL_X); // bNorm.y = tex1Dfetch(gNodeTable, NC_NUM_ELEMENTS*nodeIdx + NC_NORMAL_Y); // bNorm.z = tex1Dfetch(gNodeTable, NC_NUM_ELEMENTS*nodeIdx + NC_NORMAL_Z); // // if (bNorm.y != 0.0f) // { // particleVertexData[idVert + VD_POS_X] -= gSimParamsDev.timeStep*vel.x; // particleVertexData[idVert + VD_POS_Y] -= gSimParamsDev.timeStep*vel.y; // particleVertexData[idVert + VD_POS_Z] -= gSimParamsDev.timeStep*vel.z; // }*/ //} //----------------------------------------------------------------------------- __global__ void create_density_slice (float* data, unsigned int width, unsigned int height, unsigned int depth) { int u = blockIdx.x*blockDim.x + threadIdx.x; int v = blockIdx.y*blockDim.y + threadIdx.y; unsigned int idx = width*v + u; if (u >= width || v >= height) { return; } float3 pos; pos.x = -1.0f + gSimParamsDev.gridSpacing*u; pos.y = gSimParamsDev.gridOrigin[1] + gSimParamsDev.gridSpacing*v; pos.z = gSimParamsDev.gridOrigin[2] + gSimParamsDev.gridSpacing*depth; float tu = (pos.x - gBoundaryGridOrigin[0])/gBoundaryGridLength[0]; float tv = (pos.y - gBoundaryGridOrigin[1])/gBoundaryGridLength[1]; float tw = (pos.z - gBoundaryGridOrigin[2])/gBoundaryGridLength[2]; float density = tex3D(gBoundaryDistances, tu, tv, tw); data[idx] = density; } //----------------------------------------------------------------------------- // definition of aux. functions (device) //----------------------------------------------------------------------------- __device__ inline int3 compute_grid_coordinate(float3 pos, float d) { int3 gridCoord; gridCoord.x = (unsigned int)((pos.x + d - gSimParamsDev.gridOrigin[0])/ gSimParamsDev.gridSpacing); gridCoord.y = (unsigned int)((pos.y + d - gSimParamsDev.gridOrigin[1])/ gSimParamsDev.gridSpacing); gridCoord.z = (unsigned int)((pos.z + d - gSimParamsDev.gridOrigin[2])/ gSimParamsDev.gridSpacing); gridCoord.x = gridCoord.x%gSimParamsDev.gridDim[0]; gridCoord.y = gridCoord.y%gSimParamsDev.gridDim[1]; gridCoord.z = gridCoord.z%gSimParamsDev.gridDim[2]; gridCoord.x = min(max(gridCoord.x, 0),gSimParamsDev.gridDim[0] - 1); gridCoord.y = min(max(gridCoord.y, 0),gSimParamsDev.gridDim[1] - 1); gridCoord.z = min(max(gridCoord.z, 0),gSimParamsDev.gridDim[2] - 1); return gridCoord; } //----------------------------------------------------------------------------- __device__ inline int3 compute_grid_coordinate_sub_particles(float3 pos, float d) { int3 gridCoord; gridCoord.x = (unsigned int)((pos.x + d - gSimParamsDev.gridOrigin[0])/ gSimParamsDev.gridSpacingSubParticles); gridCoord.y = (unsigned int)((pos.y + d - gSimParamsDev.gridOrigin[1])/ gSimParamsDev.gridSpacingSubParticles); gridCoord.z = (unsigned int)((pos.z + d - gSimParamsDev.gridOrigin[2])/ gSimParamsDev.gridSpacingSubParticles); gridCoord.x = gridCoord.x%gSimParamsDev.gridDimSubParticles[0]; gridCoord.y = gridCoord.y%gSimParamsDev.gridDimSubParticles[1]; gridCoord.z = gridCoord.z%gSimParamsDev.gridDimSubParticles[2]; gridCoord.x = min(max(gridCoord.x, 0), gSimParamsDev.gridDimSubParticles[0] - 1); gridCoord.y = min(max(gridCoord.y, 0), gSimParamsDev.gridDimSubParticles[1] - 1); gridCoord.z = min(max(gridCoord.z, 0), gSimParamsDev.gridDimSubParticles[2] - 1); return gridCoord; } //----------------------------------------------------------------------------- __device__ inline int compute_hash_from_grid_coordinate (int i, int j, int k) { return gSimParamsDev.gridDim[0]*(gSimParamsDev.gridDim[1]*k + j) + i; } 
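//-----------------------------------------------------------------------------
// Illustrative helper (not used by the simulation kernels; provided only as an
// example): composes compute_grid_coordinate and the cell hash above to map a
// world-space position directly to its flattened cell index. For a
// hypothetical 64x64x64 grid, cell (2, 3, 4) hashes to 64*(64*4 + 3) + 2
// = 16578.
__device__ inline int compute_hash_from_position (float3 pos)
{
    int3 c = compute_grid_coordinate(pos, 0.0f);
    return compute_hash_from_grid_coordinate(c.x, c.y, c.z);
}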
//----------------------------------------------------------------------------- __device__ inline int compute_hash_from_grid_coordinate_sub_particle (int i, int j, int k) { return gSimParamsDev.gridDimSubParticles[0]* (gSimParamsDev.gridDimSubParticles[1]*k + j) + i; } //----------------------------------------------------------------------------- __device__ inline float norm(const float3& a) { return sqrt(a.x*a.x+a.y*a.y+a.z*a.z); } //----------------------------------------------------------------------------- __device__ inline void normalize (float3& a) { float norm = sqrt(a.x*a.x+a.y*a.y+a.z*a.z); a.x /= norm; a.y /= norm; a.z /= norm; } //----------------------------------------------------------------------------- __device__ inline float compute_distance (float3 a, float3 b) { return sqrt((a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z)); } //----------------------------------------------------------------------------- __device__ inline float compute_squared_distance (float3 a, float3 b) { return ((a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z)); } //----------------------------------------------------------------------------- __device__ inline float dot_product (const float3& a, const float3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; } //----------------------------------------------------------------------------- __device__ inline void project_quantities_cell (float3& acc, float& density, float& pressure, float& numNeighbors, const float3& xi, int start, int end) { int j; float3 xj; // neighbor particle's position float3 vj; // neighbor particle's velocity float rhoj; // neighbor density float pj; // neighbor pressure float3 aj; // neighbor acceleration float h = gSimParamsDev.compactSupportSub; float sqDist, d; for (int i = start; i < end; i++) { j = tex1Dfetch(gSortedParticleIdList, i); xj.x = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_X); xj.y = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_Y); xj.z = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_Z); rhoj = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_DENSITY); pj = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_PRESSURE); aj.x = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_ACC_X); aj.y = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_ACC_Y); aj.z = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_ACC_Z); sqDist = compute_squared_distance(xi, xj); if (sqDist <= h*h) { density += rhoj; pressure += pj; acc.x += aj.x; acc.y += aj.y; acc.z += aj.z; numNeighbors += 1.0f; /*d = h*h - sqDist; density += gSimParamsDev.poly6Sub*rhoj; pressure += gSimParamsDev.poly6Sub*pj; velocity.x += gSimParamsDev.poly6Sub*vj.x*d*d*d; velocity.y += gSimParamsDev.poly6Sub*vj.y*d*d*d; velocity.z += gSimParamsDev.poly6Sub*vj.z*d*d*d;*/ } } } //----------------------------------------------------------------------------- // Computes the contribution of neighborparticles of one particular grid cell // to the density of the particle at position [pos]. 
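// The accumulated term corresponds to the standard poly6 smoothing kernel,
//     W(r, h) = 315/(64*pi*h^9) * (h^2 - r^2)^3   for 0 <= r <= h,
// whose constant 315/(64*pi*h^9) is precomputed in gSimParamsDev.poly6 (see
// Example01). Only the kernel sum is returned here; the particle mass factor
// is presumably applied by the calling kernel.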
__device__ float compute_particle_density_cell (const float3 &pos, float* particleVertexData, int* particleIdList, int start, int end) { int particleIndex; // index of the neighbor of the particle float density = 0.0f; float3 p; // neighbor particle's position float h = gSimParamsDev.compactSupport; float r; float d; for (int i = start; i < end; i++) { particleIndex = particleIdList[i]; // compute position of the neighbor p.x = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_X]; p.y = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_Y]; p.z = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_Z]; r = compute_distance(p, pos); // TODO: evaluating r*r <= h*h might save taking the sqrt in // compute_distance proc. if (r <= h) { d = h*h - r*r; density += gSimParamsDev.poly6*d*d*d; } } return density; } //----------------------------------------------------------------------------- // Computes the contribution of neighborsub particles of one particular grid // cell to the density of the particle at position [pos]. __device__ float compute_sub_particle_density_cell (const float3 &pos, float* particleVertexData, int* particleIdList, int start, int end) { int particleIndex; // index of the neighbor of the particle float density = 0.0f; float3 p; // neighbor particle's position float h = gSimParamsDev.compactSupportSub; float r; float d; for (int i = start; i < end; i++) { particleIndex = particleIdList[i]; // compute position of the neighbor p.x = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_X]; p.y = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_Y]; p.z = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_Z]; r = compute_distance(p, pos); // TODO: evaluating r*r <= h*h might save taking the sqrt in // compute_distance proc. 
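        // A possible variant of the TODO above (illustrative, not applied here):
        //     float r2 = compute_squared_distance(p, pos);
        //     if (r2 <= h*h) { d = h*h - r2; density += gSimParamsDev.poly6Sub*d*d*d; }
        // which avoids taking the square root entirely.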
if (r <= h) { d = h*h - r*r; density += gSimParamsDev.poly6Sub*d*d*d; } } return density; } //----------------------------------------------------------------------------- __device__ inline void compute_viscosity_pressure_forces_and_ifsurf_cell (const float3& xi, float rhoi, float pi, const float3& vi, float* particleVertexData, float* particleSimulationData, int* particleIdList, int start, int end, float3* pressureForce, float3* viscosityForce, float3* colGra, float* colLapl, float3* sumPosNeighbor, float* nNeighbors) { int j; // neighbor index in particle list float3 xj; // neighbor particle's position float3 vj; // neighbor particle's velocity float rhoj; // neighbor density float pj; // neighbor pressure float3 r; // xi - xj float rn; // ||xi - xj|| float h = gSimParamsDev.compactSupport; // effective radius float grad = gSimParamsDev.gradSpiky; float lapl = gSimParamsDev.laplVisc; float grad2 = gSimParamsDev.gradPoly6; float lapl2 = gSimParamsDev.laplPoly6; float pressure; // pressure term in the kernel approx float rhoi2 = rhoi*rhoi; float m = gSimParamsDev.particleMass; float mu = gSimParamsDev.dynamicViscosity; float d; // helper value to avoid arithmetic operations for (int i = start; i < end; i++) { // get neighbor index from particle list j = tex1Dfetch(gSortedParticleIdList, i); // get neighbor particle information xj.x = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_X); xj.y = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_Y); xj.z = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_Z); vj.x = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_VEL0_X); vj.y = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_VEL0_Y); vj.z = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_VEL0_Z); rhoj = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_DENSITY); pj = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_PRESSURE); r.x = xi.x - xj.x; r.y = xi.y - xj.y; r.z = xi.z - xj.z; rn = norm(r); // TODO: * masse koennte ausgeklammert werden um multiplikationen // zu sparen. // * generell kann der pressure term in hinblick auf rhoi und // pi vereinfacht werden. // * visc force: mu koennte ausgeklammert werden etc. // * zwei float3's fuer beide kraefte koennten genutzt werden // um die terme zu vereinfachen. 
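        // (English rendering of the German TODO above: the mass m could be
        // factored out to save multiplications; the pressure term can in
        // general be simplified with respect to rhoi and pi; mu could be
        // factored out of the viscosity force; and two float3 accumulators
        // could be reused for both forces to simplify the terms.)
        // The pressure term computed next is the symmetrized SPH form
        //     m * rho_i * (p_i/rho_i^2 + p_j/rho_j^2),
        // which is then scaled by the spiky-kernel gradient term
        // gradSpiky*(h - r)^2 along the direction r/|r|.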
pressure = rhoi*(pi/rhoi2 + pj/(rhoj*rhoj))*m; if (rn <= h && rn > 0.0f) { // compute pressure force d = (h-rn)*(h-rn); pressureForce->x -= pressure*grad*d/rn*r.x; pressureForce->y -= pressure*grad*d/rn*r.y; pressureForce->z -= pressure*grad*d/rn*r.z; // compute viscosity force d = (h-rn); viscosityForce->x += mu*(vj.x-vi.x)*m/rhoj*lapl*d; viscosityForce->y += mu*(vj.y-vi.y)*m/rhoj*lapl*d; viscosityForce->z += mu*(vj.z-vi.z)*m/rhoj*lapl*d; // compute color gradient d = (h*h-rn*rn)*(h*h-rn*rn); colGra->x += m/rhoj*grad2*d*r.x; colGra->y += m/rhoj*grad2*d*r.y; colGra->z += m/rhoj*grad2*d*r.z; // compute color laplacian d = (h*h - rn*rn)*(3.0f*h*h - 7.0f*rn*rn); *colLapl += m/rhoj*lapl2*d; // sumPosNeighbor->x += xj.x; sumPosNeighbor->y += xj.y; sumPosNeighbor->z += xj.z; *nNeighbors += 1.0f; } } } //----------------------------------------------------------------------------- __device__ inline void compute_sub_particle_viscosity_pressure_forces_cell (const float3& xi, float rhoi, float pi, const float3& vi, float* particleVertexData, float* particleSimulationData, int* particleIdList, int start, int end, float3* force, float3* colGra, float* colLapl) { int j; // neighbor index in particle list float3 xj; // neighbor particle's position float3 vj; // neighbor particle's velocity float rhoj; // neighbor density float pj; // neighbor pressure float3 r; // xi - xj float rn; // ||xi - xj|| float h = gSimParamsDev.compactSupportSub; // effective radius float grad = gSimParamsDev.gradSpikySub; float lapl = gSimParamsDev.laplViscSub; float grad2 = gSimParamsDev.gradPoly6Sub; float lapl2 = gSimParamsDev.laplPoly6Sub; float pressure; // pressure term in the kernel approx float rhoi2 = rhoi*rhoi; float m = gSimParamsDev.subParticleMass; float mu = gSimParamsDev.dynamicViscosity; float d; // helper value to avoid arithmetic operations for (int i = start; i < end; i++) { // get neighbor index from particle list j = particleIdList[i]; // get neighbor particle information xj.x = particleVertexData[j*VD_NUM_ELEMENTS + VD_POS_X]; xj.y = particleVertexData[j*VD_NUM_ELEMENTS + VD_POS_Y]; xj.z = particleVertexData[j*VD_NUM_ELEMENTS + VD_POS_Z]; vj.x = particleSimulationData[j*SD_NUM_ELEMENTS + SD_VEL0_X]; vj.y = particleSimulationData[j*SD_NUM_ELEMENTS + SD_VEL0_Y]; vj.z = particleSimulationData[j*SD_NUM_ELEMENTS + SD_VEL0_Z]; rhoj = particleSimulationData[j*SD_NUM_ELEMENTS + SD_DENSITY]; pj = particleSimulationData[j*SD_NUM_ELEMENTS + SD_PRESSURE]; r.x = xi.x - xj.x; r.y = xi.y - xj.y; r.z = xi.z - xj.z; rn = norm(r); // TODO: * masse koennte ausgeklammert werden um multiplikationen // zu sparen. // * generell kann der pressure term in hinblick auf rhoi und // pi vereinfacht werden. // * visc force: mu koennte ausgeklammert werden etc. // * zwei float3's fuer beide kraefte koennten genutzt werden // um die terme zu vereinfachen. 
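        // (The German TODO above mirrors the one in the per-particle version;
        // see the English rendering there.) This variant accumulates pressure
        // and viscosity into the single 'force' vector and uses the
        // sub-particle kernel constants, e.g. the viscosity contribution
        //     mu * (v_j - v_i) * m/rho_j * laplViscSub * (h - r)
        // with h = compactSupportSub and laplViscSub = 45/(pi*h^6), consistent
        // with the factors set in Example01.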
pressure = rhoi*(pi/rhoi2 + pj/(rhoj*rhoj))*m; if (rn <= h && rn > 0.0f) { // compute pressure force d = (h-rn)*(h-rn); force->x -= pressure*grad*d/rn*r.x; force->y -= pressure*grad*d/rn*r.y; force->z -= pressure*grad*d/rn*r.z; // compute viscosity force d = (h - rn); force->x += mu*(vj.x - vi.x)*m/rhoj*lapl*d; force->y += mu*(vj.y - vi.y)*m/rhoj*lapl*d; force->z += mu*(vj.z - vi.z)*m/rhoj*lapl*d; // compute color gradient d = (h*h - rn*rn)*(h*h - rn*rn); colGra->x += m/rhoj*grad2*d*r.x; colGra->y += m/rhoj*grad2*d*r.y; colGra->z += m/rhoj*grad2*d*r.z; // compute color laplacian d = (h*h - rn*rn)*(3.0f*h*h - 7.0f*rn*rn); *colLapl += m/rhoj*lapl2*d; } } } //----------------------------------------------------------------------------- template<typename T> __global__ void copy_array (T* dst, T* src, unsigned int numElements) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numElements) { return; } dst[idx] = src[idx]; } //----------------------------------------------------------------------------- // HOST CODE //----------------------------------------------------------------------------- #define EMPTY_CELL 0xFFFFFFFF //----------------------------------------------------------------------------- // forward declaration of aux. functions //----------------------------------------------------------------------------- void create_particle_box (float sx, float sy, float sz, float d, unsigned int numParticles, float** particleVD, float** particleSD, unsigned int* numParticlesCreated); void set_simulation_domain (float xs, float ys, float zs, float xe, float ye, float ze, float gridSpacing, float gridSpacingSubParticles, SimulationParameters* parameters); void compute_particle_kernel_invocation_information (unsigned int& nThreadsBlock, unsigned int& nBlocks, unsigned int numParticles); void set_up_3d_float_texture (struct textureReference* texref, hipArray* arr, float* data, unsigned int dim[3]); //----------------------------------------------------------------------------- // Definition of ParticleSimulation class //----------------------------------------------------------------------------- ParticleSimulation::ParticleSimulation (): mParticleVertexData(NULL), mParticleSimulationData(NULL), mParticleVertexDataDevPtr(NULL), mParticleSimulationDataDevPtr(NULL), mParticleIdsDevPtr(NULL), mParticleHashListDevPtr(NULL), mCellStartListDevPtr(NULL), mCellEndListDevPtr(NULL), mIsSurfaceParticleDevPtr(NULL), mParticleVertexDataVbo(0), mNumBlocks(0), mThreadsPerBlock(0), mNumSubParticles(0), mNumTimeSteps(0) { memset(&mParameters, 0, sizeof(SimulationParameters)); } //----------------------------------------------------------------------------- ParticleSimulation::~ParticleSimulation() { // free host memory saveDeleteArray<float>(&mParticleVertexData); saveDeleteArray<float>(&mParticleSimulationData); // free device memory // free cuda memory cudaSafeFree<float>(&mParticleVertexDataDevPtr); cudaSafeFree<float>(&mParticleSimulationDataDevPtr); cudaSafeFree<float>(&mSubParticleVertexDataDevPtr); cudaSafeFree<float>(&mSubParticleSimulationDataDevPtr); cudaSafeFree<char>(&mParticleStatesDevPtr); cudaSafeFree<int>(&mParticleHashListDevPtr); cudaSafeFree<int>(&mCellStartListDevPtr); cudaSafeFree<int>(&mCellEndListDevPtr); cudaSafeFree<int>(&mIsSurfaceParticleDevPtr); cudaSafeFree<int>(&mParticleIdsDevPtr); cudaSafeFree<int>(&_isSplitDevPtr); cudaSafeFree<int>(&_isBoundaryDevPtr); cudaSafeFree<int>(&_isDefaultDevPtr); cudaSafeFree<int>(&_splitPrefixSumDevPtr); 
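    // (Note: mParticleIdsSplitDevPtr appears twice in the list of frees below;
    // cudaSafeFree presumably nulls the pointer it releases, which would make
    // the repeated call harmless.)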
cudaSafeFree<int>(&_boundaryPrefixSumDevPtr); cudaSafeFree<int>(&_defaultPrefixSumDevPtr); cudaSafeFree<int>(&mParticleIdsSplitDevPtr); cudaSafeFree<int>(&mParticleIdsDefaultDevPtr); cudaSafeFree<int>(&mSubParticleIdsDevPtr); cudaSafeFree<int>(&mParticleIdsBoundaryDevPtr); cudaSafeFree<int>(&mParticleIdsSplitDevPtr); cudaSafeFree<int>(&mSubParticleHashsDevPtr); cudaSafeFree<int>(&mSubParticleCellStartIdsDevPtr); cudaSafeFree<int>(&mSubParticleCellEndIdsDevPtr); // free OpenGL vertex buffer object if (mParticleVertexDataVbo != 0) { CUDA_SAFE_CALL( hipGraphicsUnregisterResource(mGraphicsResources[0]) ); CUDA_SAFE_CALL( hipGraphicsUnregisterResource(mGraphicsResources[1]) ); //hipGLUnregisterBufferObject(mParticleVertexDataVbo); // <- deprecated glDeleteBuffers(1, &mParticleVertexDataVbo); glDeleteBuffers(1, &mSubParticleVertexDataVbo); mParticleVertexDataVbo = 0; mSubParticleVertexDataVbo = 0; } } //----------------------------------------------------------------------------- ParticleSimulation* ParticleSimulation::Example01 () { // create a particle simulation ParticleSimulation* sim = new ParticleSimulation(); // create box (cube) of particles create_particle_box(-0.65f, -0.45f, -0.25f, 0.5f, 40000, &sim->mParticleVertexData, &sim->mParticleSimulationData, &sim->mParameters.numParticles); if (sim->mParticleVertexData == NULL || sim->mParticleSimulationData == NULL) { THROW_EXCEPTION("Could not allocate memory for particles (Host)."); } // set sph simulation related parameters sim->mParameters.kernelParticles = 20; sim->mParameters.restDensity = 998.648f; sim->mParameters.particleMass = sim->mParameters.restDensity*0.5f*0.5f*0.5f/ static_cast<float>(sim->mParameters.numParticles); sim->mParameters.subParticleMass = sim->mParameters.particleMass/8.0f; sim->mParameters.gasStiffness = 3.0f; sim->mParameters.dynamicViscosity = 3.0f; sim->mParameters.gravity = 9.81f; sim->mParameters.tensionCoefficient = 0.0728f; sim->mParameters.normThresh = 15.065f; // compute the kernel radius float h = powf((3.0f*0.5f*0.5f*0.5f*sim->mParameters.kernelParticles)/ (4.0f*M_PI*sim->mParameters.numParticles), 1.0f/3.0f); sim->mParameters.compactSupport = h; sim->mParameters.compactSupportSub = h/2.0f; sim->mParameters.poly6 = 315.0f/(64.0f*M_PI*h*h*h*h*h*h*h*h*h); sim->mParameters.gradPoly6 = -945.0f/(32.0f*M_PI*h*h*h*h*h*h*h*h*h); sim->mParameters.laplPoly6 = -945.0f/(32.0f*M_PI*h*h*h*h*h*h*h*h*h); sim->mParameters.gradSpiky = -45.0f/(M_PI*h*h*h*h*h*h); sim->mParameters.laplVisc = 45.0f/(M_PI*h*h*h*h*h*h); sim->mParameters.poly6Sub = sim->mParameters.poly6*512.0f; sim->mParameters.gradPoly6Sub = sim->mParameters.gradPoly6*512.0f; sim->mParameters.laplPoly6Sub = sim->mParameters.laplPoly6*512.0f; sim->mParameters.gradSpikySub = sim->mParameters.gradSpiky*64.0f; sim->mParameters.laplViscSub = sim->mParameters.laplVisc*64.0f; sim->mParameters.timeStep = 0.003; sim->mParameters.timeStepSubParticles = 0.001f; set_simulation_domain(-1.0f, -1.0f, -1.0f, 1.0f, 1.0f, 1.0f, h, h/2.0f, &sim->mParameters); // set fluid volume sim->mParameters.fluidVolume = 0.5f*0.5f*0.5f; // set parameters for boundary handling sim->mParameters.restitution = 0.0f; sim->mParameters.boxCen[0] = 0.0f; sim->mParameters.boxCen[1] = 0.0f; sim->mParameters.boxCen[2] = 0.0f; sim->mParameters.boxDim[0] = 0.7f; sim->mParameters.boxDim[1] = 0.5f; sim->mParameters.boxDim[2] = 0.3f; // set parameters for new boundary handling sim->_boundaryMapFileName = std::string("icosphere.txt"); // set parameters for surface extraction 
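    // (These two thresholds appear to drive the surface-particle test in
    // compute_particle_acceleration_ifsurf: a particle is flagged as surface
    // when it lies far from the center of mass of its neighborhood relative
    // to cmDistanceThresh, or when its neighbor count drops below nPartTresh.)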
sim->mParameters.cmDistanceThresh = 0.5f; sim->mParameters.nPartTresh = 20.0f; sim->_leftI = 0.0f; sim->_rightI = 1.0f; return sim; } //----------------------------------------------------------------------------- int* ParticleSimulation::CreateIsParticleSurfaceList (const ParticleSimulation* sim) { int* isSurfaceParticleList = new int[sim->mParameters.numParticles]; CUDA_SAFE_CALL( hipMemcpy(isSurfaceParticleList, sim->mIsSurfaceParticleDevPtr, sizeof(int)*sim->mParameters.numParticles, hipMemcpyDeviceToHost) ); int extr = 0; for (unsigned int i = 0; i < sim->mParameters.numParticles; i++) { extr += isSurfaceParticleList[i]; } printf("%d of %d extracted\n", extr, sim->mParameters.numParticles); return isSurfaceParticleList; } //----------------------------------------------------------------------------- void ParticleSimulation::FreeIsParticleSurfaceList (int** isSurfaceParticleList) { if (*isSurfaceParticleList == NULL) { return; } delete[] *isSurfaceParticleList; *isSurfaceParticleList = NULL; } //----------------------------------------------------------------------------- void ParticleSimulation::freeAll () { // free host memory saveDeleteArray<float>(&mParticleVertexData); saveDeleteArray<float>(&mParticleSimulationData); // free device memory // free cuda memory cudaSafeFree<float>(&mParticleSimulationDataDevPtr); cudaSafeFree<int>(&mParticleIdsDevPtr); cudaSafeFree<int>(&mParticleHashListDevPtr); cudaSafeFree<int>(&mCellStartListDevPtr); cudaSafeFree<int>(&mCellEndListDevPtr); cudaSafeFree<int>(&mIsSurfaceParticleDevPtr); // free OpenGL vertex buffer object if (mParticleVertexDataVbo != 0) { CUDA_SAFE_CALL( hipGraphicsUnregisterResource(mGraphicsResources[0]) ); CUDA_SAFE_CALL( hipGraphicsUnregisterResource(mGraphicsResources[1]) ); //hipGLUnregisterBufferObject(mParticleVertexDataVbo); // <- deprecated glDeleteBuffers(1, &mParticleVertexDataVbo); glDeleteBuffers(1, &mSubParticleVertexDataVbo); mParticleVertexDataVbo = 0; mSubParticleVertexDataVbo = 0; } } //----------------------------------------------------------------------------- void ParticleSimulation::Init () { // // free device memory, if previously allocated // // free cuda memory cudaSafeFree<float>(&mParticleSimulationDataDevPtr); cudaSafeFree<int>(&mParticleIdsDevPtr); cudaSafeFree<int>(&mParticleHashListDevPtr); cudaSafeFree<int>(&mCellStartListDevPtr); cudaSafeFree<int>(&mCellEndListDevPtr); cudaSafeFree<int>(&mIsSurfaceParticleDevPtr); // free OpenGL vertex buffer object if (mParticleVertexDataVbo != 0) { CUDA_SAFE_CALL( hipGraphicsUnregisterResource(mGraphicsResources[0]) ); CUDA_SAFE_CALL( hipGraphicsUnregisterResource(mGraphicsResources[1]) ); glDeleteBuffers(1, &mParticleVertexDataVbo); glDeleteBuffers(1, &mSubParticleVertexDataVbo); mParticleVertexDataVbo = 0; mSubParticleVertexDataVbo = 0; } // // allocate cuda device memory for storing the particles' vertex and // simulation data. // Vertex data is allocated on device using OpenGL, as it is stored // in an vertex buffer object, which is used for rendering later. // // Simulation data is allocated through cuda. 
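    // Layout reminder: both buffers are interleaved per particle, i.e. element
    // e of particle i lives at i*SD_NUM_ELEMENTS + e (simulation data) or
    // i*VD_NUM_ELEMENTS + e (vertex data); this is the indexing used by all
    // kernels above. For example, the y-velocity of particle 7 is
    // mParticleSimulationData[7*SD_NUM_ELEMENTS + SD_VEL0_Y].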
CUDA_SAFE_CALL( hipMalloc(&mParticleSimulationDataDevPtr, mParameters.numParticles*sizeof(float)*SD_NUM_ELEMENTS) ); // copy initial host data to device CUDA_SAFE_CALL( hipMemcpy(mParticleSimulationDataDevPtr, mParticleSimulationData, mParameters.numParticles*sizeof(float)*SD_NUM_ELEMENTS, hipMemcpyHostToDevice) ); // Vertex data is allocated through a vertex buffer object // the vbo is then registered to be used with CUDA glGenBuffers(1, &mParticleVertexDataVbo); glBindBuffer(GL_ARRAY_BUFFER, mParticleVertexDataVbo); glBufferData(GL_ARRAY_BUFFER, mParameters.numParticles*VD_NUM_ELEMENTS*sizeof(float), mParticleVertexData, GL_DYNAMIC_COPY); CUDA_SAFE_CALL( hipGraphicsGLRegisterBuffer(&mGraphicsResources[0], mParticleVertexDataVbo, hipGraphicsMapFlagsNone) ); //hipGLRegisterBufferObject(mParticleVertexDataVbo); // <- is deprecated // // alloc & Init additional aux. arrays for nearest neighbor search // const int* dim = mParameters.gridDim; unsigned int size = dim[0]*dim[1]*dim[2]*sizeof(int); CUDA_SAFE_CALL( hipMalloc(&mCellStartListDevPtr, size) ); CUDA_SAFE_CALL( hipMalloc(&mCellEndListDevPtr, size) ); // set each cell to be empty CUDA_SAFE_CALL( hipMemset(mCellStartListDevPtr, EMPTY_CELL, size) ); CUDA_SAFE_CALL( hipMemset(mCellEndListDevPtr, EMPTY_CELL, size) ); CUDA_SAFE_CALL( hipMalloc(&mParticleIdsDevPtr, mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc(&mParticleHashListDevPtr, mParameters.numParticles*sizeof(int)) ); // alloc dev memory for surface particle extraction CUDA_SAFE_CALL( hipMalloc(&mIsSurfaceParticleDevPtr, mParameters.numParticles*sizeof(int)) ); try { this->allocateMemoryTwoScale(); } catch (std::runtime_error& e) { std::cout << e.what() << std::endl; system("pause"); } // set up textures, for faster memory look-ups through caching // NOTE: VertexData needs to be mapped to get a valid device pointer, // as it is initial not allocated through CUDA's malloc hipChannelFormatDesc descf = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipChannelFormatDesc desci = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindSigned); hipChannelFormatDesc descu = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindUnsigned); CUDA_SAFE_CALL ( hipBindTexture(0, gParticleSimulationData, mParticleSimulationDataDevPtr, descf, sizeof(float)*SD_NUM_ELEMENTS*mParameters.numParticles) ); this->map(); CUDA_SAFE_CALL ( hipBindTexture(0, gParticleVertexData, mParticleVertexDataDevPtr, descf, sizeof(float)*VD_NUM_ELEMENTS*mParameters.numParticles) ); this->unmap(); CUDA_SAFE_CALL ( hipBindTexture(0, gCellStartList, mCellStartListDevPtr, desci, size) ); CUDA_SAFE_CALL ( hipBindTexture(0, gCellEndList, mCellEndListDevPtr, desci, size) ); CUDA_SAFE_CALL ( hipBindTexture(0, gSortedParticleIdList, mParticleIdsDevPtr, desci, mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL ( hipBindTexture(0, gParticleHashList, mParticleHashListDevPtr, desci, mParameters.numParticles*sizeof(int)) ); // set number of CUDA blocks and threads per blocks for each kernel // invocation // NOTE: - chose different values than 256 to try to get more performance // - make threadsPerBlock and blocks function parameters compute_particle_kernel_invocation_information(mThreadsPerBlock, mNumBlocks, mParameters.numParticles); this->setUpSphInComplexShapes(); } //----------------------------------------------------------------------------- // allocates and initializes memory needed for the two scale particle // simulation void ParticleSimulation::allocateMemoryTwoScale () { // create opengl 
vbo for storing the vertex information of the // sub particles glGenBuffers(1, &mSubParticleVertexDataVbo); glBindBuffer(GL_ARRAY_BUFFER, mSubParticleVertexDataVbo); glBufferData(GL_ARRAY_BUFFER, 8*mParameters.numParticles*VD_NUM_ELEMENTS* sizeof(float), NULL, GL_DYNAMIC_COPY); CUDA_SAFE_CALL( hipGraphicsGLRegisterBuffer(&mGraphicsResources[1], mSubParticleVertexDataVbo, hipGraphicsMapFlagsNone) ); // create opengl vbo for storing the ids of the particles in // default state glGenBuffers(1, &mParticleIdsDefaultVbo); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mParticleIdsDefaultVbo); glBufferData(GL_ELEMENT_ARRAY_BUFFER, mParameters.numParticles*sizeof(int), NULL, GL_DYNAMIC_COPY); CUDA_SAFE_CALL( hipGraphicsGLRegisterBuffer(&mGraphicsResources[2], mParticleIdsDefaultVbo, hipGraphicsMapFlagsNone) ); // create opengl vbo for storing the ids of the active sub particles glGenBuffers(1, &mSubParticleIdsVbo); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mSubParticleIdsVbo); glBufferData(GL_ELEMENT_ARRAY_BUFFER, mParameters.numParticles*sizeof(int)*8, NULL, GL_DYNAMIC_COPY); CUDA_SAFE_CALL( hipGraphicsGLRegisterBuffer(&mGraphicsResources[3], mSubParticleIdsVbo, hipGraphicsMapFlagsNone) ); CUDA_SAFE_CALL( hipMalloc(&mSubParticleSortedIdsDevPtr, sizeof(int)*8*mParameters.numParticles) ); CUDA_SAFE_CALL( hipMalloc(&mSubParticleSimulationDataDevPtr, 8*mParameters.numParticles*sizeof(float)*SD_NUM_ELEMENTS) ); CUDA_SAFE_CALL( hipMemset(mSubParticleSimulationDataDevPtr, 0, 8*mParameters.numParticles*sizeof(float)*SD_NUM_ELEMENTS) ); CUDA_SAFE_CALL( hipMalloc(&mSubParticleHashsDevPtr, 8*mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc(&mParticleStatesDevPtr, mParameters.numParticles*sizeof(char)) ); CUDA_SAFE_CALL( hipMemset(mParticleStatesDevPtr, 0, mParameters.numParticles*sizeof(char)) ); mParticleStates = new char[mParameters.numParticles]; CUDA_SAFE_CALL( hipMalloc(&_isSplitDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc(&_isBoundaryDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc(&_isDefaultDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc(&_splitPrefixSumDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc(&_boundaryPrefixSumDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc(&_defaultPrefixSumDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc(&mParticleIdsDefaultDevPtr, mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc(&mParticleIdsBoundaryDevPtr, mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc(&mParticleIdsSplitDevPtr, mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc(&mSubParticleCellStartIdsDevPtr, sizeof(int)*mParameters.gridDimSubParticles[0]* mParameters.gridDimSubParticles[1]* mParameters.gridDimSubParticles[2]) ); CUDA_SAFE_CALL( hipMalloc(&mSubParticleCellEndIdsDevPtr, sizeof(int)*mParameters.gridDimSubParticles[0]* mParameters.gridDimSubParticles[1]* mParameters.gridDimSubParticles[2]) ); } //----------------------------------------------------------------------------- void ParticleSimulation::Bind () const { // copy simulation parameters to constant memory on device. 
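    // gSimParamsDev is the constant-memory mirror of mParameters that the
    // kernels in this file read, so Bind() has to be called again whenever a
    // host-side parameter changes (SetNPartThresh and the CmDistanceThresh
    // setters do exactly that).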
CUDA_SAFE_CALL( hipMemcpyToSymbol(gSimParamsDev, (void*)&mParameters, sizeof(SimulationParameters)) ); } //----------------------------------------------------------------------------- void ParticleSimulation::Advance () { try { this->map(); this->computeParticleHash(); this->sortParticleIdsByHash(); this->computeCellStartEndList(); this->computeDensityPressure(); this->computeAcceleration(); this->computeParticleState(); this->collect(); this->initializeSubParticles(); //this->computeSubParticleHash(); //this->sortSubParticleIdsByHash(); //this->computeSubParticleCellStartEndList(); //this->projectQuantities(); //this->computeSubParticleDensityPressure(); //this->computeSubParticleAcceleration(); this->integrate(); //this->integrateSubParticles(); //this->handleCollisions(); //this->handleSubParticleCollisions(); this->unmap(); mNumTimeSteps++; } catch (runtime_error& e) { cout << e.what() << endl; system("pause"); } } //----------------------------------------------------------------------------- void ParticleSimulation::AdvanceSubParticles () { try { this->map(); this->computeSubParticleHash(); this->sortSubParticleIdsByHash(); this->computeSubParticleCellStartEndList(); this->computeSubParticleDensityPressure(); this->projectQuantities(); mTimer.Start(); this->computeSubParticleAcceleration(); mTimer.Stop(); mTimer.DumpElapsed(); this->integrateSubParticles(); this->handleSubParticleCollisions(); this->unmap(); } catch (runtime_error& e) { cout << e.what() << endl; system("pause"); } } //----------------------------------------------------------------------------- void ParticleSimulation::AdvanceTwoScale () { try { this->map(); this->computeParticleHash(); this->sortParticleIdsByHash(); this->computeCellStartEndList(); this->computeDensityPressure(); this->computeAcceleration(); this->computeParticleState(); this->collect(); this->initializeSubParticles(); this->projectQuantities(); this->computeSubParticleHash(); this->sortSubParticleIdsByHash(); this->computeSubParticleCellStartEndList(); this->computeSubParticleDensityPressure(); this->computeSubParticleAcceleration(); this->integrate(); this->integrateSubParticles(); this->handleCollisions(); this->handleSubParticleCollisions(); this->unmap(); mNumTimeSteps++; } catch (runtime_error& e) { cout << e.what() << endl; system("pause"); } } //----------------------------------------------------------------------------- /*void ParticleSimulation::Check3DTextures () const { // compute a higher res slice of the density data using intrinsic trilinear // interpolation to check of the textures have been set up correctly. 
unsigned int width = mParameters.gridDim[0]; unsigned int height = mParameters.gridDim[1]; float* sliceDataDevPtr; CUDA_SAFE_CALL( hipMalloc(&sliceDataDevPtr, sizeof(float)*width*height) ); dim3 blockSize(16, 16, 1); dim3 gridSize(width/blockSize.x + 1, height/blockSize.y + 1); create_density_slice <<<gridSize, blockSize>>> (sliceDataDevPtr, width, height, mParameters.gridDim[2]/2); float* sliceData = new float[width*height]; CUDA_SAFE_CALL( hipMemcpy(sliceData, sliceDataDevPtr, sizeof(float)*width*height, hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipFree(sliceDataDevPtr) ); PortablePixmap ppm(width, height, 255); float maxDensity = mBoundaryHandling->ComputeMaxDensity(); float restDistance = mBoundaryHandling->GetRestDistance(); for (unsigned int j = 0; j < height; j++) { for (unsigned int i = 0; i < width; i++) { unsigned int idx = i + j*width; float density = sliceData[idx]; ppm.setJET(i,j, std::abs(density)/restDistance); } } ppm.save("3dtextest.ppm"); delete[] sliceData; }*/ //----------------------------------------------------------------------------- float ParticleSimulation::GetParticleRadius () const { return powf((3.0*mParameters.fluidVolume)/ (4.0*M_PI*mParameters.numParticles), 1.0f/3.0f); } //----------------------------------------------------------------------------- float ParticleSimulation::GetSubParticleRadius () const { return 0.5f*this->GetParticleRadius(); } //----------------------------------------------------------------------------- const char* ParticleSimulation::GetParticleState () const { CUDA_SAFE_CALL( hipMemcpy(mParticleStates, mParticleStatesDevPtr, sizeof(char)*mParameters.numParticles, hipMemcpyDeviceToHost) ); return mParticleStates; } //----------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumParticles () const { return mParameters.numParticles; } //----------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumTimesSteps () const { return mNumTimeSteps; } //----------------------------------------------------------------------------- void ParticleSimulation::SetNPartThresh (float dVal) { mParameters.nPartTresh += dVal; printf("# particle thresh %f\n", mParameters.nPartTresh); this->Bind(); } //----------------------------------------------------------------------------- void ParticleSimulation::DecreaseCmDistanceThresh () { _rightI = mParameters.cmDistanceThresh; mParameters.cmDistanceThresh = 0.5f*(_rightI - _leftI); printf("cmDistance = %f\n", mParameters.cmDistanceThresh); this->Bind(); } //----------------------------------------------------------------------------- void ParticleSimulation::IncreaseCmDistanceThresh () { _leftI = mParameters.cmDistanceThresh; mParameters.cmDistanceThresh = 0.5f*(_rightI - _leftI); printf("cmDistance = %f\n", mParameters.cmDistanceThresh); this->Bind(); } //----------------------------------------------------------------------------- GLuint ParticleSimulation::GetGLParticleVertexBufferObject () const { return mParticleVertexDataVbo; } //----------------------------------------------------------------------------- GLuint ParticleSimulation::GetGLParticleIndexVertexBufferObject () const { return mParticleIdsDefaultVbo; } //----------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumParticlesDefault () const { return mNumParticlesDefault; } //---------------------------------------------------------------------------- GLuint 
ParticleSimulation::GetGLSubParticleVertexBufferObject () const { return mSubParticleVertexDataVbo; } //---------------------------------------------------------------------------- GLuint ParticleSimulation::GetGLSubParticleIndexVertexBufferObject () const { return mSubParticleIdsVbo; } //---------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumSubParticles () const { return mNumSubParticles; } //---------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumSubParticlesRegular () const { return mNumParticlesSplit*8; } //---------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumSubParticlesBoundary () const { return mNumParticlesBoundary*8; } //----------------------------------------------------------------------------- // Definition of private methods //----------------------------------------------------------------------------- void ParticleSimulation::computeParticleHash () { hipLaunchKernelGGL(( compute_particle_hash) , dim3(mNumBlocks), dim3(mThreadsPerBlock) , 0, 0, mParticleVertexDataDevPtr, mParticleIdsDevPtr, mParticleHashListDevPtr, mParameters.numParticles); } //----------------------------------------------------------------------------- void ParticleSimulation::computeSubParticleHash () { if (mNumSubParticles != 0) { hipLaunchKernelGGL(( compute_sub_particle_hash) , dim3(mNumBlocksSubParticle), dim3(mThreadsPerBlockSubParticle) , 0, 0, mSubParticleVertexDataDevPtr, mSubParticleIdsDevPtr, mSubParticleHashsDevPtr, mNumSubParticles); } } //----------------------------------------------------------------------------- void ParticleSimulation::sortParticleIdsByHash () { thrust::sort_by_key(thrust::device_ptr<int>(mParticleHashListDevPtr), thrust::device_ptr<int>(mParticleHashListDevPtr + mParameters.numParticles), thrust::device_ptr<int>(mParticleIdsDevPtr)); } //----------------------------------------------------------------------------- void ParticleSimulation::sortSubParticleIdsByHash () { hipLaunchKernelGGL(( copy_array <int>) , dim3(mNumBlocksSubParticle), dim3(mThreadsPerBlockSubParticle) , 0, 0, mSubParticleSortedIdsDevPtr, mSubParticleIdsDevPtr, mNumSubParticles); thrust::sort_by_key(thrust::device_ptr<int>(mSubParticleHashsDevPtr), thrust::device_ptr<int>(mSubParticleHashsDevPtr + mNumSubParticles), thrust::device_ptr<int>(mSubParticleSortedIdsDevPtr)); } //----------------------------------------------------------------------------- void ParticleSimulation::computeCellStartEndList () { int* dim = mParameters.gridDim; unsigned int size = dim[0]*dim[1]*dim[2]*sizeof(int); hipMemset(mCellStartListDevPtr, EMPTY_CELL, size); hipMemset(mCellEndListDevPtr, EMPTY_CELL, size); int sharedMemSize = sizeof(int)*(mThreadsPerBlock + 1); hipLaunchKernelGGL(( compute_cell_start_end) , dim3(mNumBlocks), dim3(mThreadsPerBlock), sharedMemSize, 0, mParticleHashListDevPtr, mCellStartListDevPtr, mCellEndListDevPtr, mParameters.numParticles); } //----------------------------------------------------------------------------- void ParticleSimulation::computeSubParticleCellStartEndList () { int* dim = mParameters.gridDimSubParticles; unsigned int size = dim[0]*dim[1]*dim[2]*sizeof(int); if (mNumSubParticles == 0) { return; } hipMemset(mSubParticleCellStartIdsDevPtr, EMPTY_CELL, size); hipMemset(mSubParticleCellEndIdsDevPtr, EMPTY_CELL, size); int sharedMemSize = sizeof(int)*(mThreadsPerBlockSubParticle + 1); hipLaunchKernelGGL(( 
compute_cell_start_end) , dim3(mNumBlocksSubParticle), dim3(mThreadsPerBlockSubParticle), sharedMemSize, 0, mSubParticleHashsDevPtr, mSubParticleCellStartIdsDevPtr, mSubParticleCellEndIdsDevPtr, mNumSubParticles); } //----------------------------------------------------------------------------- void ParticleSimulation::computeDensityPressure () { hipLaunchKernelGGL(( compute_particle_density_pressure) , dim3(mNumBlocks), dim3(mThreadsPerBlock) , 0, 0, mParticleVertexDataDevPtr,mParticleSimulationDataDevPtr, mParticleIdsDevPtr, mCellStartListDevPtr, mCellEndListDevPtr); } //----------------------------------------------------------------------------- void ParticleSimulation::computeSubParticleDensityPressure () { if (mNumSubParticles == 0) { return; } hipLaunchKernelGGL(( compute_sub_particle_density_pressure) , dim3(mNumBlocksSubParticle), dim3(mThreadsPerBlockSubParticle) , 0, 0, mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mSubParticleIdsDevPtr, mSubParticleSortedIdsDevPtr, mSubParticleCellStartIdsDevPtr, mSubParticleCellEndIdsDevPtr, mNumParticlesSplit*8); } //----------------------------------------------------------------------------- void ParticleSimulation::computeAcceleration () { hipLaunchKernelGGL(( compute_particle_acceleration_ifsurf) , dim3(mNumBlocks), dim3(mThreadsPerBlock) , 0, 0, mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr, mParticleIdsDevPtr, mCellStartListDevPtr, mCellEndListDevPtr, mIsSurfaceParticleDevPtr); } //----------------------------------------------------------------------------- //__global__ void compute_sub_particle_acceleration // (float* subParticleVertexData, float* subParticleSimulationData, // int* subParticleIdList, int* subParticleSortedIdList, int* cellStartList, // int* cellEndList, unsigned int numParticles) void ParticleSimulation::computeSubParticleAcceleration () { if (mNumParticlesSplit == 0) { return; } hipLaunchKernelGGL(( compute_sub_particle_acceleration) , dim3(mNumBlocksSubParticle), dim3(mThreadsPerBlockSubParticle) , 0, 0, mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mSubParticleIdsDevPtr, mSubParticleSortedIdsDevPtr, mSubParticleCellStartIdsDevPtr, mSubParticleCellEndIdsDevPtr, mNumParticlesSplit*8); } //----------------------------------------------------------------------------- void ParticleSimulation::projectQuantities () { if (mNumSubParticles == 0) { return; } hipLaunchKernelGGL(( project_quantities) , dim3(mNumBlocksSubParticleBoundary), dim3(mThreadsPerBlockSubParticleBoundary) , 0, 0, mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr, mSubParticleIdsDevPtr, 8*mNumParticlesBoundary, 8*mNumParticlesSplit); } //----------------------------------------------------------------------------- void ParticleSimulation::integrate () { hipLaunchKernelGGL(( integrate_euler) , dim3(mNumBlocks), dim3(mThreadsPerBlock) , 0, 0, mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr); } //----------------------------------------------------------------------------- //__global__ void integrate_sub_particles_euler (float* subParticleVertexData, // float* subParticleSimulationData, int* subParticleIds, // unsigned int nSubParticles, unsigned int offset) void ParticleSimulation::integrateSubParticles () { if (mNumSubParticles != 0) { hipLaunchKernelGGL(( integrate_sub_particles_euler) , dim3(mNumBlocksSubParticle), dim3(mThreadsPerBlockSubParticle) , 0, 0, mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, 
mSubParticleIdsDevPtr, mNumSubParticles, 0); } } //----------------------------------------------------------------------------- void ParticleSimulation::handleCollisions () { hipLaunchKernelGGL(( collision_handling) , dim3(mNumBlocks), dim3(mThreadsPerBlock) , 0, 0, mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr); } //----------------------------------------------------------------------------- void ParticleSimulation::handleSubParticleCollisions () { if (mNumSubParticles == 0) { return; } hipLaunchKernelGGL(( collision_handling_sub_particles) , dim3(mNumBlocksSubParticle), dim3(mThreadsPerBlockSubParticle) , 0, 0, mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mSubParticleIdsDevPtr, mNumSubParticles, 0); } //----------------------------------------------------------------------------- void ParticleSimulation::computeParticleState () { hipLaunchKernelGGL(( shift_state) , dim3(mNumBlocks), dim3(mThreadsPerBlock) , 0, 0, mParticleStatesDevPtr); hipLaunchKernelGGL(( find_split_particles) , dim3(mNumBlocks), dim3(mThreadsPerBlock) , 0, 0, mParticleVertexDataDevPtr, mParticleStatesDevPtr, mParticleIdsDevPtr, mCellStartListDevPtr, mCellEndListDevPtr); hipLaunchKernelGGL(( find_boundary_particles) , dim3(mNumBlocks), dim3(mThreadsPerBlock) , 0, 0, mParticleVertexDataDevPtr, mParticleStatesDevPtr, mParticleIdsDevPtr, mCellStartListDevPtr, mCellEndListDevPtr); } //----------------------------------------------------------------------------- // Collects all id's of active sub particles, particles with state "split", // particles with state "boundary", particles with that "default" in their own // arrays and computes the total number of each particle. void ParticleSimulation::collect () { CUDA_SAFE_CALL( hipMemset(_isSplitDevPtr, 0, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( hipMemset(_isBoundaryDevPtr, 0, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( hipMemset(_isDefaultDevPtr, 0, (mParameters.numParticles + 1)*sizeof(int)) ); hipLaunchKernelGGL(( check_split_boundary_default) , dim3(mNumBlocks), dim3(mThreadsPerBlock) , 0, 0, mParticleStatesDevPtr, _isSplitDevPtr, _isBoundaryDevPtr, _isDefaultDevPtr); thrust::exclusive_scan(thrust::device_ptr<int>(_isSplitDevPtr), thrust::device_ptr<int>(_isSplitDevPtr + mParameters.numParticles + 1), thrust::device_ptr<int>(_splitPrefixSumDevPtr)); thrust::exclusive_scan(thrust::device_ptr<int>(_isBoundaryDevPtr), thrust::device_ptr<int>(_isBoundaryDevPtr + mParameters.numParticles + 1), thrust::device_ptr<int>(_boundaryPrefixSumDevPtr)); thrust::exclusive_scan(thrust::device_ptr<int>(_isDefaultDevPtr), thrust::device_ptr<int>(_isDefaultDevPtr + mParameters.numParticles + 1), thrust::device_ptr<int>(_defaultPrefixSumDevPtr)); CUDA_SAFE_CALL( hipMemcpy(&mNumParticlesSplit, &_splitPrefixSumDevPtr[mParameters.numParticles], sizeof(int), hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipMemcpy(&mNumParticlesBoundary, &_boundaryPrefixSumDevPtr[mParameters.numParticles], sizeof(int), hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipMemcpy(&mNumParticlesDefault, &_defaultPrefixSumDevPtr[mParameters.numParticles], sizeof(int), hipMemcpyDeviceToHost) ); hipLaunchKernelGGL(( collect_ids) , dim3(mNumBlocks), dim3(mThreadsPerBlock) , 0, 0, mSubParticleIdsDevPtr, mSubParticleSortedIdsDevPtr, mParticleIdsSplitDevPtr, mParticleIdsBoundaryDevPtr, mParticleIdsDefaultDevPtr, _isSplitDevPtr, _isBoundaryDevPtr, _isDefaultDevPtr, _splitPrefixSumDevPtr, _boundaryPrefixSumDevPtr, _defaultPrefixSumDevPtr, mNumParticlesSplit); 
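    // The exclusive scans above turn the 0/1 state flags into contiguous
    // output slots for collect_ids. Because each flag array holds
    // numParticles + 1 entries with the last one left at zero, entry
    // [numParticles] of every prefix sum equals the total count of that
    // category: e.g. flags 1,0,1,1,0,(0) scan to 0,1,1,2,3,(3), so the final
    // entry 3 is the number of split particles, which is what the memcpys
    // above read back.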
mNumSubParticles = 8*(mNumParticlesSplit + mNumParticlesBoundary); // compute how many cuda blocks and how many threads a block are needed for // split particles, boundary particles, default particles. compute_particle_kernel_invocation_information(mThreadsPerBlockSplit, mNumBlocksSplit, mNumParticlesSplit); compute_particle_kernel_invocation_information(mThreadsPerBlockBoundary, mNumBlocksBoundary, mNumParticlesBoundary); compute_particle_kernel_invocation_information(mThreadsPerBlockDefault, mNumBlocksDefault, mNumParticlesDefault); compute_particle_kernel_invocation_information(mThreadsPerBlockSubParticle, mNumBlocksSubParticle, mNumSubParticles); compute_particle_kernel_invocation_information (mThreadsPerBlockSubParticleBoundary, mNumBlocksSubParticleBoundary, 8*mNumParticlesBoundary); compute_particle_kernel_invocation_information (mThreadsPerBlockSubParticleRegular, mNumBlocksSubParticleRegular, 8*mNumParticlesSplit); } //----------------------------------------------------------------------------- void ParticleSimulation::initializeSubParticles () { // initializes new sub particles if a parent particle has changed its // state from "default" to "boundary" or "split" if (mNumParticlesSplit > 0) { hipLaunchKernelGGL(( initialize_sub_particles) , dim3(mNumBlocksSplit), dim3(mThreadsPerBlockSplit), 0, 0, mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mParticleIdsSplitDevPtr, mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr, mParticleStatesDevPtr, mNumParticlesSplit); } if (mNumParticlesBoundary > 0) { hipLaunchKernelGGL(( initialize_sub_particles) , dim3(mNumBlocksBoundary), dim3(mThreadsPerBlockBoundary), 0, 0, mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mParticleIdsBoundaryDevPtr, mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr, mParticleStatesDevPtr, mNumParticlesBoundary); } } //----------------------------------------------------------------------------- void ParticleSimulation::setUpSphInComplexShapes () { // set up the three dimensional textures that store the boundary // information of the SPH in complex shapes paper (density contribution, // distances). 
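    // The simulation kernels later sample these textures with normalized
    // coordinates, e.g. u = (pos.x - gBoundaryGridOrigin[0])/gBoundaryGridLength[0]
    // (and likewise for v and w), so the grid length uploaded below must span
    // exactly (dimension - 1)*spacing per axis.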
// create boundary handling data Wm5::Vector3f s(-0.8f, -0.6f, -0.4f); Wm5::Vector3f e(0.8f, 0.6f, 0.4f); float h = mParameters.compactSupport; float particleSpacing = std::powf(mParameters.particleMass/mParameters.restDensity, 1.0f/3.0f); float mass = mParameters.particleMass; SphInComplexShapes* mBoundaryHandling = new SphInComplexShapes(s, e, h/4.0f, h, h, mass, mParameters.restDensity, mParameters.dynamicViscosity, particleSpacing); Wm5::Box3f b(Wm5::Vector3f(0.0f, 0.0f, 0.0f), Wm5::Vector3f(1.0f, 0.0f, 0.0f), Wm5::Vector3f(0.0f, 1.0f, 0.0f), Wm5::Vector3f(0.0f, 0.0f, 1.0f), 0.7f, 0.5f, 0.3f); mBoundaryHandling->SetBox(b); mBoundaryHandling->SaveSlicedDistanceMapToPpm("distances.ppm"); mBoundaryHandling->SaveSlicedViscosityMapToPpm("viscosities.ppm"); // send boundary grid information to device float gridOrigin[3]; gridOrigin[0] = mBoundaryHandling->GetGridStart().X(); gridOrigin[1] = mBoundaryHandling->GetGridStart().Y(); gridOrigin[2] = mBoundaryHandling->GetGridStart().Z(); unsigned int gridDimensions[3]; gridDimensions[0] = mBoundaryHandling->GetGridDimension(0); gridDimensions[1] = mBoundaryHandling->GetGridDimension(1); gridDimensions[2] = mBoundaryHandling->GetGridDimension(2); float gridSpacing = mBoundaryHandling->GetGridSpacing(); float restDistance = mBoundaryHandling->GetRestDistance(); float gridLength[3]; gridLength[0] = (gridDimensions[0] - 1)*gridSpacing; gridLength[1] = (gridDimensions[1] - 1)*gridSpacing; gridLength[2] = (gridDimensions[2] - 1)*gridSpacing; CUDA_SAFE_CALL( hipMemcpyToSymbol(gBoundaryGridOrigin, gridOrigin, 3*sizeof(float)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(gBoundaryGridDimensions, gridDimensions, 3*sizeof(unsigned int)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(gBoundaryGridLength, gridLength, 3*sizeof(float)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(gBoundaryGridSpacing, &gridSpacing, sizeof(float)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(gBoundaryRestDistance, &restDistance, sizeof(float)) ); // set up 3d textures float* densityTexData = SphInComplexShapes::CreateDensityTextureData(*mBoundaryHandling); set_up_3d_float_texture(&gBoundaryDensities, mBoundaryDensities, densityTexData, gridDimensions); delete[] densityTexData; float* distanceTexData = SphInComplexShapes::CreateDistanceTextureData(*mBoundaryHandling); set_up_3d_float_texture(&gBoundaryDistances, mBoundaryDistances, distanceTexData, gridDimensions); delete[] distanceTexData; float* viscosityTexData = SphInComplexShapes::CreateViscosityTextureData(*mBoundaryHandling); set_up_3d_float_texture(&gBoundaryViscosities, mBoundaryViscosities, viscosityTexData, gridDimensions); delete[] viscosityTexData; delete mBoundaryHandling; // set up wall textures for sub particles /* { float h = mParameters.compactSupportSub; float particleSpacing = std::powf(mParameters.subParticleMass/mParameters.restDensity, 1.0f/3.0f); float mass = mParameters.subParticleMass; SphInComplexShapes* mBoundaryHandling = new SphInComplexShapes(s, e, h/4.0f, h, h, mass, mParameters.restDensity, mParameters.dynamicViscosity, particleSpacing); }*/ } //----------------------------------------------------------------------------- void ParticleSimulation::map () { hipGraphicsMapResources(4, mGraphicsResources); size_t nBytes; hipGraphicsResourceGetMappedPointer (reinterpret_cast<void**>(&mParticleVertexDataDevPtr), &nBytes, mGraphicsResources[0]); hipGraphicsResourceGetMappedPointer (reinterpret_cast<void**>(&mSubParticleVertexDataDevPtr), &nBytes, mGraphicsResources[1]); hipGraphicsResourceGetMappedPointer 
(reinterpret_cast<void**>(&mParticleIdsDefaultDevPtr), &nBytes, mGraphicsResources[2]); hipGraphicsResourceGetMappedPointer (reinterpret_cast<void**>(&mSubParticleIdsDevPtr), &nBytes, mGraphicsResources[3]); } void ParticleSimulation::unmap () { hipGraphicsUnmapResources(4, mGraphicsResources); //hipGLUnmapBufferObject(mParticleVertexDataVbo); } //----------------------------------------------------------------------------- void ParticleSimulation::SaveInfoTable (const std::string& filename) { using namespace std; ofstream file; file.open(filename); int* pIdList = new int[mParameters.numParticles]; int* pHashList = new int[mParameters.numParticles]; int cellListSize = mParameters.gridDim[0]*mParameters.gridDim[1]* mParameters.gridDim[2]; int* pCellStartList = new int[cellListSize]; int* pCellEndList = new int[cellListSize]; //this->map(); hipMemcpy(pHashList, mParticleHashListDevPtr, mParameters.numParticles*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(pIdList, mParticleIdsDevPtr, mParameters.numParticles*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(pCellStartList, mCellStartListDevPtr, cellListSize*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(pCellEndList, mCellEndListDevPtr, cellListSize*sizeof(int), hipMemcpyDeviceToHost); file << "Number of particles " << mParameters.numParticles << endl; file << setw(8) << "index" << setw(12) << " id" << setw(12) << " hash" << setw(12) << " start" << setw(12) << " end" << endl; for (unsigned int i = 0; i < cellListSize; i++) { file << setw(8) << i; if(i < mParameters.numParticles) { file << setw(12) << pIdList[i]; file << setw(12) << pHashList[i]; } else { file << setw(12) << ""; file << setw(12) << ""; } if(pCellStartList[i] == EMPTY_CELL) { file << setw(12) << ""; } else { file << setw(12) << pCellStartList[i]; } if(pCellEndList[i] == EMPTY_CELL) { file << setw(12) << "" << endl; } else { file << setw(12) << pCellEndList[i] << endl; } } delete[] pIdList; delete[] pHashList; delete[] pCellStartList; delete[] pCellEndList; file.close(); //this->unmap(); } //----------------------------------------------------------------------------- void ParticleSimulation::SaveParticleInfo (const std::string& filename) { using namespace std; this->map(); ofstream file; file.open(filename); float* particleVertexData = new float[VD_NUM_ELEMENTS*mParameters.numParticles]; float* particleSimulationData = new float[SD_NUM_ELEMENTS*mParameters.numParticles]; // copy particle information from device to host hipMemcpy(particleVertexData, mParticleVertexDataDevPtr, VD_NUM_ELEMENTS*mParameters.numParticles*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(particleSimulationData, mParticleSimulationDataDevPtr, SD_NUM_ELEMENTS*mParameters.numParticles*sizeof(float), hipMemcpyDeviceToHost); // set max. 
chars for each column of the table int columnWidth = 20; file << setw(columnWidth) << "Index"; file << setw(columnWidth) << "X"; file << setw(columnWidth) << "Y"; file << setw(columnWidth) << "Z"; file << setw(columnWidth) << "Density"; file << setw(columnWidth) << "Pressure"; file << setw(columnWidth) << "Acc X"; file << setw(columnWidth) << "Acc Y"; file << setw(columnWidth) << "Acc Z"; file << endl; for (unsigned int i = 0; i < mParameters.numParticles; i++) { file << setw(columnWidth) << i; file << setw(columnWidth) << particleVertexData[VD_NUM_ELEMENTS*i + VD_POS_X]; file << setw(columnWidth) << particleVertexData[VD_NUM_ELEMENTS*i + VD_POS_Y]; file << setw(columnWidth) << particleVertexData[VD_NUM_ELEMENTS*i + VD_POS_Z]; file << setw(columnWidth) << particleSimulationData[SD_NUM_ELEMENTS*i + SD_DENSITY]; file << setw(columnWidth) << particleSimulationData[SD_NUM_ELEMENTS*i + SD_PRESSURE]; file << setw(columnWidth) << particleSimulationData[SD_NUM_ELEMENTS*i + SD_ACC_X]; file << setw(columnWidth) << particleSimulationData[SD_NUM_ELEMENTS*i + SD_ACC_Y]; file << setw(columnWidth) << particleSimulationData[SD_NUM_ELEMENTS*i + SD_ACC_Z]; // TODO: rest of the params. file << endl; } delete[] particleVertexData; delete[] particleSimulationData; file.close(); this->unmap(); } //----------------------------------------------------------------------------- /* unsigned int ParticleSimulation::GetSizeMemoryGPU () const { float size; size += mNumPar }*/ //----------------------------------------------------------------------------- // definition of aux. functions //----------------------------------------------------------------------------- void create_particle_box (float sx, float sy, float sz, float d, unsigned int numParticles, float** particleVD, float** particleSD, unsigned int* numParticlesCreated) { // Creates a set of particles, that are aligned in a cube, given the starting // point of the box [sx, sy, sz] the length of the cube in each direction [d] // and the approximate amount of total particles [numParticles]. // // Returns a pointer to the vertex data of the particles in [particleVD] and // a pointer to the simulation data of the particles in [particleSD] and the // actual amount of particles created // computed number of particles in each direction unsigned int num = pow(static_cast<double>(numParticles), 1.0/3.0); *numParticlesCreated = num*num*num; *particleVD = new float[*numParticlesCreated*VD_NUM_ELEMENTS]; *particleSD = new float[*numParticlesCreated*SD_NUM_ELEMENTS]; // check if new failed. 
if ((*particleVD) == NULL || (*particleSD) == NULL)
{
    *numParticlesCreated = 0;
    return;
}

// compute spatial increment
float dx = d/static_cast<float>(num - 1);

// seed the particles inside the cube
// set the position of each particle
unsigned int idx;

for (unsigned int k = 0; k < num; k++)
{
    for (unsigned int j = 0; j < num; j++)
    {
        for (unsigned int i = 0; i < num; i++)
        {
            idx = VD_NUM_ELEMENTS*(num*(num*k+j)+i);
            (*particleVD)[idx + VD_POS_X] = sx + i*dx;
            (*particleVD)[idx + VD_POS_Y] = sy + j*dx;
            (*particleVD)[idx + VD_POS_Z] = sz + k*dx;
        }
    }
}

// set the other particle attributes to 0.0f
memset((*particleSD), 0,
    sizeof(float)*SD_NUM_ELEMENTS*(*numParticlesCreated));
}
//-----------------------------------------------------------------------------
void set_simulation_domain (float xs, float ys, float zs, float xe, float ye,
    float ze, float gridSpacing, float gridSpacingSubParticles,
    SimulationParameters* parameters)
{
    // Sets the simulation domain in the [parameters], based on a starting
    // point [xs, ys, zs], an ending point [xe, ye, ze] and the distance
    // between two grid points [gridSpacing].
    parameters->gridOrigin[0] = xs;
    parameters->gridOrigin[1] = ys;
    parameters->gridOrigin[2] = zs;
    parameters->gridDim[0] = static_cast<int>((xe - xs)/gridSpacing + 0.5);
    parameters->gridDim[1] = static_cast<int>((ye - ys)/gridSpacing + 0.5);
    parameters->gridDim[2] = static_cast<int>((ze - zs)/gridSpacing + 0.5);
    parameters->gridDimSubParticles[0] =
        static_cast<int>((xe - xs)/gridSpacingSubParticles + 0.5);
    parameters->gridDimSubParticles[1] =
        static_cast<int>((ye - ys)/gridSpacingSubParticles + 0.5);
    parameters->gridDimSubParticles[2] =
        static_cast<int>((ze - zs)/gridSpacingSubParticles + 0.5);
    parameters->gridSpacing = gridSpacing;
    parameters->gridSpacingSubParticles = gridSpacingSubParticles;
}
//-----------------------------------------------------------------------------
void compute_particle_kernel_invocation_information
    (unsigned int& nThreadsBlock, unsigned int& nBlocks,
    unsigned int numParticles)
{
    if (numParticles == 0)
    {
        nThreadsBlock = 0;
        nBlocks = 0;
        return;
    }

    nThreadsBlock = numParticles > 256 ? 256 : numParticles;
    nBlocks = numParticles % nThreadsBlock == 0 ?
        numParticles/nThreadsBlock : numParticles/nThreadsBlock + 1;
}
//-----------------------------------------------------------------------------
void set_up_3d_float_texture (struct textureReference* texRef, hipArray* arr,
    float* data, unsigned int dim[3])
{
    // NOTE: [arr] is passed by value, so the array allocated below is only
    // visible inside this function; the caller's member pointer is not
    // updated. The texture binding still works because it uses the local copy.

    // debug! set all arr vals to 0.5f
    /*for (unsigned int i = 0; i < dim[0]*dim[1]*dim[2]; i++)
    {
        if (i % 2)
        {
            data[i] = 1.0f;
        }
        else
        {
            data[i] = 0.5f;
        }
    }*/

    // set allocation parameters
    hipChannelFormatDesc descf = hipCreateChannelDesc(32, 0, 0, 0,
        hipChannelFormatKindFloat);
    hipExtent extent;
    extent.width = dim[0];
    extent.height = dim[1];
    extent.depth = dim[2];

    // alloc cuda array
    CUDA_SAFE_CALL( hipMalloc3DArray(&arr, &descf, extent) );

    // set copy parameters
    hipMemcpy3DParms copyParams;
    memset(&copyParams, 0, sizeof(hipMemcpy3DParms));
    copyParams.srcPtr = make_hipPitchedPtr((void *)data,
        extent.width*sizeof(float), extent.width, extent.height);
    copyParams.dstArray = arr;
    copyParams.extent = extent;
    copyParams.kind = hipMemcpyHostToDevice;

    // transfer 3d data to cuda array
    CUDA_SAFE_CALL( hipMemcpy3D(&copyParams) );

    // set texture parameters
    texRef->normalized = true;
    texRef->filterMode = hipFilterModeLinear;
    texRef->addressMode[0] = hipAddressModeClamp;
    texRef->addressMode[1] = hipAddressModeClamp;
    texRef->addressMode[2] = hipAddressModeClamp;

    // bind array to global texture
    hipBindTextureToArray(texRef, arr, &descf);
}
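//-----------------------------------------------------------------------------
// A minimal usage sketch (not part of the original simulation) showing how
// create_particle_box and compute_particle_kernel_invocation_information fit
// together; the function name and all numeric values below are hypothetical
// and chosen only for illustration.
static void example_seed_and_configure ()
{
    float* vertexData = NULL;
    float* simulationData = NULL;
    unsigned int numCreated = 0;

    // request ~1024 particles in a cube of edge length 0.5f starting at
    // (-0.25, -0.25, -0.25); the helper rounds down to a full 10x10x10 grid,
    // so numCreated becomes 1000
    create_particle_box(-0.25f, -0.25f, -0.25f, 0.5f, 1024, &vertexData,
        &simulationData, &numCreated);

    // derive the launch configuration: the block size is capped at 256
    // threads and the block count is rounded up, i.e. 1000 particles map to
    // 4 blocks of 256 threads
    unsigned int threadsPerBlock = 0;
    unsigned int numBlocks = 0;
    compute_particle_kernel_invocation_information(threadsPerBlock, numBlocks,
        numCreated);

    delete[] vertexData;
    delete[] simulationData;
}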
eaedf9eaaccab0fc1d415da48079d0bf587e763f.cu
#include <thrust\sort.h> #include <thrust\device_ptr.h> #include <thrust\for_each.h> #include <thrust\iterator\zip_iterator.h> #include <iostream> #include <iomanip> #include <fstream> #include "particle_simulation.h" #include "util.h" #include "cgtk\include\clock.h" #include "boundary_map.h" #include <thrust\scan.h> #include <stdexcept> #include "portable_pixmap.h" #include "arr.h" using namespace std; //----------------------------------------------------------------------------- // DEVICE CODE //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- // global device variables //----------------------------------------------------------------------------- __constant__ SimulationParameters gSimParamsDev; texture<float, cudaTextureType1D, cudaReadModeElementType> gParticleVertexData; texture<float, cudaTextureType1D, cudaReadModeElementType> gParticleSimulationData; texture<int, cudaTextureType1D, cudaReadModeElementType> gCellStartList; texture<int, cudaTextureType1D, cudaReadModeElementType> gCellEndList; texture<int, cudaTextureType1D, cudaReadModeElementType> gSortedParticleIdList; texture<int, cudaTextureType1D, cudaReadModeElementType> gParticleHashList; // information about boundary handling __constant__ float gBoundaryGridOrigin[3]; __constant__ float gBoundaryGridSpacing; __constant__ unsigned int gBoundaryGridDimensions[3]; __constant__ float gBoundaryGridLength[3]; __constant__ float gBoundaryRestDistance; texture<float, cudaTextureType3D, cudaReadModeElementType> gBoundaryDistances; texture<float, cudaTextureType3D, cudaReadModeElementType> gBoundaryDensities; texture<float, cudaTextureType3D, cudaReadModeElementType> gBoundaryViscosities; //----------------------------------------------------------------------------- // declaration of aux. 
functions (device) //----------------------------------------------------------------------------- __device__ inline int3 compute_grid_coordinate (float3 pos, float d); __device__ inline int3 compute_grid_coordinate_sub_particles(float3 pos, float d); __device__ inline int compute_hash_from_grid_coordinate (int i, int j, int k); __device__ inline int compute_hash_from_grid_coordinate_sub_particle (int i, int j, int k); __device__ inline float compute_distance (float3 a, float3 b); __device__ inline float compute_squared_distance (float3 a, float3 b); __device__ inline float norm (const float3& a); __device__ inline void normalize (float3& a); __device__ inline float dot_product (const float3& a, const float3& b); __device__ float compute_particle_density_cell (const float3 &pos, float* pParticleList, int* pParticleIdList, int start, int end); __device__ float compute_sub_particle_density_cell (const float3 &pos, float* particleVertexData, int* particleIdList, int start, int end); __device__ inline void compute_viscosity_pressure_forces_and_ifsurf_cell (const float3& xi, float rhoi, float pi, const float3& vi, float* particleVertexData, float* particleSimulationData, int* particleIdList, int start, int end, float3* pressureForce, float3* viscosityForce, float3* colGra, float* colLapl, float3* sumPosNeighbor, float* nNeighbors); __device__ inline void compute_sub_particle_viscosity_pressure_forces_cell (const float3& xi, float rhoi, float pi, const float3& vi, float* particleVertexData, float* particleSimulationData, int* particleIdList, int start, int end, float3* force, float3* colGra, float* colLapl); __device__ inline void project_quantities_cell (float3& acc, float& density, float& pressure, float& numNeighbors, const float3& xi, int start, int end); //----------------------------------------------------------------------------- // CUDA Kernel definitions //----------------------------------------------------------------------------- __global__ void compute_particle_hash (float* particleVertexData, int* particleIdList, int* particleHashList, unsigned int numParticles) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx >= numParticles) { return; } // calculate corresponding gridpoint int x = (int)((tex1Dfetch(gParticleVertexData, idx*VD_NUM_ELEMENTS + VD_POS_X) - gSimParamsDev.gridOrigin[0])/gSimParamsDev.gridSpacing); int y = (int)((tex1Dfetch(gParticleVertexData, idx*VD_NUM_ELEMENTS + VD_POS_Y) - gSimParamsDev.gridOrigin[1])/gSimParamsDev.gridSpacing); int z = (int)((tex1Dfetch(gParticleVertexData, idx*VD_NUM_ELEMENTS + VD_POS_Z) - gSimParamsDev.gridOrigin[2])/gSimParamsDev.gridSpacing); // wrap outer particles to grid // TODO: modulo operation using "&" is faster, requires grid dims of // power of two x = x % gSimParamsDev.gridDim[0]; y = y % gSimParamsDev.gridDim[1]; z = z % gSimParamsDev.gridDim[2]; // calculate hash, i.e. 
grid cell id int hash = gSimParamsDev.gridDim[0]*(gSimParamsDev.gridDim[1]*z + y) + x; particleIdList[idx] = idx; particleHashList[idx] = hash; } //----------------------------------------------------------------------------- __global__ void compute_sub_particle_hash (float* particleVertexData, int* particleIdList, int* particleHashList, unsigned int numParticles) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } int id = particleIdList[idx]; // calculate corresponding gridpoint int x = (int)((particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X] - gSimParamsDev.gridOrigin[0])/gSimParamsDev.gridSpacingSubParticles); int y = (int)((particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y] - gSimParamsDev.gridOrigin[1])/gSimParamsDev.gridSpacingSubParticles); int z = (int)((particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z] - gSimParamsDev.gridOrigin[2])/gSimParamsDev.gridSpacingSubParticles); // wrap outer particles to grid // TODO: modulo operation using "&" is faster, requires grid dims of // power of two x = x % gSimParamsDev.gridDimSubParticles[0]; y = y % gSimParamsDev.gridDimSubParticles[1]; z = z % gSimParamsDev.gridDimSubParticles[2]; // calculate hash, i.e. grid cell id int hash = gSimParamsDev.gridDimSubParticles[0]* (gSimParamsDev.gridDimSubParticles[1]*z + y) + x; particleHashList[idx] = hash; } //----------------------------------------------------------------------------- __global__ void compute_cell_start_end (int* particleHashList, int* cellStartList, int* cellEndList, unsigned int numParticles) { extern __shared__ int sharedHash[]; unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; int hash; if (idx < numParticles) { hash = particleHashList[idx]; sharedHash[threadIdx.x + 1] = hash; if (idx > 0 && threadIdx.x == 0) { sharedHash[0] = particleHashList[idx - 1]; } } __syncthreads(); if (idx < numParticles) { if (idx == 0 || hash != sharedHash[threadIdx.x]) { cellStartList[hash] = idx; if (idx > 0) { cellEndList[sharedHash[threadIdx.x]] = idx; } } if (idx == numParticles - 1) { cellEndList[hash] = idx + 1; } } } //----------------------------------------------------------------------------- // Compute density and pressure for each particle __global__ void compute_particle_density_pressure (float* particleVertexData, float* particleSimulationData, int* particleIdList, int* cellStartList, int* cellEndList) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx >= gSimParamsDev.numParticles) { return; } int id = particleIdList[idx]; float density = 0.0f; float pressure; float3 pos; // get particles position form vertex data pos.x = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_X); pos.y = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_Y); pos.z = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_Z); int3 c0 = compute_grid_coordinate(pos, -gSimParamsDev.compactSupport); int3 c1 = compute_grid_coordinate(pos, gSimParamsDev.compactSupport); int hash; int start; int end; // compute density contribution from neighbor particles for(int k = c0.z; k <= c1.z; k++) { for(int j = c0.y; j <= c1.y; j++) { for(int i = c0.x; i <= c1.x; i++) { hash = compute_hash_from_grid_coordinate(i, j, k); start = tex1Dfetch(gCellStartList, hash); end = tex1Dfetch(gCellEndList, hash); density += compute_particle_density_cell(pos, particleVertexData, particleIdList, start, end); } } } density *= gSimParamsDev.particleMass; // compute density contribution from the wall float u = (pos.x - gBoundaryGridOrigin[0])/gBoundaryGridLength[0]; 
float v = (pos.y - gBoundaryGridOrigin[1])/gBoundaryGridLength[1]; float w = (pos.z - gBoundaryGridOrigin[2])/gBoundaryGridLength[2]; float densWall = tex3D(gBoundaryDensities, u, v, w); density += densWall; pressure = gSimParamsDev.gasStiffness*(density - gSimParamsDev.restDensity); // set density and pressure particleSimulationData[id*SD_NUM_ELEMENTS + SD_DENSITY] = density; particleSimulationData[id*SD_NUM_ELEMENTS + SD_PRESSURE] = pressure; } //----------------------------------------------------------------------------- // Compute density and pressure for each sub particle __global__ void compute_sub_particle_density_pressure (float* subParticleVertexData, float* subParticleSimulationData, int* particleIdList, int* particleSortedIdList, int* cellStartList, int* cellEndList, unsigned int numParticles) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx >= numParticles) { return; } int id = particleIdList[idx]; float density = 0.0f; float pressure; float3 pos; // get particles position form vertex data pos.x = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; pos.y = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y]; pos.z = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z]; int3 c0 = compute_grid_coordinate_sub_particles(pos, -gSimParamsDev.compactSupportSub); int3 c1 = compute_grid_coordinate_sub_particles(pos, gSimParamsDev.compactSupportSub); int hash; int start; int end; for(int k = c0.z; k <= c1.z; k++) { for(int j = c0.y; j <= c1.y; j++) { for(int i = c0.x; i <= c1.x; i++) { hash = compute_hash_from_grid_coordinate_sub_particle(i, j, k); start = cellStartList[hash]; end = cellEndList[hash]; density += compute_sub_particle_density_cell(pos, subParticleVertexData, particleSortedIdList, start, end); } } } density *= gSimParamsDev.subParticleMass; pressure = gSimParamsDev.gasStiffness*(density - gSimParamsDev.restDensity); // set density and pressure subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_DENSITY] = density; subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_PRESSURE] = pressure; } //----------------------------------------------------------------------------- __global__ void compute_particle_acceleration_ifsurf (float* particleVertexData, float* particleSimulationData, int* particleIdList, int* cellStartList, int* cellEndList, int* isSurfaceParticle) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } int id = tex1Dfetch(gSortedParticleIdList, idx); float density = tex1Dfetch(gParticleSimulationData, id*SD_NUM_ELEMENTS + SD_DENSITY); float pressure = tex1Dfetch(gParticleSimulationData, id*SD_NUM_ELEMENTS + SD_PRESSURE); float tenCoeff = gSimParamsDev.tensionCoefficient; float3 pos; pos.x = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_X); pos.y = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_Y); pos.z = tex1Dfetch(gParticleVertexData, id*VD_NUM_ELEMENTS + VD_POS_Z); float3 vel; vel.x = tex1Dfetch(gParticleSimulationData, id*SD_NUM_ELEMENTS + SD_VEL0_X); vel.y = tex1Dfetch(gParticleSimulationData, id*SD_NUM_ELEMENTS + SD_VEL0_Y); vel.z = tex1Dfetch(gParticleSimulationData, id*SD_NUM_ELEMENTS + SD_VEL0_Z); int3 c0 = compute_grid_coordinate(pos, -gSimParamsDev.compactSupport); int3 c1 = compute_grid_coordinate(pos, gSimParamsDev.compactSupport); float3 force; force.x = 0.0f; force.y = 0.0f; force.z = 0.0f; float3 pressureForce; pressureForce.x = 0.0f; pressureForce.y = 0.0f; pressureForce.z = 0.0f; float3 viscosityForce; viscosityForce.x = 0.0f; viscosityForce.y = 0.0f; 
viscosityForce.z = 0.0f;

float3 colGra;
colGra.x = 0.0f;
colGra.y = 0.0f;
colGra.z = 0.0f;

// [sumPosNeighbor] and [nNeighbors] are used to compute the center of mass
// of the neighborhood of this particle (this also includes the particle
// itself)
float3 sumPosNeighbor;
sumPosNeighbor.x = pos.x;
sumPosNeighbor.y = pos.y;
sumPosNeighbor.z = pos.z;
float nNeighbors = 1.0f;

float colLapl;
float colGraNorm;
float grav = gSimParamsDev.gravity;

int hash;
int start;
int end;

// compute viscosity and pressure forces
for(int k = c0.z; k <= c1.z; k++)
{
    for(int j = c0.y; j <= c1.y; j++)
    {
        for(int i = c0.x; i <= c1.x; i++)
        {
            hash = compute_hash_from_grid_coordinate(i, j, k);
            start = tex1Dfetch(gCellStartList, hash);
            end = tex1Dfetch(gCellEndList, hash);
            compute_viscosity_pressure_forces_and_ifsurf_cell(pos, density,
                pressure, vel, particleVertexData, particleSimulationData,
                particleIdList, start, end, &pressureForce, &viscosityForce,
                &colGra, &colLapl, &sumPosNeighbor, &nNeighbors);
        }
    }
}

// compute distance to wall
float u = (pos.x - gBoundaryGridOrigin[0])/gBoundaryGridLength[0];
float v = (pos.y - gBoundaryGridOrigin[1])/gBoundaryGridLength[1];
float w = (pos.z - gBoundaryGridOrigin[2])/gBoundaryGridLength[2];
float distWall = -tex3D(gBoundaryDistances, u, v, w);

// add viscosity force
force.x += viscosityForce.x;
force.y += viscosityForce.y;
force.z += viscosityForce.z;

// add pressure force
force.x += pressureForce.x;
force.y += pressureForce.y;
force.z += pressureForce.z;

float coeff = density/(gSimParamsDev.timeStep*gSimParamsDev.timeStep)*
    (gBoundaryRestDistance - distWall);

if (distWall < gBoundaryRestDistance)
{
    float dX = gBoundaryGridSpacing/gBoundaryGridLength[0];
    float dY = gBoundaryGridSpacing/gBoundaryGridLength[1];
    float dZ = gBoundaryGridSpacing/gBoundaryGridLength[2];
    float3 graN;
    graN.x = (tex3D(gBoundaryDistances, u + dX, v, w) -
        tex3D(gBoundaryDistances, u - dX, v, w))/(2*dX);
    graN.y = (tex3D(gBoundaryDistances, u, v + dY, w) -
        tex3D(gBoundaryDistances, u, v - dY, w))/(2*dY);
    graN.z = (tex3D(gBoundaryDistances, u, v, w + dZ) -
        tex3D(gBoundaryDistances, u, v, w - dZ))/(2*dZ);
    //normalize(graN);

    // in the boundary handling case, just add the pressure force to the force
    force.x -= coeff*graN.x;
    force.y -= coeff*graN.y;
    force.z -= coeff*graN.z;

    //// viscosity contribution of the wall
    float visWallCoeff = tex3D(gBoundaryViscosities, u, v, w);
    force.x -= vel.x*visWallCoeff;
    force.y -= vel.y*visWallCoeff;
    force.z -= vel.z*visWallCoeff;
}
else
{
    // add surface tension force
    colGraNorm = sqrtf(colGra.x*colGra.x + colGra.y*colGra.y +
        colGra.z*colGra.z);

    float fCoeff = tenCoeff*colLapl/colGraNorm;

    if(colGraNorm > gSimParamsDev.normThresh)
    {
        force.x -= fCoeff*colGra.x;
        force.y -= fCoeff*colGra.y;
        force.z -= fCoeff*colGra.z;
    }
}

// store the actual acceleration
particleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_X] = force.x/density;
particleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_Y] = force.y/density - grav;
particleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_Z] = force.z/density;
}
//-----------------------------------------------------------------------------
__global__ void compute_sub_particle_acceleration (float* subParticleVertexData,
    float* subParticleSimulationData, int* subParticleIdList,
    int* subParticleSortedIdList, int* cellStartList, int* cellEndList,
    unsigned int numParticles)
{
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;

    if (idx >= numParticles)
    {
        return;
    }

    int id = subParticleIdList[idx];

    float density = subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_DENSITY];
float pressure = subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_PRESSURE]; float tenCoeff = gSimParamsDev.tensionCoefficient; float3 pos; pos.x = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; pos.y = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y]; pos.z = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z]; float3 vel; vel.x = subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_X]; vel.y = subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_Y]; vel.z = subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_Z]; int3 c0 = compute_grid_coordinate_sub_particles(pos, -gSimParamsDev.compactSupportSub); int3 c1 = compute_grid_coordinate_sub_particles(pos, gSimParamsDev.compactSupportSub); float3 force; force.x = 0.0f; force.y = 0.0f; force.z = 0.0f; float3 colGra; colGra.x = 0.0f; colGra.y = 0.0f; colGra.z = 0.0f; float colLapl; float colGraNorm; float grav = gSimParamsDev.gravity; int hash; int start; int end; // compute viscosity and pressure forces for (int k = c0.z; k <= c1.z; k++) { for (int j = c0.y; j <= c1.y; j++) { for (int i = c0.x; i <= c1.x; i++) { hash = compute_hash_from_grid_coordinate_sub_particle(i, j, k); start = cellStartList[hash]; end = cellEndList[hash]; compute_sub_particle_viscosity_pressure_forces_cell(pos, density, pressure, vel, subParticleVertexData, subParticleSimulationData, subParticleSortedIdList, start, end, &force, &colGra, &colLapl); } } } // surface tension colGraNorm = sqrtf(colGra.x*colGra.x + colGra.y*colGra.y + colGra.z*colGra.z); float fCoeff = tenCoeff*colLapl/colGraNorm; if (colGraNorm > gSimParamsDev.normThresh) { force.x -= fCoeff*colGra.x; force.y -= fCoeff*colGra.y; force.z -= fCoeff*colGra.z; } // store the actual acceleration subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_X] = force.x/density; subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_Y] = force.y/density - grav; subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_ACC_Z] = force.z/density; } //---------------------------------------------------------------------------- __global__ void project_quantities (float* subParticleVertexData, float* subParticleSimulationData, float* particleVertexData, float* particleSimulationData, int* subParticleIds, unsigned int numParticles, unsigned int offset) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } int id = subParticleIds[idx + offset]; float3 pos; pos.x = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; pos.y = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y]; pos.z = subParticleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z]; int3 c0 = compute_grid_coordinate(pos, -gSimParamsDev.compactSupportSub); int3 c1 = compute_grid_coordinate(pos, gSimParamsDev.compactSupportSub); float3 acc; acc.x = 0.0f; acc.y = 0.0f; acc.z = 0.0f; float density = 0.0f; float pressure = 0.0f; float numNeighbours = 0.0f; int hash; int start; int end; // compute viscosity and pressure forces for(int k = c0.z; k <= c1.z; k++) { for(int j = c0.y; j <= c1.y; j++) { for(int i = c0.x; i <= c1.x; i++) { hash = compute_hash_from_grid_coordinate(i, j, k); start = tex1Dfetch(gCellStartList, hash); end = tex1Dfetch(gCellEndList, hash); project_quantities_cell(acc, density, pressure, numNeighbours, pos, start, end); } } } subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_DENSITY] = density/numNeighbours; subParticleSimulationData[id*SD_NUM_ELEMENTS + SD_PRESSURE] = pressure/numNeighbours; } //---------------------------------------------------------------------------- /*__global__ void 
compute_sub_particle_acceleration (float* particleVertexData, float* particleSimulationData, int* particleIdList, int* cellStartList, int* cellEndList, int* isSurfaceParticle) { }*/ //----------------------------------------------------------------------------- __global__ void integrate_euler (float* particleVertexData, float* particleSimulationData) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } unsigned int idVert = idx*VD_NUM_ELEMENTS; unsigned int idSim = idx*SD_NUM_ELEMENTS; float dt = gSimParamsDev.timeStep; particleSimulationData[idSim + SD_VEL0_X] += dt*particleSimulationData[idSim + SD_ACC_X]; particleSimulationData[idSim + SD_VEL0_Y] += dt*particleSimulationData[idSim + SD_ACC_Y]; particleSimulationData[idSim + SD_VEL0_Z] += dt*particleSimulationData[idSim + SD_ACC_Z]; particleVertexData[idVert + VD_POS_X] += dt*particleSimulationData[idSim + SD_VEL0_X]; particleVertexData[idVert + VD_POS_Y] += dt*particleSimulationData[idSim + SD_VEL0_Y]; particleVertexData[idVert + VD_POS_Z] += dt*particleSimulationData[idSim + SD_VEL0_Z]; // compute density contribution from the wall /*float u = (pos.x - gBoundaryGridOrigin[0])/gBoundaryGridLength[0]; float v = (pos.y - gBoundaryGridOrigin[1])/gBoundaryGridLength[1]; float w = (pos.z - gBoundaryGridOrigin[2])/gBoundaryGridLength[2]; float distWall = tex3D(gBoundaryDistances, u, v, w);*/ } //----------------------------------------------------------------------------- __global__ void integrate_sub_particles_euler (float* subParticleVertexData, float* subParticleSimulationData, int* subParticleIds, unsigned int nSubParticles, unsigned int offset) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= nSubParticles) { return; } int id = subParticleIds[idx + offset]; unsigned int idVert = id*VD_NUM_ELEMENTS; unsigned int idSim = id*SD_NUM_ELEMENTS; float dt = gSimParamsDev.timeStepSubParticles; subParticleSimulationData[idSim + SD_VEL0_X] += dt*subParticleSimulationData[idSim + SD_ACC_X]; subParticleSimulationData[idSim + SD_VEL0_Y] += dt*subParticleSimulationData[idSim + SD_ACC_Y]; subParticleSimulationData[idSim + SD_VEL0_Z] += dt*subParticleSimulationData[idSim + SD_ACC_Z]; subParticleVertexData[idVert + VD_POS_X] += dt*subParticleSimulationData[idSim + SD_VEL0_X]; subParticleVertexData[idVert + VD_POS_Y] += dt*subParticleSimulationData[idSim + SD_VEL0_Y]; subParticleVertexData[idVert + VD_POS_Z] += dt*subParticleSimulationData[idSim + SD_VEL0_Z]; } //----------------------------------------------------------------------------- __global__ void integrate_boundary_sub_particles_euler (float* subParticleVertexData, float* subParticleSimulationData, int* subParticleIds, unsigned int nSubParticles, unsigned int offset) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= nSubParticles) { return; } int id = subParticleIds[idx + offset]; unsigned int idVert = id*VD_NUM_ELEMENTS; unsigned int idSim = id*SD_NUM_ELEMENTS; float dt = gSimParamsDev.timeStep; /* subParticleSimulationData[idSim + SD_VEL0_X] += dt*subParticleSimulationData[idSim + SD_ACC_X]; subParticleSimulationData[idSim + SD_VEL0_Y] += dt*subParticleSimulationData[idSim + SD_ACC_Y]; subParticleSimulationData[idSim + SD_VEL0_Z] += dt*subParticleSimulationData[idSim + SD_ACC_Z]; */ subParticleVertexData[idVert + VD_POS_X] += dt*subParticleSimulationData[idSim + SD_VEL0_X]; subParticleVertexData[idVert + VD_POS_Y] += dt*subParticleSimulationData[idSim + SD_VEL0_Y]; subParticleVertexData[idVert + 
VD_POS_Z] += dt*subParticleSimulationData[idSim + SD_VEL0_Z]; } //----------------------------------------------------------------------------- __global__ void shift_state (char* particleState) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } particleState[idx] = (particleState[idx] << 2); } //----------------------------------------------------------------------------- __global__ void collision_handling (float* particleVertexData, float* particleSimulationData) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } unsigned int idVert = idx*VD_NUM_ELEMENTS; unsigned int idSim = idx*SD_NUM_ELEMENTS; float3 pos; float3 vel; pos.x = tex1Dfetch(gParticleVertexData, idVert + VD_POS_X); pos.y = tex1Dfetch(gParticleVertexData, idVert + VD_POS_Y); pos.z = tex1Dfetch(gParticleVertexData, idVert + VD_POS_Z); vel.x = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_X); vel.y = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_Y); vel.z = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_Z); float3 local; float3 diff; float3 nrm; float dist; float depth; // compute "distance" to box, if positive the particle // is outside the box. // compute local position of the particle to the box local.x = pos.x - gSimParamsDev.boxCen[0]; local.y = pos.y - gSimParamsDev.boxCen[1]; local.z = pos.z - gSimParamsDev.boxCen[2]; // project local pos to the upper right quadrand and // compute difference to the boxDim vec diff.x = abs(local.x) - gSimParamsDev.boxDim[0]; diff.y = abs(local.y) - gSimParamsDev.boxDim[1]; diff.z = abs(local.z) - gSimParamsDev.boxDim[2]; dist = max(diff.x, diff.y); dist = max(dist, diff.z); // if the particle lies outside the box, the collision must be handled float3 contact; if (dist > 0.0f) { // contact point in "box space" contact.x = min(gSimParamsDev.boxDim[0], max(-gSimParamsDev.boxDim[0], local.x)); contact.y = min(gSimParamsDev.boxDim[1], max(-gSimParamsDev.boxDim[1], local.y)); contact.z = min(gSimParamsDev.boxDim[2], max(-gSimParamsDev.boxDim[2], local.z)); // translate to worldspace contact.x += gSimParamsDev.boxCen[0]; contact.y += gSimParamsDev.boxCen[1]; contact.z += gSimParamsDev.boxCen[2]; // compute penetration depth depth = compute_distance(contact, pos); // compute normal nrm.x = pos.x - contact.x; nrm.y = pos.y - contact.y; nrm.z = pos.z - contact.z; normalize(nrm); float velNorm = norm(vel); float dp = dot_product(nrm, vel); float coeff = (1 + gSimParamsDev.restitution*depth/ (gSimParamsDev.timeStep*velNorm))*dp; vel.x -= coeff*nrm.x; vel.y -= coeff*nrm.y; vel.z -= coeff*nrm.z; particleVertexData[idVert + VD_POS_X] = contact.x; particleVertexData[idVert + VD_POS_Y] = contact.y; particleVertexData[idVert + VD_POS_Z] = contact.z; particleSimulationData[idSim + SD_VEL0_X] = vel.x; particleSimulationData[idSim + SD_VEL0_Y] = vel.y; particleSimulationData[idSim + SD_VEL0_Z] = vel.z; } } //----------------------------------------------------------------------------- __global__ void collision_handling_sub_particles (float* subParticleVertexData, float* subParticleSimulationData, int* subParticleIds, unsigned int numParticles, unsigned int offset) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } int id = subParticleIds[idx + offset]; unsigned int idVert = id*VD_NUM_ELEMENTS; unsigned int idSim = id*SD_NUM_ELEMENTS; float3 pos; float3 vel; pos.x = subParticleVertexData[idVert + VD_POS_X]; pos.y = 
subParticleVertexData[idVert + VD_POS_Y]; pos.z = subParticleVertexData[idVert + VD_POS_Z]; vel.x = subParticleSimulationData[idSim + SD_VEL0_X]; vel.y = subParticleSimulationData[idSim + SD_VEL0_Y]; vel.z = subParticleSimulationData[idSim + SD_VEL0_Z]; float3 local; float3 diff; float3 nrm; float dist; float depth; // compute "distance" to box, if positive the particle // is outside the box. // compute local position of the particle to the box local.x = pos.x - gSimParamsDev.boxCen[0]; local.y = pos.y - gSimParamsDev.boxCen[1]; local.z = pos.z - gSimParamsDev.boxCen[2]; // project local pos to the upper right quadrand and // compute difference to the boxDim vec diff.x = abs(local.x) - gSimParamsDev.boxDim[0]; diff.y = abs(local.y) - gSimParamsDev.boxDim[1]; diff.z = abs(local.z) - gSimParamsDev.boxDim[2]; dist = max(diff.x, diff.y); dist = max(dist, diff.z); // if the particle lies outside the box, the collision must be handled float3 contact; if (dist > 0.0f) { // contact point in "box space" contact.x = min(gSimParamsDev.boxDim[0], max(-gSimParamsDev.boxDim[0], local.x)); contact.y = min(gSimParamsDev.boxDim[1], max(-gSimParamsDev.boxDim[1], local.y)); contact.z = min(gSimParamsDev.boxDim[2], max(-gSimParamsDev.boxDim[2], local.z)); // translate to worldspace contact.x += gSimParamsDev.boxCen[0]; contact.y += gSimParamsDev.boxCen[1]; contact.z += gSimParamsDev.boxCen[2]; // compute penetration depth depth = compute_distance(contact, pos); // compute normal nrm.x = pos.x - contact.x; nrm.y = pos.y - contact.y; nrm.z = pos.z - contact.z; normalize(nrm); float velNorm = norm(vel); float dp = dot_product(nrm, vel); float coeff = (1 + gSimParamsDev.restitution*depth/ (gSimParamsDev.timeStep*velNorm))*dp; vel.x -= coeff*nrm.x; vel.y -= coeff*nrm.y; vel.z -= coeff*nrm.z; subParticleVertexData[idVert + VD_POS_X] = contact.x; subParticleVertexData[idVert + VD_POS_Y] = contact.y; subParticleVertexData[idVert + VD_POS_Z] = contact.z; subParticleSimulationData[idSim + SD_VEL0_X] = vel.x; subParticleSimulationData[idSim + SD_VEL0_Y] = vel.y; subParticleSimulationData[idSim + SD_VEL0_Z] = vel.z; } } //----------------------------------------------------------------------------- __global__ void find_split_particles (float* particleVertexData, char* particleState, int* particleIdList, int* cellStartList, int* cellEndList) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } unsigned int id = particleIdList[idx]; float3 pos; pos.x = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; if (pos.x >= 0.2f && pos.x <= 0.5f) { particleState[id] |= 1; } } //----------------------------------------------------------------------------- __global__ void find_boundary_particles (float* particleVertexData, char* particleState, int* particleIdList, int* cellStartList, int* cellEndList) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } unsigned int id = particleIdList[idx]; unsigned int state = particleState[id] & 3; float3 pos; float3 xj; float3 r; float rn; if (state == 1) { // distribute information to neigbors pos.x = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; pos.y = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y]; pos.z = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z]; int3 c0 = compute_grid_coordinate(pos, -gSimParamsDev.compactSupport); int3 c1 = compute_grid_coordinate(pos, gSimParamsDev.compactSupport); int hash; int start; int end; for(int k = c0.z; k <= c1.z; k++) { for(int j = 
c0.y; j <= c1.y; j++) { for(int i = c0.x; i <= c1.x; i++) { hash = compute_hash_from_grid_coordinate(i, j, k); start = cellStartList[hash]; end = cellEndList[hash]; for (int u = start; u < end; u++) { int v = particleIdList[u]; xj.x = particleVertexData[v*VD_NUM_ELEMENTS + VD_POS_X]; xj.y = particleVertexData[v*VD_NUM_ELEMENTS + VD_POS_Y]; xj.z = particleVertexData[v*VD_NUM_ELEMENTS + VD_POS_Z]; r.x = pos.x - xj.x; r.y = pos.y - xj.y; r.z = pos.z - xj.z; rn = r.x*r.x + r.y*r.y + r.z*r.z; if (rn <= gSimParamsDev.compactSupport* gSimParamsDev.compactSupport) { particleState[v] |= 2; } } } } } } } //----------------------------------------------------------------------------- // sets the sub particle vertex & simulation data for each particle, that has // changed its state from "default" to "boundary" or "split". // This kernel is called for particles that were split and for boundary // particles. [numParticles] refers to the total number of those particles. __global__ void initialize_sub_particles (float* subParticleVertexData, float* subParticleSimulationData, int* particleIds, float* particleVertexData, float* particleSimulationData, char* particleStates, unsigned int numParticles) { #define SQRT3INV 0.577350269 // = 1/sqrt{3} // directions to seed the new sub particles const float directions[] = { SQRT3INV, SQRT3INV, SQRT3INV, SQRT3INV, SQRT3INV, -SQRT3INV, SQRT3INV, -SQRT3INV, SQRT3INV, SQRT3INV, -SQRT3INV, -SQRT3INV, -SQRT3INV, SQRT3INV, SQRT3INV, -SQRT3INV, SQRT3INV, -SQRT3INV, -SQRT3INV, -SQRT3INV, SQRT3INV, -SQRT3INV, -SQRT3INV, -SQRT3INV }; unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numParticles) { return; } unsigned int id = particleIds[idx]; char state = particleStates[id] & 15; // if parent particle makes transition from "default" -> "split" (3) // "default" -> "boundary" (2), "split" -> "boundary" (14) the sub particle needs // to be reinitialized // if (state == 2 || state == 3 || state == 14 || state == 11) { float density = particleSimulationData[id*SD_NUM_ELEMENTS + SD_DENSITY]; float radicand = 3.0f*gSimParamsDev.particleMass/(4.0f*M_PI*density); float radius = pow(radicand, 1.0f/3.0f); float3 pos; pos.x = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_X]; pos.y = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Y]; pos.z = particleVertexData[id*VD_NUM_ELEMENTS + VD_POS_Z]; // ... 
initialize initial position and velocity of the corresponding // sub particles for (unsigned int i = 0; i < 8; i++) { // update velocity int index = (8*id + i)*SD_NUM_ELEMENTS; subParticleSimulationData[index + SD_VEL0_X] = particleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_X]; subParticleSimulationData[index + SD_VEL0_Y] = particleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_Y]; subParticleSimulationData[index + SD_VEL0_Z] = particleSimulationData[id*SD_NUM_ELEMENTS + SD_VEL0_Z]; // update position index = (8*id + i)*VD_NUM_ELEMENTS; subParticleVertexData[index + VD_POS_X] = pos.x + directions[3*i + 0]*radius; subParticleVertexData[index + VD_POS_Y] = pos.y + directions[3*i + 1]*radius; subParticleVertexData[index + VD_POS_Z] = pos.z + directions[3*i + 2]*radius; } } #undef SQRT3INV } //----------------------------------------------------------------------------- __global__ void check_split_boundary_default (char* particleState, int* isSplit, int* isBoundary, int* isDefault) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; char state = particleState[idx] & 3; if (idx >= gSimParamsDev.numParticles) { return; } if (state == 0) { isDefault[idx] = 1; } else if (state == 2) { isBoundary[idx] = 1; } else { isSplit[idx] = 1; } } //----------------------------------------------------------------------------- __global__ void collect_ids (int* subParticleIdList, int* sortedSubParticleIdList,int* splitParticleIdList, int* boundaryParticleIdList, int* defaultParticleIdList, int* isSplit, int* isBoundary, int* isDefault, int* splitPrefixSum, int* boundaryPrefixSum, int* defaultPrefixSum, unsigned int numParticlesSplit) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= gSimParamsDev.numParticles) { return; } if (isSplit[idx] == 1) { int splitPreSum = splitPrefixSum[idx]; for (unsigned int i = 0; i < 8; i++) { subParticleIdList[8*splitPreSum + i] = 8*idx + i; sortedSubParticleIdList[8*splitPreSum + i] = 8*idx + i; } splitParticleIdList[splitPreSum] = idx; } else if (isBoundary[idx] == 1) { int boundaryPreSum = boundaryPrefixSum[idx]; for (unsigned int i = 0; i < 8; i++) { subParticleIdList[8*(numParticlesSplit + boundaryPreSum) + i] = 8*idx + i; sortedSubParticleIdList[8*(numParticlesSplit + boundaryPreSum) + i] = 8*idx + i; } boundaryParticleIdList[boundaryPreSum] = idx; } else { defaultParticleIdList[defaultPrefixSum[idx]] = idx; } } //__global__ void collision_handling(float* particleVertexData, // float* particleSimulationData) //{ // /*unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; // // if (idx >= gSimParamsDev.numParticles) // { // return; // } // // unsigned int idVert = idx*VD_NUM_ELEMENTS; // unsigned int idSim = idx*SD_NUM_ELEMENTS; // // float3 pos; // float3 vel; // // pos.x = tex1Dfetch(gParticleVertexData, idVert + VD_POS_X); // pos.y = tex1Dfetch(gParticleVertexData, idVert + VD_POS_Y); // pos.z = tex1Dfetch(gParticleVertexData, idVert + VD_POS_Z); // // vel.x = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_X); // vel.y = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_Y); // vel.z = tex1Dfetch(gParticleSimulationData, idSim + SD_VEL0_Z); // // // // unsigned int i,j,k; // i = (unsigned int)((pos.x - gBoundaryOrigin[0])/gDx); // j = (unsigned int)((pos.y - gBoundaryOrigin[1])/gDx); // k = (unsigned int)((pos.z - gBoundaryOrigin[2])/gDx); // unsigned int idx2 = i + gnBoundarySamples[0]*(j + gnBoundarySamples[1]*k); // unsigned int nodeIdx = tex1Dfetch(gIndexMap, idx2); // float dist = tex1Dfetch(gNodeTable, NC_NUM_ELEMENTS*nodeIdx + 
NC_DISTANCE); // // float3 bNorm; // // bNorm.x = tex1Dfetch(gNodeTable, NC_NUM_ELEMENTS*nodeIdx + NC_NORMAL_X); // bNorm.y = tex1Dfetch(gNodeTable, NC_NUM_ELEMENTS*nodeIdx + NC_NORMAL_Y); // bNorm.z = tex1Dfetch(gNodeTable, NC_NUM_ELEMENTS*nodeIdx + NC_NORMAL_Z); // // if (bNorm.y != 0.0f) // { // particleVertexData[idVert + VD_POS_X] -= gSimParamsDev.timeStep*vel.x; // particleVertexData[idVert + VD_POS_Y] -= gSimParamsDev.timeStep*vel.y; // particleVertexData[idVert + VD_POS_Z] -= gSimParamsDev.timeStep*vel.z; // }*/ //} //----------------------------------------------------------------------------- __global__ void create_density_slice (float* data, unsigned int width, unsigned int height, unsigned int depth) { int u = blockIdx.x*blockDim.x + threadIdx.x; int v = blockIdx.y*blockDim.y + threadIdx.y; unsigned int idx = width*v + u; if (u >= width || v >= height) { return; } float3 pos; pos.x = -1.0f + gSimParamsDev.gridSpacing*u; pos.y = gSimParamsDev.gridOrigin[1] + gSimParamsDev.gridSpacing*v; pos.z = gSimParamsDev.gridOrigin[2] + gSimParamsDev.gridSpacing*depth; float tu = (pos.x - gBoundaryGridOrigin[0])/gBoundaryGridLength[0]; float tv = (pos.y - gBoundaryGridOrigin[1])/gBoundaryGridLength[1]; float tw = (pos.z - gBoundaryGridOrigin[2])/gBoundaryGridLength[2]; float density = tex3D(gBoundaryDistances, tu, tv, tw); data[idx] = density; } //----------------------------------------------------------------------------- // definition of aux. functions (device) //----------------------------------------------------------------------------- __device__ inline int3 compute_grid_coordinate(float3 pos, float d) { int3 gridCoord; gridCoord.x = (unsigned int)((pos.x + d - gSimParamsDev.gridOrigin[0])/ gSimParamsDev.gridSpacing); gridCoord.y = (unsigned int)((pos.y + d - gSimParamsDev.gridOrigin[1])/ gSimParamsDev.gridSpacing); gridCoord.z = (unsigned int)((pos.z + d - gSimParamsDev.gridOrigin[2])/ gSimParamsDev.gridSpacing); gridCoord.x = gridCoord.x%gSimParamsDev.gridDim[0]; gridCoord.y = gridCoord.y%gSimParamsDev.gridDim[1]; gridCoord.z = gridCoord.z%gSimParamsDev.gridDim[2]; gridCoord.x = min(max(gridCoord.x, 0),gSimParamsDev.gridDim[0] - 1); gridCoord.y = min(max(gridCoord.y, 0),gSimParamsDev.gridDim[1] - 1); gridCoord.z = min(max(gridCoord.z, 0),gSimParamsDev.gridDim[2] - 1); return gridCoord; } //----------------------------------------------------------------------------- __device__ inline int3 compute_grid_coordinate_sub_particles(float3 pos, float d) { int3 gridCoord; gridCoord.x = (unsigned int)((pos.x + d - gSimParamsDev.gridOrigin[0])/ gSimParamsDev.gridSpacingSubParticles); gridCoord.y = (unsigned int)((pos.y + d - gSimParamsDev.gridOrigin[1])/ gSimParamsDev.gridSpacingSubParticles); gridCoord.z = (unsigned int)((pos.z + d - gSimParamsDev.gridOrigin[2])/ gSimParamsDev.gridSpacingSubParticles); gridCoord.x = gridCoord.x%gSimParamsDev.gridDimSubParticles[0]; gridCoord.y = gridCoord.y%gSimParamsDev.gridDimSubParticles[1]; gridCoord.z = gridCoord.z%gSimParamsDev.gridDimSubParticles[2]; gridCoord.x = min(max(gridCoord.x, 0), gSimParamsDev.gridDimSubParticles[0] - 1); gridCoord.y = min(max(gridCoord.y, 0), gSimParamsDev.gridDimSubParticles[1] - 1); gridCoord.z = min(max(gridCoord.z, 0), gSimParamsDev.gridDimSubParticles[2] - 1); return gridCoord; } //----------------------------------------------------------------------------- __device__ inline int compute_hash_from_grid_coordinate (int i, int j, int k) { return gSimParamsDev.gridDim[0]*(gSimParamsDev.gridDim[1]*k + j) + i; } 
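// Worked example of the row-major cell hashing above, assuming a hypothetical
// grid of 64 x 32 x 32 cells: cell (i, j, k) = (3, 5, 7) maps to
// hash = 64*(32*7 + 5) + 3 = 14659, i.e. cells are laid out x-fastest, then
// y, then z.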
//----------------------------------------------------------------------------- __device__ inline int compute_hash_from_grid_coordinate_sub_particle (int i, int j, int k) { return gSimParamsDev.gridDimSubParticles[0]* (gSimParamsDev.gridDimSubParticles[1]*k + j) + i; } //----------------------------------------------------------------------------- __device__ inline float norm(const float3& a) { return sqrt(a.x*a.x+a.y*a.y+a.z*a.z); } //----------------------------------------------------------------------------- __device__ inline void normalize (float3& a) { float norm = sqrt(a.x*a.x+a.y*a.y+a.z*a.z); a.x /= norm; a.y /= norm; a.z /= norm; } //----------------------------------------------------------------------------- __device__ inline float compute_distance (float3 a, float3 b) { return sqrt((a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z)); } //----------------------------------------------------------------------------- __device__ inline float compute_squared_distance (float3 a, float3 b) { return ((a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z)); } //----------------------------------------------------------------------------- __device__ inline float dot_product (const float3& a, const float3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; } //----------------------------------------------------------------------------- __device__ inline void project_quantities_cell (float3& acc, float& density, float& pressure, float& numNeighbors, const float3& xi, int start, int end) { int j; float3 xj; // neighbor particle's position float3 vj; // neighbor particle's velocity float rhoj; // neighbor density float pj; // neighbor pressure float3 aj; // neighbor acceleration float h = gSimParamsDev.compactSupportSub; float sqDist, d; for (int i = start; i < end; i++) { j = tex1Dfetch(gSortedParticleIdList, i); xj.x = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_X); xj.y = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_Y); xj.z = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_Z); rhoj = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_DENSITY); pj = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_PRESSURE); aj.x = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_ACC_X); aj.y = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_ACC_Y); aj.z = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_ACC_Z); sqDist = compute_squared_distance(xi, xj); if (sqDist <= h*h) { density += rhoj; pressure += pj; acc.x += aj.x; acc.y += aj.y; acc.z += aj.z; numNeighbors += 1.0f; /*d = h*h - sqDist; density += gSimParamsDev.poly6Sub*rhoj; pressure += gSimParamsDev.poly6Sub*pj; velocity.x += gSimParamsDev.poly6Sub*vj.x*d*d*d; velocity.y += gSimParamsDev.poly6Sub*vj.y*d*d*d; velocity.z += gSimParamsDev.poly6Sub*vj.z*d*d*d;*/ } } } //----------------------------------------------------------------------------- // Computes the contribution of neighborparticles of one particular grid cell // to the density of the particle at position [pos]. 
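// The accumulated term is the poly6 smoothing kernel of Mueller et al. 2003,
// W_poly6(r, h) = 315/(64*pi*h^9)*(h^2 - r^2)^3 for r <= h, where
// gSimParamsDev.poly6 is assumed to hold the constant prefactor; the caller
// multiplies the summed result by the particle mass afterwards.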
__device__ float compute_particle_density_cell (const float3 &pos, float* particleVertexData, int* particleIdList, int start, int end) { int particleIndex; // index of the neighbor of the particle float density = 0.0f; float3 p; // neighbor particle's position float h = gSimParamsDev.compactSupport; float r; float d; for (int i = start; i < end; i++) { particleIndex = particleIdList[i]; // compute position of the neighbor p.x = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_X]; p.y = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_Y]; p.z = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_Z]; r = compute_distance(p, pos); // TODO: evaluating r*r <= h*h might save taking the sqrt in // compute_distance proc. if (r <= h) { d = h*h - r*r; density += gSimParamsDev.poly6*d*d*d; } } return density; } //----------------------------------------------------------------------------- // Computes the contribution of neighborsub particles of one particular grid // cell to the density of the particle at position [pos]. __device__ float compute_sub_particle_density_cell (const float3 &pos, float* particleVertexData, int* particleIdList, int start, int end) { int particleIndex; // index of the neighbor of the particle float density = 0.0f; float3 p; // neighbor particle's position float h = gSimParamsDev.compactSupportSub; float r; float d; for (int i = start; i < end; i++) { particleIndex = particleIdList[i]; // compute position of the neighbor p.x = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_X]; p.y = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_Y]; p.z = particleVertexData[particleIndex*VD_NUM_ELEMENTS + VD_POS_Z]; r = compute_distance(p, pos); // TODO: evaluating r*r <= h*h might save taking the sqrt in // compute_distance proc. 
        if (r <= h)
        {
            d = h*h - r*r;
            density += gSimParamsDev.poly6Sub*d*d*d;
        }
    }

    return density;
}
//-----------------------------------------------------------------------------
__device__ inline void compute_viscosity_pressure_forces_and_ifsurf_cell
    (const float3& xi, float rhoi, float pi, const float3& vi,
    float* particleVertexData, float* particleSimulationData,
    int* particleIdList, int start, int end,
    float3* pressureForce, float3* viscosityForce, float3* colGra,
    float* colLapl, float3* sumPosNeighbor, float* nNeighbors)
{
    int j;          // neighbor index in particle list
    float3 xj;      // neighbor particle's position
    float3 vj;      // neighbor particle's velocity
    float rhoj;     // neighbor density
    float pj;       // neighbor pressure
    float3 r;       // xi - xj
    float rn;       // ||xi - xj||

    float h = gSimParamsDev.compactSupport;     // effective radius
    float grad = gSimParamsDev.gradSpiky;
    float lapl = gSimParamsDev.laplVisc;
    float grad2 = gSimParamsDev.gradPoly6;
    float lapl2 = gSimParamsDev.laplPoly6;

    float pressure; // pressure term in the kernel approx
    float rhoi2 = rhoi*rhoi;
    float m = gSimParamsDev.particleMass;
    float mu = gSimParamsDev.dynamicViscosity;
    float d;        // helper value to avoid arithmetic operations

    for (int i = start; i < end; i++)
    {
        // get neighbor index from particle list
        j = tex1Dfetch(gSortedParticleIdList, i);

        // get neighbor particle information
        xj.x = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_X);
        xj.y = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_Y);
        xj.z = tex1Dfetch(gParticleVertexData, j*VD_NUM_ELEMENTS + VD_POS_Z);
        vj.x = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_VEL0_X);
        vj.y = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_VEL0_Y);
        vj.z = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_VEL0_Z);
        rhoj = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_DENSITY);
        pj = tex1Dfetch(gParticleSimulationData, j*SD_NUM_ELEMENTS + SD_PRESSURE);
        r.x = xi.x - xj.x;
        r.y = xi.y - xj.y;
        r.z = xi.z - xj.z;
        rn = norm(r);

        // TODO: * the particle mass could be factored out to save
        //         multiplications.
        //       * in general the pressure term can be simplified with
        //         respect to rhoi and pi.
        //       * visc force: mu could be factored out etc.
        //       * two float3's could be used for both forces to simplify
        //         the terms.
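        // The term computed below appears to be the symmetrized SPH pressure
        // term m*rho_i*(p_i/rho_i^2 + p_j/rho_j^2); it is multiplied further
        // down by the spiky-kernel gradient factor gradSpiky*(h - rn)^2/rn,
        // where gradSpiky = -45/(pi*h^6). As the TODO above notes,
        // rhoi*(pi/rhoi2) could be simplified to pi/rhoi.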
        pressure = rhoi*(pi/rhoi2 + pj/(rhoj*rhoj))*m;

        if (rn <= h && rn > 0.0f)
        {
            // compute pressure force
            d = (h-rn)*(h-rn);
            pressureForce->x -= pressure*grad*d/rn*r.x;
            pressureForce->y -= pressure*grad*d/rn*r.y;
            pressureForce->z -= pressure*grad*d/rn*r.z;

            // compute viscosity force
            d = (h-rn);
            viscosityForce->x += mu*(vj.x-vi.x)*m/rhoj*lapl*d;
            viscosityForce->y += mu*(vj.y-vi.y)*m/rhoj*lapl*d;
            viscosityForce->z += mu*(vj.z-vi.z)*m/rhoj*lapl*d;

            // compute color gradient
            d = (h*h-rn*rn)*(h*h-rn*rn);
            colGra->x += m/rhoj*grad2*d*r.x;
            colGra->y += m/rhoj*grad2*d*r.y;
            colGra->z += m/rhoj*grad2*d*r.z;

            // compute color laplacian
            d = (h*h - rn*rn)*(3.0f*h*h - 7.0f*rn*rn);
            *colLapl += m/rhoj*lapl2*d;

            //
            sumPosNeighbor->x += xj.x;
            sumPosNeighbor->y += xj.y;
            sumPosNeighbor->z += xj.z;
            *nNeighbors += 1.0f;
        }
    }
}
//-----------------------------------------------------------------------------
__device__ inline void compute_sub_particle_viscosity_pressure_forces_cell
    (const float3& xi, float rhoi, float pi, const float3& vi,
    float* particleVertexData, float* particleSimulationData,
    int* particleIdList, int start, int end,
    float3* force, float3* colGra, float* colLapl)
{
    int j;          // neighbor index in particle list
    float3 xj;      // neighbor particle's position
    float3 vj;      // neighbor particle's velocity
    float rhoj;     // neighbor density
    float pj;       // neighbor pressure
    float3 r;       // xi - xj
    float rn;       // ||xi - xj||

    float h = gSimParamsDev.compactSupportSub;  // effective radius
    float grad = gSimParamsDev.gradSpikySub;
    float lapl = gSimParamsDev.laplViscSub;
    float grad2 = gSimParamsDev.gradPoly6Sub;
    float lapl2 = gSimParamsDev.laplPoly6Sub;

    float pressure; // pressure term in the kernel approx
    float rhoi2 = rhoi*rhoi;
    float m = gSimParamsDev.subParticleMass;
    float mu = gSimParamsDev.dynamicViscosity;
    float d;        // helper value to avoid arithmetic operations

    for (int i = start; i < end; i++)
    {
        // get neighbor index from particle list
        j = particleIdList[i];

        // get neighbor particle information
        xj.x = particleVertexData[j*VD_NUM_ELEMENTS + VD_POS_X];
        xj.y = particleVertexData[j*VD_NUM_ELEMENTS + VD_POS_Y];
        xj.z = particleVertexData[j*VD_NUM_ELEMENTS + VD_POS_Z];
        vj.x = particleSimulationData[j*SD_NUM_ELEMENTS + SD_VEL0_X];
        vj.y = particleSimulationData[j*SD_NUM_ELEMENTS + SD_VEL0_Y];
        vj.z = particleSimulationData[j*SD_NUM_ELEMENTS + SD_VEL0_Z];
        rhoj = particleSimulationData[j*SD_NUM_ELEMENTS + SD_DENSITY];
        pj = particleSimulationData[j*SD_NUM_ELEMENTS + SD_PRESSURE];
        r.x = xi.x - xj.x;
        r.y = xi.y - xj.y;
        r.z = xi.z - xj.z;
        rn = norm(r);

        // TODO: * the particle mass could be factored out to save
        //         multiplications.
        //       * in general the pressure term can be simplified with
        //         respect to rhoi and pi.
        //       * visc force: mu could be factored out etc.
        //       * two float3's could be used for both forces to simplify
        //         the terms.
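        // The accumulation below mirrors the standard-resolution routine
        // above, only with the sub-particle constants: the viscosity part
        // uses the viscosity-kernel Laplacian
        //     lapl W_visc(r, h) = 45/(pi*h^6) * (h - r)
        // (laplViscSub is this constant rescaled for the halved support),
        // and colGra/colLapl accumulate the gradient and Laplacian of the
        // color field c_i = sum_j (m/rho_j)*W_poly6, presumably used later
        // for surface and curvature estimation.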
pressure = rhoi*(pi/rhoi2 + pj/(rhoj*rhoj))*m; if (rn <= h && rn > 0.0f) { // compute pressure force d = (h-rn)*(h-rn); force->x -= pressure*grad*d/rn*r.x; force->y -= pressure*grad*d/rn*r.y; force->z -= pressure*grad*d/rn*r.z; // compute viscosity force d = (h - rn); force->x += mu*(vj.x - vi.x)*m/rhoj*lapl*d; force->y += mu*(vj.y - vi.y)*m/rhoj*lapl*d; force->z += mu*(vj.z - vi.z)*m/rhoj*lapl*d; // compute color gradient d = (h*h - rn*rn)*(h*h - rn*rn); colGra->x += m/rhoj*grad2*d*r.x; colGra->y += m/rhoj*grad2*d*r.y; colGra->z += m/rhoj*grad2*d*r.z; // compute color laplacian d = (h*h - rn*rn)*(3.0f*h*h - 7.0f*rn*rn); *colLapl += m/rhoj*lapl2*d; } } } //----------------------------------------------------------------------------- template<typename T> __global__ void copy_array (T* dst, T* src, unsigned int numElements) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= numElements) { return; } dst[idx] = src[idx]; } //----------------------------------------------------------------------------- // HOST CODE //----------------------------------------------------------------------------- #define EMPTY_CELL 0xFFFFFFFF //----------------------------------------------------------------------------- // forward declaration of aux. functions //----------------------------------------------------------------------------- void create_particle_box (float sx, float sy, float sz, float d, unsigned int numParticles, float** particleVD, float** particleSD, unsigned int* numParticlesCreated); void set_simulation_domain (float xs, float ys, float zs, float xe, float ye, float ze, float gridSpacing, float gridSpacingSubParticles, SimulationParameters* parameters); void compute_particle_kernel_invocation_information (unsigned int& nThreadsBlock, unsigned int& nBlocks, unsigned int numParticles); void set_up_3d_float_texture (struct textureReference* texref, cudaArray* arr, float* data, unsigned int dim[3]); //----------------------------------------------------------------------------- // Definition of ParticleSimulation class //----------------------------------------------------------------------------- ParticleSimulation::ParticleSimulation (): mParticleVertexData(NULL), mParticleSimulationData(NULL), mParticleVertexDataDevPtr(NULL), mParticleSimulationDataDevPtr(NULL), mParticleIdsDevPtr(NULL), mParticleHashListDevPtr(NULL), mCellStartListDevPtr(NULL), mCellEndListDevPtr(NULL), mIsSurfaceParticleDevPtr(NULL), mParticleVertexDataVbo(0), mNumBlocks(0), mThreadsPerBlock(0), mNumSubParticles(0), mNumTimeSteps(0) { memset(&mParameters, 0, sizeof(SimulationParameters)); } //----------------------------------------------------------------------------- ParticleSimulation::~ParticleSimulation() { // free host memory saveDeleteArray<float>(&mParticleVertexData); saveDeleteArray<float>(&mParticleSimulationData); // free device memory // free cuda memory cudaSafeFree<float>(&mParticleVertexDataDevPtr); cudaSafeFree<float>(&mParticleSimulationDataDevPtr); cudaSafeFree<float>(&mSubParticleVertexDataDevPtr); cudaSafeFree<float>(&mSubParticleSimulationDataDevPtr); cudaSafeFree<char>(&mParticleStatesDevPtr); cudaSafeFree<int>(&mParticleHashListDevPtr); cudaSafeFree<int>(&mCellStartListDevPtr); cudaSafeFree<int>(&mCellEndListDevPtr); cudaSafeFree<int>(&mIsSurfaceParticleDevPtr); cudaSafeFree<int>(&mParticleIdsDevPtr); cudaSafeFree<int>(&_isSplitDevPtr); cudaSafeFree<int>(&_isBoundaryDevPtr); cudaSafeFree<int>(&_isDefaultDevPtr); cudaSafeFree<int>(&_splitPrefixSumDevPtr); 
cudaSafeFree<int>(&_boundaryPrefixSumDevPtr); cudaSafeFree<int>(&_defaultPrefixSumDevPtr); cudaSafeFree<int>(&mParticleIdsSplitDevPtr); cudaSafeFree<int>(&mParticleIdsDefaultDevPtr); cudaSafeFree<int>(&mSubParticleIdsDevPtr); cudaSafeFree<int>(&mParticleIdsBoundaryDevPtr); cudaSafeFree<int>(&mParticleIdsSplitDevPtr); cudaSafeFree<int>(&mSubParticleHashsDevPtr); cudaSafeFree<int>(&mSubParticleCellStartIdsDevPtr); cudaSafeFree<int>(&mSubParticleCellEndIdsDevPtr); // free OpenGL vertex buffer object if (mParticleVertexDataVbo != 0) { CUDA_SAFE_CALL( cudaGraphicsUnregisterResource(mGraphicsResources[0]) ); CUDA_SAFE_CALL( cudaGraphicsUnregisterResource(mGraphicsResources[1]) ); //cudaGLUnregisterBufferObject(mParticleVertexDataVbo); // <- deprecated glDeleteBuffers(1, &mParticleVertexDataVbo); glDeleteBuffers(1, &mSubParticleVertexDataVbo); mParticleVertexDataVbo = 0; mSubParticleVertexDataVbo = 0; } } //----------------------------------------------------------------------------- ParticleSimulation* ParticleSimulation::Example01 () { // create a particle simulation ParticleSimulation* sim = new ParticleSimulation(); // create box (cube) of particles create_particle_box(-0.65f, -0.45f, -0.25f, 0.5f, 40000, &sim->mParticleVertexData, &sim->mParticleSimulationData, &sim->mParameters.numParticles); if (sim->mParticleVertexData == NULL || sim->mParticleSimulationData == NULL) { THROW_EXCEPTION("Could not allocate memory for particles (Host)."); } // set sph simulation related parameters sim->mParameters.kernelParticles = 20; sim->mParameters.restDensity = 998.648f; sim->mParameters.particleMass = sim->mParameters.restDensity*0.5f*0.5f*0.5f/ static_cast<float>(sim->mParameters.numParticles); sim->mParameters.subParticleMass = sim->mParameters.particleMass/8.0f; sim->mParameters.gasStiffness = 3.0f; sim->mParameters.dynamicViscosity = 3.0f; sim->mParameters.gravity = 9.81f; sim->mParameters.tensionCoefficient = 0.0728f; sim->mParameters.normThresh = 15.065f; // compute the kernel radius float h = powf((3.0f*0.5f*0.5f*0.5f*sim->mParameters.kernelParticles)/ (4.0f*M_PI*sim->mParameters.numParticles), 1.0f/3.0f); sim->mParameters.compactSupport = h; sim->mParameters.compactSupportSub = h/2.0f; sim->mParameters.poly6 = 315.0f/(64.0f*M_PI*h*h*h*h*h*h*h*h*h); sim->mParameters.gradPoly6 = -945.0f/(32.0f*M_PI*h*h*h*h*h*h*h*h*h); sim->mParameters.laplPoly6 = -945.0f/(32.0f*M_PI*h*h*h*h*h*h*h*h*h); sim->mParameters.gradSpiky = -45.0f/(M_PI*h*h*h*h*h*h); sim->mParameters.laplVisc = 45.0f/(M_PI*h*h*h*h*h*h); sim->mParameters.poly6Sub = sim->mParameters.poly6*512.0f; sim->mParameters.gradPoly6Sub = sim->mParameters.gradPoly6*512.0f; sim->mParameters.laplPoly6Sub = sim->mParameters.laplPoly6*512.0f; sim->mParameters.gradSpikySub = sim->mParameters.gradSpiky*64.0f; sim->mParameters.laplViscSub = sim->mParameters.laplVisc*64.0f; sim->mParameters.timeStep = 0.003; sim->mParameters.timeStepSubParticles = 0.001f; set_simulation_domain(-1.0f, -1.0f, -1.0f, 1.0f, 1.0f, 1.0f, h, h/2.0f, &sim->mParameters); // set fluid volume sim->mParameters.fluidVolume = 0.5f*0.5f*0.5f; // set parameters for boundary handling sim->mParameters.restitution = 0.0f; sim->mParameters.boxCen[0] = 0.0f; sim->mParameters.boxCen[1] = 0.0f; sim->mParameters.boxCen[2] = 0.0f; sim->mParameters.boxDim[0] = 0.7f; sim->mParameters.boxDim[1] = 0.5f; sim->mParameters.boxDim[2] = 0.3f; // set parameters for new boundary handling sim->_boundaryMapFileName = std::string("icosphere.txt"); // set parameters for surface extraction 
sim->mParameters.cmDistanceThresh = 0.5f; sim->mParameters.nPartTresh = 20.0f; sim->_leftI = 0.0f; sim->_rightI = 1.0f; return sim; } //----------------------------------------------------------------------------- int* ParticleSimulation::CreateIsParticleSurfaceList (const ParticleSimulation* sim) { int* isSurfaceParticleList = new int[sim->mParameters.numParticles]; CUDA_SAFE_CALL( cudaMemcpy(isSurfaceParticleList, sim->mIsSurfaceParticleDevPtr, sizeof(int)*sim->mParameters.numParticles, cudaMemcpyDeviceToHost) ); int extr = 0; for (unsigned int i = 0; i < sim->mParameters.numParticles; i++) { extr += isSurfaceParticleList[i]; } printf("%d of %d extracted\n", extr, sim->mParameters.numParticles); return isSurfaceParticleList; } //----------------------------------------------------------------------------- void ParticleSimulation::FreeIsParticleSurfaceList (int** isSurfaceParticleList) { if (*isSurfaceParticleList == NULL) { return; } delete[] *isSurfaceParticleList; *isSurfaceParticleList = NULL; } //----------------------------------------------------------------------------- void ParticleSimulation::freeAll () { // free host memory saveDeleteArray<float>(&mParticleVertexData); saveDeleteArray<float>(&mParticleSimulationData); // free device memory // free cuda memory cudaSafeFree<float>(&mParticleSimulationDataDevPtr); cudaSafeFree<int>(&mParticleIdsDevPtr); cudaSafeFree<int>(&mParticleHashListDevPtr); cudaSafeFree<int>(&mCellStartListDevPtr); cudaSafeFree<int>(&mCellEndListDevPtr); cudaSafeFree<int>(&mIsSurfaceParticleDevPtr); // free OpenGL vertex buffer object if (mParticleVertexDataVbo != 0) { CUDA_SAFE_CALL( cudaGraphicsUnregisterResource(mGraphicsResources[0]) ); CUDA_SAFE_CALL( cudaGraphicsUnregisterResource(mGraphicsResources[1]) ); //cudaGLUnregisterBufferObject(mParticleVertexDataVbo); // <- deprecated glDeleteBuffers(1, &mParticleVertexDataVbo); glDeleteBuffers(1, &mSubParticleVertexDataVbo); mParticleVertexDataVbo = 0; mSubParticleVertexDataVbo = 0; } } //----------------------------------------------------------------------------- void ParticleSimulation::Init () { // // free device memory, if previously allocated // // free cuda memory cudaSafeFree<float>(&mParticleSimulationDataDevPtr); cudaSafeFree<int>(&mParticleIdsDevPtr); cudaSafeFree<int>(&mParticleHashListDevPtr); cudaSafeFree<int>(&mCellStartListDevPtr); cudaSafeFree<int>(&mCellEndListDevPtr); cudaSafeFree<int>(&mIsSurfaceParticleDevPtr); // free OpenGL vertex buffer object if (mParticleVertexDataVbo != 0) { CUDA_SAFE_CALL( cudaGraphicsUnregisterResource(mGraphicsResources[0]) ); CUDA_SAFE_CALL( cudaGraphicsUnregisterResource(mGraphicsResources[1]) ); glDeleteBuffers(1, &mParticleVertexDataVbo); glDeleteBuffers(1, &mSubParticleVertexDataVbo); mParticleVertexDataVbo = 0; mSubParticleVertexDataVbo = 0; } // // allocate cuda device memory for storing the particles' vertex and // simulation data. // Vertex data is allocated on device using OpenGL, as it is stored // in an vertex buffer object, which is used for rendering later. // // Simulation data is allocated through cuda. 
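    // In outline, the split used below: simulation-only attributes live in
    // plain device memory obtained via cudaMalloc, while the renderable
    // vertex data lives in an OpenGL VBO that is registered once with
    // cudaGraphicsGLRegisterBuffer and then mapped/unmapped around each
    // simulation step (see map()/unmap()) to obtain a temporary device
    // pointer.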
CUDA_SAFE_CALL( cudaMalloc(&mParticleSimulationDataDevPtr, mParameters.numParticles*sizeof(float)*SD_NUM_ELEMENTS) ); // copy initial host data to device CUDA_SAFE_CALL( cudaMemcpy(mParticleSimulationDataDevPtr, mParticleSimulationData, mParameters.numParticles*sizeof(float)*SD_NUM_ELEMENTS, cudaMemcpyHostToDevice) ); // Vertex data is allocated through a vertex buffer object // the vbo is then registered to be used with CUDA glGenBuffers(1, &mParticleVertexDataVbo); glBindBuffer(GL_ARRAY_BUFFER, mParticleVertexDataVbo); glBufferData(GL_ARRAY_BUFFER, mParameters.numParticles*VD_NUM_ELEMENTS*sizeof(float), mParticleVertexData, GL_DYNAMIC_COPY); CUDA_SAFE_CALL( cudaGraphicsGLRegisterBuffer(&mGraphicsResources[0], mParticleVertexDataVbo, cudaGraphicsMapFlagsNone) ); //cudaGLRegisterBufferObject(mParticleVertexDataVbo); // <- is deprecated // // alloc & Init additional aux. arrays for nearest neighbor search // const int* dim = mParameters.gridDim; unsigned int size = dim[0]*dim[1]*dim[2]*sizeof(int); CUDA_SAFE_CALL( cudaMalloc(&mCellStartListDevPtr, size) ); CUDA_SAFE_CALL( cudaMalloc(&mCellEndListDevPtr, size) ); // set each cell to be empty CUDA_SAFE_CALL( cudaMemset(mCellStartListDevPtr, EMPTY_CELL, size) ); CUDA_SAFE_CALL( cudaMemset(mCellEndListDevPtr, EMPTY_CELL, size) ); CUDA_SAFE_CALL( cudaMalloc(&mParticleIdsDevPtr, mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc(&mParticleHashListDevPtr, mParameters.numParticles*sizeof(int)) ); // alloc dev memory for surface particle extraction CUDA_SAFE_CALL( cudaMalloc(&mIsSurfaceParticleDevPtr, mParameters.numParticles*sizeof(int)) ); try { this->allocateMemoryTwoScale(); } catch (std::runtime_error& e) { std::cout << e.what() << std::endl; system("pause"); } // set up textures, for faster memory look-ups through caching // NOTE: VertexData needs to be mapped to get a valid device pointer, // as it is initial not allocated through CUDA's malloc cudaChannelFormatDesc descf = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaChannelFormatDesc desci = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindSigned); cudaChannelFormatDesc descu = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindUnsigned); CUDA_SAFE_CALL ( cudaBindTexture(0, gParticleSimulationData, mParticleSimulationDataDevPtr, descf, sizeof(float)*SD_NUM_ELEMENTS*mParameters.numParticles) ); this->map(); CUDA_SAFE_CALL ( cudaBindTexture(0, gParticleVertexData, mParticleVertexDataDevPtr, descf, sizeof(float)*VD_NUM_ELEMENTS*mParameters.numParticles) ); this->unmap(); CUDA_SAFE_CALL ( cudaBindTexture(0, gCellStartList, mCellStartListDevPtr, desci, size) ); CUDA_SAFE_CALL ( cudaBindTexture(0, gCellEndList, mCellEndListDevPtr, desci, size) ); CUDA_SAFE_CALL ( cudaBindTexture(0, gSortedParticleIdList, mParticleIdsDevPtr, desci, mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL ( cudaBindTexture(0, gParticleHashList, mParticleHashListDevPtr, desci, mParameters.numParticles*sizeof(int)) ); // set number of CUDA blocks and threads per blocks for each kernel // invocation // NOTE: - chose different values than 256 to try to get more performance // - make threadsPerBlock and blocks function parameters compute_particle_kernel_invocation_information(mThreadsPerBlock, mNumBlocks, mParameters.numParticles); this->setUpSphInComplexShapes(); } //----------------------------------------------------------------------------- // allocates and initializes memory needed for the two scale particle // simulation void 
ParticleSimulation::allocateMemoryTwoScale () { // create opengl vbo for storing the vertex information of the // sub particles glGenBuffers(1, &mSubParticleVertexDataVbo); glBindBuffer(GL_ARRAY_BUFFER, mSubParticleVertexDataVbo); glBufferData(GL_ARRAY_BUFFER, 8*mParameters.numParticles*VD_NUM_ELEMENTS* sizeof(float), NULL, GL_DYNAMIC_COPY); CUDA_SAFE_CALL( cudaGraphicsGLRegisterBuffer(&mGraphicsResources[1], mSubParticleVertexDataVbo, cudaGraphicsMapFlagsNone) ); // create opengl vbo for storing the ids of the particles in // default state glGenBuffers(1, &mParticleIdsDefaultVbo); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mParticleIdsDefaultVbo); glBufferData(GL_ELEMENT_ARRAY_BUFFER, mParameters.numParticles*sizeof(int), NULL, GL_DYNAMIC_COPY); CUDA_SAFE_CALL( cudaGraphicsGLRegisterBuffer(&mGraphicsResources[2], mParticleIdsDefaultVbo, cudaGraphicsMapFlagsNone) ); // create opengl vbo for storing the ids of the active sub particles glGenBuffers(1, &mSubParticleIdsVbo); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mSubParticleIdsVbo); glBufferData(GL_ELEMENT_ARRAY_BUFFER, mParameters.numParticles*sizeof(int)*8, NULL, GL_DYNAMIC_COPY); CUDA_SAFE_CALL( cudaGraphicsGLRegisterBuffer(&mGraphicsResources[3], mSubParticleIdsVbo, cudaGraphicsMapFlagsNone) ); CUDA_SAFE_CALL( cudaMalloc(&mSubParticleSortedIdsDevPtr, sizeof(int)*8*mParameters.numParticles) ); CUDA_SAFE_CALL( cudaMalloc(&mSubParticleSimulationDataDevPtr, 8*mParameters.numParticles*sizeof(float)*SD_NUM_ELEMENTS) ); CUDA_SAFE_CALL( cudaMemset(mSubParticleSimulationDataDevPtr, 0, 8*mParameters.numParticles*sizeof(float)*SD_NUM_ELEMENTS) ); CUDA_SAFE_CALL( cudaMalloc(&mSubParticleHashsDevPtr, 8*mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc(&mParticleStatesDevPtr, mParameters.numParticles*sizeof(char)) ); CUDA_SAFE_CALL( cudaMemset(mParticleStatesDevPtr, 0, mParameters.numParticles*sizeof(char)) ); mParticleStates = new char[mParameters.numParticles]; CUDA_SAFE_CALL( cudaMalloc(&_isSplitDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc(&_isBoundaryDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc(&_isDefaultDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc(&_splitPrefixSumDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc(&_boundaryPrefixSumDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc(&_defaultPrefixSumDevPtr, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc(&mParticleIdsDefaultDevPtr, mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc(&mParticleIdsBoundaryDevPtr, mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc(&mParticleIdsSplitDevPtr, mParameters.numParticles*sizeof(int)) ); CUDA_SAFE_CALL( cudaMalloc(&mSubParticleCellStartIdsDevPtr, sizeof(int)*mParameters.gridDimSubParticles[0]* mParameters.gridDimSubParticles[1]* mParameters.gridDimSubParticles[2]) ); CUDA_SAFE_CALL( cudaMalloc(&mSubParticleCellEndIdsDevPtr, sizeof(int)*mParameters.gridDimSubParticles[0]* mParameters.gridDimSubParticles[1]* mParameters.gridDimSubParticles[2]) ); } //----------------------------------------------------------------------------- void ParticleSimulation::Bind () const { // copy simulation parameters to constant memory on device. 
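    // gSimParamsDev is assumed to be a __constant__ SimulationParameters
    // instance on the device, so all kernels read one cached copy. Host-side
    // changes to mParameters only become visible after Bind() re-uploads
    // them, which is why e.g. SetNPartThresh() and IncreaseCmDistanceThresh()
    // call Bind() themselves, roughly:
    //     sim->SetNPartThresh(1.0f);   // updates mParameters and re-binds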
CUDA_SAFE_CALL( cudaMemcpyToSymbol(gSimParamsDev, (void*)&mParameters, sizeof(SimulationParameters)) ); } //----------------------------------------------------------------------------- void ParticleSimulation::Advance () { try { this->map(); this->computeParticleHash(); this->sortParticleIdsByHash(); this->computeCellStartEndList(); this->computeDensityPressure(); this->computeAcceleration(); this->computeParticleState(); this->collect(); this->initializeSubParticles(); //this->computeSubParticleHash(); //this->sortSubParticleIdsByHash(); //this->computeSubParticleCellStartEndList(); //this->projectQuantities(); //this->computeSubParticleDensityPressure(); //this->computeSubParticleAcceleration(); this->integrate(); //this->integrateSubParticles(); //this->handleCollisions(); //this->handleSubParticleCollisions(); this->unmap(); mNumTimeSteps++; } catch (runtime_error& e) { cout << e.what() << endl; system("pause"); } } //----------------------------------------------------------------------------- void ParticleSimulation::AdvanceSubParticles () { try { this->map(); this->computeSubParticleHash(); this->sortSubParticleIdsByHash(); this->computeSubParticleCellStartEndList(); this->computeSubParticleDensityPressure(); this->projectQuantities(); mTimer.Start(); this->computeSubParticleAcceleration(); mTimer.Stop(); mTimer.DumpElapsed(); this->integrateSubParticles(); this->handleSubParticleCollisions(); this->unmap(); } catch (runtime_error& e) { cout << e.what() << endl; system("pause"); } } //----------------------------------------------------------------------------- void ParticleSimulation::AdvanceTwoScale () { try { this->map(); this->computeParticleHash(); this->sortParticleIdsByHash(); this->computeCellStartEndList(); this->computeDensityPressure(); this->computeAcceleration(); this->computeParticleState(); this->collect(); this->initializeSubParticles(); this->projectQuantities(); this->computeSubParticleHash(); this->sortSubParticleIdsByHash(); this->computeSubParticleCellStartEndList(); this->computeSubParticleDensityPressure(); this->computeSubParticleAcceleration(); this->integrate(); this->integrateSubParticles(); this->handleCollisions(); this->handleSubParticleCollisions(); this->unmap(); mNumTimeSteps++; } catch (runtime_error& e) { cout << e.what() << endl; system("pause"); } } //----------------------------------------------------------------------------- /*void ParticleSimulation::Check3DTextures () const { // compute a higher res slice of the density data using intrinsic trilinear // interpolation to check of the textures have been set up correctly. 
unsigned int width = mParameters.gridDim[0]; unsigned int height = mParameters.gridDim[1]; float* sliceDataDevPtr; CUDA_SAFE_CALL( cudaMalloc(&sliceDataDevPtr, sizeof(float)*width*height) ); dim3 blockSize(16, 16, 1); dim3 gridSize(width/blockSize.x + 1, height/blockSize.y + 1); create_density_slice <<<gridSize, blockSize>>> (sliceDataDevPtr, width, height, mParameters.gridDim[2]/2); float* sliceData = new float[width*height]; CUDA_SAFE_CALL( cudaMemcpy(sliceData, sliceDataDevPtr, sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); CUDA_SAFE_CALL( cudaFree(sliceDataDevPtr) ); PortablePixmap ppm(width, height, 255); float maxDensity = mBoundaryHandling->ComputeMaxDensity(); float restDistance = mBoundaryHandling->GetRestDistance(); for (unsigned int j = 0; j < height; j++) { for (unsigned int i = 0; i < width; i++) { unsigned int idx = i + j*width; float density = sliceData[idx]; ppm.setJET(i,j, std::abs(density)/restDistance); } } ppm.save("3dtextest.ppm"); delete[] sliceData; }*/ //----------------------------------------------------------------------------- float ParticleSimulation::GetParticleRadius () const { return powf((3.0*mParameters.fluidVolume)/ (4.0*M_PI*mParameters.numParticles), 1.0f/3.0f); } //----------------------------------------------------------------------------- float ParticleSimulation::GetSubParticleRadius () const { return 0.5f*this->GetParticleRadius(); } //----------------------------------------------------------------------------- const char* ParticleSimulation::GetParticleState () const { CUDA_SAFE_CALL( cudaMemcpy(mParticleStates, mParticleStatesDevPtr, sizeof(char)*mParameters.numParticles, cudaMemcpyDeviceToHost) ); return mParticleStates; } //----------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumParticles () const { return mParameters.numParticles; } //----------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumTimesSteps () const { return mNumTimeSteps; } //----------------------------------------------------------------------------- void ParticleSimulation::SetNPartThresh (float dVal) { mParameters.nPartTresh += dVal; printf("# particle thresh %f\n", mParameters.nPartTresh); this->Bind(); } //----------------------------------------------------------------------------- void ParticleSimulation::DecreaseCmDistanceThresh () { _rightI = mParameters.cmDistanceThresh; mParameters.cmDistanceThresh = 0.5f*(_rightI - _leftI); printf("cmDistance = %f\n", mParameters.cmDistanceThresh); this->Bind(); } //----------------------------------------------------------------------------- void ParticleSimulation::IncreaseCmDistanceThresh () { _leftI = mParameters.cmDistanceThresh; mParameters.cmDistanceThresh = 0.5f*(_rightI - _leftI); printf("cmDistance = %f\n", mParameters.cmDistanceThresh); this->Bind(); } //----------------------------------------------------------------------------- GLuint ParticleSimulation::GetGLParticleVertexBufferObject () const { return mParticleVertexDataVbo; } //----------------------------------------------------------------------------- GLuint ParticleSimulation::GetGLParticleIndexVertexBufferObject () const { return mParticleIdsDefaultVbo; } //----------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumParticlesDefault () const { return mNumParticlesDefault; } //---------------------------------------------------------------------------- GLuint 
ParticleSimulation::GetGLSubParticleVertexBufferObject () const { return mSubParticleVertexDataVbo; } //---------------------------------------------------------------------------- GLuint ParticleSimulation::GetGLSubParticleIndexVertexBufferObject () const { return mSubParticleIdsVbo; } //---------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumSubParticles () const { return mNumSubParticles; } //---------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumSubParticlesRegular () const { return mNumParticlesSplit*8; } //---------------------------------------------------------------------------- unsigned int ParticleSimulation::GetNumSubParticlesBoundary () const { return mNumParticlesBoundary*8; } //----------------------------------------------------------------------------- // Definition of private methods //----------------------------------------------------------------------------- void ParticleSimulation::computeParticleHash () { compute_particle_hash <<< mNumBlocks, mThreadsPerBlock >>> (mParticleVertexDataDevPtr, mParticleIdsDevPtr, mParticleHashListDevPtr, mParameters.numParticles); } //----------------------------------------------------------------------------- void ParticleSimulation::computeSubParticleHash () { if (mNumSubParticles != 0) { compute_sub_particle_hash <<< mNumBlocksSubParticle, mThreadsPerBlockSubParticle >>> (mSubParticleVertexDataDevPtr, mSubParticleIdsDevPtr, mSubParticleHashsDevPtr, mNumSubParticles); } } //----------------------------------------------------------------------------- void ParticleSimulation::sortParticleIdsByHash () { thrust::sort_by_key(thrust::device_ptr<int>(mParticleHashListDevPtr), thrust::device_ptr<int>(mParticleHashListDevPtr + mParameters.numParticles), thrust::device_ptr<int>(mParticleIdsDevPtr)); } //----------------------------------------------------------------------------- void ParticleSimulation::sortSubParticleIdsByHash () { copy_array <int> <<< mNumBlocksSubParticle, mThreadsPerBlockSubParticle >>> (mSubParticleSortedIdsDevPtr, mSubParticleIdsDevPtr, mNumSubParticles); thrust::sort_by_key(thrust::device_ptr<int>(mSubParticleHashsDevPtr), thrust::device_ptr<int>(mSubParticleHashsDevPtr + mNumSubParticles), thrust::device_ptr<int>(mSubParticleSortedIdsDevPtr)); } //----------------------------------------------------------------------------- void ParticleSimulation::computeCellStartEndList () { int* dim = mParameters.gridDim; unsigned int size = dim[0]*dim[1]*dim[2]*sizeof(int); cudaMemset(mCellStartListDevPtr, EMPTY_CELL, size); cudaMemset(mCellEndListDevPtr, EMPTY_CELL, size); int sharedMemSize = sizeof(int)*(mThreadsPerBlock + 1); compute_cell_start_end <<< mNumBlocks, mThreadsPerBlock, sharedMemSize>>> (mParticleHashListDevPtr, mCellStartListDevPtr, mCellEndListDevPtr, mParameters.numParticles); } //----------------------------------------------------------------------------- void ParticleSimulation::computeSubParticleCellStartEndList () { int* dim = mParameters.gridDimSubParticles; unsigned int size = dim[0]*dim[1]*dim[2]*sizeof(int); if (mNumSubParticles == 0) { return; } cudaMemset(mSubParticleCellStartIdsDevPtr, EMPTY_CELL, size); cudaMemset(mSubParticleCellEndIdsDevPtr, EMPTY_CELL, size); int sharedMemSize = sizeof(int)*(mThreadsPerBlockSubParticle + 1); compute_cell_start_end <<< mNumBlocksSubParticle, mThreadsPerBlockSubParticle, sharedMemSize>>> (mSubParticleHashsDevPtr, mSubParticleCellStartIdsDevPtr, 
mSubParticleCellEndIdsDevPtr, mNumSubParticles); } //----------------------------------------------------------------------------- void ParticleSimulation::computeDensityPressure () { compute_particle_density_pressure <<< mNumBlocks, mThreadsPerBlock >>> (mParticleVertexDataDevPtr,mParticleSimulationDataDevPtr, mParticleIdsDevPtr, mCellStartListDevPtr, mCellEndListDevPtr); } //----------------------------------------------------------------------------- void ParticleSimulation::computeSubParticleDensityPressure () { if (mNumSubParticles == 0) { return; } compute_sub_particle_density_pressure <<< mNumBlocksSubParticle, mThreadsPerBlockSubParticle >>> (mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mSubParticleIdsDevPtr, mSubParticleSortedIdsDevPtr, mSubParticleCellStartIdsDevPtr, mSubParticleCellEndIdsDevPtr, mNumParticlesSplit*8); } //----------------------------------------------------------------------------- void ParticleSimulation::computeAcceleration () { compute_particle_acceleration_ifsurf <<< mNumBlocks, mThreadsPerBlock >>> (mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr, mParticleIdsDevPtr, mCellStartListDevPtr, mCellEndListDevPtr, mIsSurfaceParticleDevPtr); } //----------------------------------------------------------------------------- //__global__ void compute_sub_particle_acceleration // (float* subParticleVertexData, float* subParticleSimulationData, // int* subParticleIdList, int* subParticleSortedIdList, int* cellStartList, // int* cellEndList, unsigned int numParticles) void ParticleSimulation::computeSubParticleAcceleration () { if (mNumParticlesSplit == 0) { return; } compute_sub_particle_acceleration <<< mNumBlocksSubParticle, mThreadsPerBlockSubParticle >>> (mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mSubParticleIdsDevPtr, mSubParticleSortedIdsDevPtr, mSubParticleCellStartIdsDevPtr, mSubParticleCellEndIdsDevPtr, mNumParticlesSplit*8); } //----------------------------------------------------------------------------- void ParticleSimulation::projectQuantities () { if (mNumSubParticles == 0) { return; } project_quantities <<< mNumBlocksSubParticleBoundary, mThreadsPerBlockSubParticleBoundary >>> (mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr, mSubParticleIdsDevPtr, 8*mNumParticlesBoundary, 8*mNumParticlesSplit); } //----------------------------------------------------------------------------- void ParticleSimulation::integrate () { integrate_euler <<< mNumBlocks, mThreadsPerBlock >>> (mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr); } //----------------------------------------------------------------------------- //__global__ void integrate_sub_particles_euler (float* subParticleVertexData, // float* subParticleSimulationData, int* subParticleIds, // unsigned int nSubParticles, unsigned int offset) void ParticleSimulation::integrateSubParticles () { if (mNumSubParticles != 0) { integrate_sub_particles_euler <<< mNumBlocksSubParticle, mThreadsPerBlockSubParticle >>> (mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mSubParticleIdsDevPtr, mNumSubParticles, 0); } } //----------------------------------------------------------------------------- void ParticleSimulation::handleCollisions () { collision_handling <<< mNumBlocks, mThreadsPerBlock >>> (mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr); } //----------------------------------------------------------------------------- void 
ParticleSimulation::handleSubParticleCollisions () { if (mNumSubParticles == 0) { return; } collision_handling_sub_particles <<< mNumBlocksSubParticle, mThreadsPerBlockSubParticle >>> (mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mSubParticleIdsDevPtr, mNumSubParticles, 0); } //----------------------------------------------------------------------------- void ParticleSimulation::computeParticleState () { shift_state <<< mNumBlocks, mThreadsPerBlock >>> (mParticleStatesDevPtr); find_split_particles <<< mNumBlocks, mThreadsPerBlock >>> (mParticleVertexDataDevPtr, mParticleStatesDevPtr, mParticleIdsDevPtr, mCellStartListDevPtr, mCellEndListDevPtr); find_boundary_particles <<< mNumBlocks, mThreadsPerBlock >>> (mParticleVertexDataDevPtr, mParticleStatesDevPtr, mParticleIdsDevPtr, mCellStartListDevPtr, mCellEndListDevPtr); } //----------------------------------------------------------------------------- // Collects all id's of active sub particles, particles with state "split", // particles with state "boundary", particles with that "default" in their own // arrays and computes the total number of each particle. void ParticleSimulation::collect () { CUDA_SAFE_CALL( cudaMemset(_isSplitDevPtr, 0, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( cudaMemset(_isBoundaryDevPtr, 0, (mParameters.numParticles + 1)*sizeof(int)) ); CUDA_SAFE_CALL( cudaMemset(_isDefaultDevPtr, 0, (mParameters.numParticles + 1)*sizeof(int)) ); check_split_boundary_default <<< mNumBlocks, mThreadsPerBlock >>> (mParticleStatesDevPtr, _isSplitDevPtr, _isBoundaryDevPtr, _isDefaultDevPtr); thrust::exclusive_scan(thrust::device_ptr<int>(_isSplitDevPtr), thrust::device_ptr<int>(_isSplitDevPtr + mParameters.numParticles + 1), thrust::device_ptr<int>(_splitPrefixSumDevPtr)); thrust::exclusive_scan(thrust::device_ptr<int>(_isBoundaryDevPtr), thrust::device_ptr<int>(_isBoundaryDevPtr + mParameters.numParticles + 1), thrust::device_ptr<int>(_boundaryPrefixSumDevPtr)); thrust::exclusive_scan(thrust::device_ptr<int>(_isDefaultDevPtr), thrust::device_ptr<int>(_isDefaultDevPtr + mParameters.numParticles + 1), thrust::device_ptr<int>(_defaultPrefixSumDevPtr)); CUDA_SAFE_CALL( cudaMemcpy(&mNumParticlesSplit, &_splitPrefixSumDevPtr[mParameters.numParticles], sizeof(int), cudaMemcpyDeviceToHost) ); CUDA_SAFE_CALL( cudaMemcpy(&mNumParticlesBoundary, &_boundaryPrefixSumDevPtr[mParameters.numParticles], sizeof(int), cudaMemcpyDeviceToHost) ); CUDA_SAFE_CALL( cudaMemcpy(&mNumParticlesDefault, &_defaultPrefixSumDevPtr[mParameters.numParticles], sizeof(int), cudaMemcpyDeviceToHost) ); collect_ids <<< mNumBlocks, mThreadsPerBlock >>> (mSubParticleIdsDevPtr, mSubParticleSortedIdsDevPtr, mParticleIdsSplitDevPtr, mParticleIdsBoundaryDevPtr, mParticleIdsDefaultDevPtr, _isSplitDevPtr, _isBoundaryDevPtr, _isDefaultDevPtr, _splitPrefixSumDevPtr, _boundaryPrefixSumDevPtr, _defaultPrefixSumDevPtr, mNumParticlesSplit); mNumSubParticles = 8*(mNumParticlesSplit + mNumParticlesBoundary); // compute how many cuda blocks and how many threads a block are needed for // split particles, boundary particles, default particles. 
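    // Worked example of the launch configuration computed below (see
    // compute_particle_kernel_invocation_information at the end of this
    // file): the block size is capped at 256 threads and the block count is
    // rounded up, so e.g. mNumParticlesSplit = 1000 split parents give
    // 8*1000 = 8000 sub-particles -> 256 threads/block and
    // ceil(8000/256) = 32 blocks, while fewer than 256 elements run in a
    // single block with exactly that many threads.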
compute_particle_kernel_invocation_information(mThreadsPerBlockSplit, mNumBlocksSplit, mNumParticlesSplit); compute_particle_kernel_invocation_information(mThreadsPerBlockBoundary, mNumBlocksBoundary, mNumParticlesBoundary); compute_particle_kernel_invocation_information(mThreadsPerBlockDefault, mNumBlocksDefault, mNumParticlesDefault); compute_particle_kernel_invocation_information(mThreadsPerBlockSubParticle, mNumBlocksSubParticle, mNumSubParticles); compute_particle_kernel_invocation_information (mThreadsPerBlockSubParticleBoundary, mNumBlocksSubParticleBoundary, 8*mNumParticlesBoundary); compute_particle_kernel_invocation_information (mThreadsPerBlockSubParticleRegular, mNumBlocksSubParticleRegular, 8*mNumParticlesSplit); } //----------------------------------------------------------------------------- void ParticleSimulation::initializeSubParticles () { // initializes new sub particles if a parent particle has changed its // state from "default" to "boundary" or "split" if (mNumParticlesSplit > 0) { initialize_sub_particles <<<mNumBlocksSplit, mThreadsPerBlockSplit>>> (mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mParticleIdsSplitDevPtr, mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr, mParticleStatesDevPtr, mNumParticlesSplit); } if (mNumParticlesBoundary > 0) { initialize_sub_particles <<<mNumBlocksBoundary, mThreadsPerBlockBoundary>>> (mSubParticleVertexDataDevPtr, mSubParticleSimulationDataDevPtr, mParticleIdsBoundaryDevPtr, mParticleVertexDataDevPtr, mParticleSimulationDataDevPtr, mParticleStatesDevPtr, mNumParticlesBoundary); } } //----------------------------------------------------------------------------- void ParticleSimulation::setUpSphInComplexShapes () { // set up the three dimensional textures that store the boundary // information of the SPH in complex shapes paper (density contribution, // distances). 
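    // The boundary grids prepared below are uploaded as 3D float textures
    // (see set_up_3d_float_texture at the end of this file) with normalized
    // coordinates and hardware trilinear filtering, so the collision and
    // boundary kernels can presumably sample the distance, density and
    // viscosity fields with a single tex3D() lookup per particle instead of
    // doing the trilinear interpolation by hand.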
// create boundary handling data Wm5::Vector3f s(-0.8f, -0.6f, -0.4f); Wm5::Vector3f e(0.8f, 0.6f, 0.4f); float h = mParameters.compactSupport; float particleSpacing = std::powf(mParameters.particleMass/mParameters.restDensity, 1.0f/3.0f); float mass = mParameters.particleMass; SphInComplexShapes* mBoundaryHandling = new SphInComplexShapes(s, e, h/4.0f, h, h, mass, mParameters.restDensity, mParameters.dynamicViscosity, particleSpacing); Wm5::Box3f b(Wm5::Vector3f(0.0f, 0.0f, 0.0f), Wm5::Vector3f(1.0f, 0.0f, 0.0f), Wm5::Vector3f(0.0f, 1.0f, 0.0f), Wm5::Vector3f(0.0f, 0.0f, 1.0f), 0.7f, 0.5f, 0.3f); mBoundaryHandling->SetBox(b); mBoundaryHandling->SaveSlicedDistanceMapToPpm("distances.ppm"); mBoundaryHandling->SaveSlicedViscosityMapToPpm("viscosities.ppm"); // send boundary grid information to device float gridOrigin[3]; gridOrigin[0] = mBoundaryHandling->GetGridStart().X(); gridOrigin[1] = mBoundaryHandling->GetGridStart().Y(); gridOrigin[2] = mBoundaryHandling->GetGridStart().Z(); unsigned int gridDimensions[3]; gridDimensions[0] = mBoundaryHandling->GetGridDimension(0); gridDimensions[1] = mBoundaryHandling->GetGridDimension(1); gridDimensions[2] = mBoundaryHandling->GetGridDimension(2); float gridSpacing = mBoundaryHandling->GetGridSpacing(); float restDistance = mBoundaryHandling->GetRestDistance(); float gridLength[3]; gridLength[0] = (gridDimensions[0] - 1)*gridSpacing; gridLength[1] = (gridDimensions[1] - 1)*gridSpacing; gridLength[2] = (gridDimensions[2] - 1)*gridSpacing; CUDA_SAFE_CALL( cudaMemcpyToSymbol(gBoundaryGridOrigin, gridOrigin, 3*sizeof(float)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(gBoundaryGridDimensions, gridDimensions, 3*sizeof(unsigned int)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(gBoundaryGridLength, gridLength, 3*sizeof(float)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(gBoundaryGridSpacing, &gridSpacing, sizeof(float)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(gBoundaryRestDistance, &restDistance, sizeof(float)) ); // set up 3d textures float* densityTexData = SphInComplexShapes::CreateDensityTextureData(*mBoundaryHandling); set_up_3d_float_texture(&gBoundaryDensities, mBoundaryDensities, densityTexData, gridDimensions); delete[] densityTexData; float* distanceTexData = SphInComplexShapes::CreateDistanceTextureData(*mBoundaryHandling); set_up_3d_float_texture(&gBoundaryDistances, mBoundaryDistances, distanceTexData, gridDimensions); delete[] distanceTexData; float* viscosityTexData = SphInComplexShapes::CreateViscosityTextureData(*mBoundaryHandling); set_up_3d_float_texture(&gBoundaryViscosities, mBoundaryViscosities, viscosityTexData, gridDimensions); delete[] viscosityTexData; delete mBoundaryHandling; // set up wall textures for sub particles /* { float h = mParameters.compactSupportSub; float particleSpacing = std::powf(mParameters.subParticleMass/mParameters.restDensity, 1.0f/3.0f); float mass = mParameters.subParticleMass; SphInComplexShapes* mBoundaryHandling = new SphInComplexShapes(s, e, h/4.0f, h, h, mass, mParameters.restDensity, mParameters.dynamicViscosity, particleSpacing); }*/ } //----------------------------------------------------------------------------- void ParticleSimulation::map () { cudaGraphicsMapResources(4, mGraphicsResources); size_t nBytes; cudaGraphicsResourceGetMappedPointer (reinterpret_cast<void**>(&mParticleVertexDataDevPtr), &nBytes, mGraphicsResources[0]); cudaGraphicsResourceGetMappedPointer (reinterpret_cast<void**>(&mSubParticleVertexDataDevPtr), &nBytes, mGraphicsResources[1]); cudaGraphicsResourceGetMappedPointer 
(reinterpret_cast<void**>(&mParticleIdsDefaultDevPtr), &nBytes, mGraphicsResources[2]); cudaGraphicsResourceGetMappedPointer (reinterpret_cast<void**>(&mSubParticleIdsDevPtr), &nBytes, mGraphicsResources[3]); } void ParticleSimulation::unmap () { cudaGraphicsUnmapResources(4, mGraphicsResources); //cudaGLUnmapBufferObject(mParticleVertexDataVbo); } //----------------------------------------------------------------------------- void ParticleSimulation::SaveInfoTable (const std::string& filename) { using namespace std; ofstream file; file.open(filename); int* pIdList = new int[mParameters.numParticles]; int* pHashList = new int[mParameters.numParticles]; int cellListSize = mParameters.gridDim[0]*mParameters.gridDim[1]* mParameters.gridDim[2]; int* pCellStartList = new int[cellListSize]; int* pCellEndList = new int[cellListSize]; //this->map(); cudaMemcpy(pHashList, mParticleHashListDevPtr, mParameters.numParticles*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(pIdList, mParticleIdsDevPtr, mParameters.numParticles*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(pCellStartList, mCellStartListDevPtr, cellListSize*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(pCellEndList, mCellEndListDevPtr, cellListSize*sizeof(int), cudaMemcpyDeviceToHost); file << "Number of particles " << mParameters.numParticles << endl; file << setw(8) << "index" << setw(12) << " id" << setw(12) << " hash" << setw(12) << " start" << setw(12) << " end" << endl; for (unsigned int i = 0; i < cellListSize; i++) { file << setw(8) << i; if(i < mParameters.numParticles) { file << setw(12) << pIdList[i]; file << setw(12) << pHashList[i]; } else { file << setw(12) << ""; file << setw(12) << ""; } if(pCellStartList[i] == EMPTY_CELL) { file << setw(12) << ""; } else { file << setw(12) << pCellStartList[i]; } if(pCellEndList[i] == EMPTY_CELL) { file << setw(12) << "" << endl; } else { file << setw(12) << pCellEndList[i] << endl; } } delete[] pIdList; delete[] pHashList; delete[] pCellStartList; delete[] pCellEndList; file.close(); //this->unmap(); } //----------------------------------------------------------------------------- void ParticleSimulation::SaveParticleInfo (const std::string& filename) { using namespace std; this->map(); ofstream file; file.open(filename); float* particleVertexData = new float[VD_NUM_ELEMENTS*mParameters.numParticles]; float* particleSimulationData = new float[SD_NUM_ELEMENTS*mParameters.numParticles]; // copy particle information from device to host cudaMemcpy(particleVertexData, mParticleVertexDataDevPtr, VD_NUM_ELEMENTS*mParameters.numParticles*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(particleSimulationData, mParticleSimulationDataDevPtr, SD_NUM_ELEMENTS*mParameters.numParticles*sizeof(float), cudaMemcpyDeviceToHost); // set max. 
chars for each column of the table int columnWidth = 20; file << setw(columnWidth) << "Index"; file << setw(columnWidth) << "X"; file << setw(columnWidth) << "Y"; file << setw(columnWidth) << "Z"; file << setw(columnWidth) << "Density"; file << setw(columnWidth) << "Pressure"; file << setw(columnWidth) << "Acc X"; file << setw(columnWidth) << "Acc Y"; file << setw(columnWidth) << "Acc Z"; file << endl; for (unsigned int i = 0; i < mParameters.numParticles; i++) { file << setw(columnWidth) << i; file << setw(columnWidth) << particleVertexData[VD_NUM_ELEMENTS*i + VD_POS_X]; file << setw(columnWidth) << particleVertexData[VD_NUM_ELEMENTS*i + VD_POS_Y]; file << setw(columnWidth) << particleVertexData[VD_NUM_ELEMENTS*i + VD_POS_Z]; file << setw(columnWidth) << particleSimulationData[SD_NUM_ELEMENTS*i + SD_DENSITY]; file << setw(columnWidth) << particleSimulationData[SD_NUM_ELEMENTS*i + SD_PRESSURE]; file << setw(columnWidth) << particleSimulationData[SD_NUM_ELEMENTS*i + SD_ACC_X]; file << setw(columnWidth) << particleSimulationData[SD_NUM_ELEMENTS*i + SD_ACC_Y]; file << setw(columnWidth) << particleSimulationData[SD_NUM_ELEMENTS*i + SD_ACC_Z]; // TODO: rest of the params. file << endl; } delete[] particleVertexData; delete[] particleSimulationData; file.close(); this->unmap(); } //----------------------------------------------------------------------------- /* unsigned int ParticleSimulation::GetSizeMemoryGPU () const { float size; size += mNumPar }*/ //----------------------------------------------------------------------------- // definition of aux. functions //----------------------------------------------------------------------------- void create_particle_box (float sx, float sy, float sz, float d, unsigned int numParticles, float** particleVD, float** particleSD, unsigned int* numParticlesCreated) { // Creates a set of particles, that are aligned in a cube, given the starting // point of the box [sx, sy, sz] the length of the cube in each direction [d] // and the approximate amount of total particles [numParticles]. // // Returns a pointer to the vertex data of the particles in [particleVD] and // a pointer to the simulation data of the particles in [particleSD] and the // actual amount of particles created // computed number of particles in each direction unsigned int num = pow(static_cast<double>(numParticles), 1.0/3.0); *numParticlesCreated = num*num*num; *particleVD = new float[*numParticlesCreated*VD_NUM_ELEMENTS]; *particleSD = new float[*numParticlesCreated*SD_NUM_ELEMENTS]; // check if new failed. 
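    // Note: a plain new[] reports failure by throwing std::bad_alloc rather
    // than returning NULL, so the check below only has an effect if the
    // allocations above are switched to the nothrow form, e.g.
    //     *particleVD = new (std::nothrow) float[*numParticlesCreated*VD_NUM_ELEMENTS];
    // Worked example of the seeding: requesting numParticles = 40000 (as in
    // Example01) gives num = floor(cbrt(40000)) = 34, i.e. 34*34*34 = 39304
    // particles actually created.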
    if ((*particleVD) == NULL || (*particleSD) == NULL)
    {
        *numParticlesCreated = 0;
        return;
    }

    // compute spatial increment
    float dx = d/static_cast<float>(num - 1);

    // seed the particles inside the cube
    // set the position of each particle
    unsigned int idx;

    for (unsigned int k = 0; k < num; k++)
    {
        for (unsigned int j = 0; j < num; j++)
        {
            for (unsigned int i = 0; i < num; i++)
            {
                idx = VD_NUM_ELEMENTS*(num*(num*k+j)+i);
                (*particleVD)[idx + VD_POS_X] = sx + i*dx;
                (*particleVD)[idx + VD_POS_Y] = sy + j*dx;
                (*particleVD)[idx + VD_POS_Z] = sz + k*dx;
            }
        }
    }

    // set other particles attributes to 0.0f
    memset((*particleSD), 0,
        sizeof(float)*SD_NUM_ELEMENTS*(*numParticlesCreated));
}
//-----------------------------------------------------------------------------
void set_simulation_domain (float xs, float ys, float zs, float xe, float ye,
    float ze, float gridSpacing, float gridSpacingSubParticles,
    SimulationParameters* parameters)
{
    // Sets the simulation domain in the [parameters], based on a starting point
    // [xs, ys, zs], an ending point [xe, ye, ze] and the distance between two
    // grid points [gridSpacing].
    parameters->gridOrigin[0] = xs;
    parameters->gridOrigin[1] = ys;
    parameters->gridOrigin[2] = zs;
    parameters->gridDim[0] = static_cast<int>((xe - xs)/gridSpacing + 0.5);
    parameters->gridDim[1] = static_cast<int>((ye - ys)/gridSpacing + 0.5);
    parameters->gridDim[2] = static_cast<int>((ze - zs)/gridSpacing + 0.5);
    parameters->gridDimSubParticles[0] =
        static_cast<int>((xe - xs)/gridSpacingSubParticles + 0.5);
    parameters->gridDimSubParticles[1] =
        static_cast<int>((ye - ys)/gridSpacingSubParticles + 0.5);
    parameters->gridDimSubParticles[2] =
        static_cast<int>((ze - zs)/gridSpacingSubParticles + 0.5);
    parameters->gridSpacing = gridSpacing;
    parameters->gridSpacingSubParticles = gridSpacingSubParticles;
}
//-----------------------------------------------------------------------------
void compute_particle_kernel_invocation_information
    (unsigned int& nThreadsBlock, unsigned int& nBlocks,
    unsigned int numParticles)
{
    if (numParticles == 0)
    {
        nThreadsBlock = 0;
        nBlocks = 0;
        return;
    }

    nThreadsBlock = numParticles > 256 ? 256 : numParticles;
    nBlocks = numParticles % nThreadsBlock == 0 ?
        numParticles/nThreadsBlock : numParticles/nThreadsBlock + 1;
}
//-----------------------------------------------------------------------------
void set_up_3d_float_texture (struct textureReference* texRef, cudaArray* arr,
    float* data, unsigned int dim[3])
{
    // NOTE: [arr] is passed by value, so the cudaArray allocated below never
    // reaches the caller's member pointer; the texture binding itself still
    // works because it uses the local copy.

    // debug! set all arr vals to 0.5f
    /*for (unsigned int i = 0; i < dim[0]*dim[1]*dim[2]; i++)
    {
        if (i % 2)
        {
            data[i] = 1.0f;
        }
        else
        {
            data[i] = 0.5f;
        }
    }*/

    // set allocation parameters
    cudaChannelFormatDesc descf = cudaCreateChannelDesc(32, 0, 0, 0,
        cudaChannelFormatKindFloat);

    cudaExtent extent;
    extent.width = dim[0];
    extent.height = dim[1];
    extent.depth = dim[2];

    // alloc cuda array
    CUDA_SAFE_CALL( cudaMalloc3DArray(&arr, &descf, extent) );

    // set copy parameters
    cudaMemcpy3DParms copyParams;
    memset(&copyParams, 0, sizeof(cudaMemcpy3DParms));
    copyParams.srcPtr = make_cudaPitchedPtr((void *)data,
        extent.width*sizeof(float), extent.width, extent.height);
    copyParams.dstArray = arr;
    copyParams.extent = extent;
    copyParams.kind = cudaMemcpyHostToDevice;

    // transfer 3d data to cuda array
    CUDA_SAFE_CALL( cudaMemcpy3D(&copyParams) );

    // set texture parameters
    texRef->normalized = true;
    texRef->filterMode = cudaFilterModeLinear;
    texRef->addressMode[0] = cudaAddressModeClamp;
    texRef->addressMode[1] = cudaAddressModeClamp;
    texRef->addressMode[2] = cudaAddressModeClamp;

    // bind array to global texture
    cudaBindTextureToArray(texRef, arr, &descf);
}
3d8a5bdb6b3d6935c398106f1dcea125520a75a7.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include "helper_functions.h" #include "helper_cuda.h" #include <stdio.h> #include "../include/hamiltonian.h" #include "hamiltonian_kernel.cu" #include "../include/reducer.h" void checkCublasErrors(hipblasStatus_t ret){ if (ret != HIPBLAS_STATUS_SUCCESS){ printf("hipblasCreate returned error code %d, line(%d)\n", ret, __LINE__); exit(-1); } } /* Actual CUDA functions */ ///////////////////////////////////////// 3D functions////////////////////////////////////////////// float PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA3D(float3 * h_q, float3 * h_p, float3 * h_hq, float3 * h_hp, float9 * h_hqq, float9 * h_hqp, float9 * h_hpp, float sigma, int k, bool flag_hessian, bool dataInDevice ){ // Parameters float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; // Initialize cublas hipblasHandle_t handle; checkCublasErrors( hipblasCreate(&handle) ); // hipblasOperation_t trans = HIPBLAS_OP_N; // AT if transa == HIPBLAS_OP_T hipblasOperation_t trans = HIPBLAS_OP_T; // AT if transa == HIPBLAS_OP_T float alf=1.0; float beta=0; // Some memory control stuff float3 * d_q; float3 * d_p; if (dataInDevice){ d_q = h_q; d_p = h_p; } else { checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float3))); checkCudaErrors(hipMemcpy(d_q, h_q, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, h_p, k * sizeof(float3), hipMemcpyHostToDevice)); } // Start timer StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); // Initialize hamiltonian float H = 0.0; // allocate the memory float * d_pi_pj; float * d_pi_pj_g; float3 * d_dq; float * d_g; checkCudaErrors(hipMalloc((void **)&d_pi_pj, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_pi_pj_g, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_dq, k2*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_g, k2*sizeof(float))); // Calculate the pi_pj, dq, g and (pi_pj * g) threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( dqpipjKernel) , dim3(blocks), dim3(threads) , 0, 0, d_q, d_dq, d_g, f, d_p, d_pi_pj, k); hipLaunchKernelGGL(( multiplyKernel2D) , dim3(blocks), dim3(threads) , 0, 0, d_pi_pj, d_g, d_pi_pj_g, k, k); float * h_pi_pj_g = new float[k2]; // Calculate H H = 0.5 * Reducer::reduce_sum_wrapper(k2, d_pi_pj_g); checkCudaErrors(hipFree(d_pi_pj_g)); // Calculate the 1st derivative //printf("Calculating 1st derivative...\n"); float * d_pi_pj_g1_dq_x; float * d_pi_pj_g1_dq_y; float * d_pi_pj_g1_dq_z; float * d_p_g_x; float * d_p_g_y; float * d_p_g_z; checkCudaErrors(hipMalloc((void **)&d_pi_pj_g1_dq_x, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_pi_pj_g1_dq_y, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_pi_pj_g1_dq_z, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_p_g_x, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_p_g_y, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_p_g_z, k2*sizeof(float))); // Precompute the terms that need to be added up threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( hqhpPreComputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_pi_pj, d_dq, d_g, d_p, f, d_pi_pj_g1_dq_x, d_pi_pj_g1_dq_y, d_pi_pj_g1_dq_z, d_p_g_x, d_p_g_y, d_p_g_z, k); float * d_one; checkCudaErrors(hipMalloc((void 
**)&d_one, k*sizeof(float))); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( onesKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_one, k); // Allocate the memory float * d_hq_x; float * d_hq_y; float * d_hq_z; float * d_hp_x; float * d_hp_y; float * d_hp_z; checkCudaErrors(hipMalloc((void **)&d_hq_x, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hq_y, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hq_z, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hp_x, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hp_y, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hp_z, k*sizeof(float))); // Use CUBLAS to multiply the terms by one vector to add up checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_pi_pj_g1_dq_x, k, d_one, 1, &beta, d_hq_x, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_pi_pj_g1_dq_y, k, d_one, 1, &beta, d_hq_y, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_pi_pj_g1_dq_z, k, d_one, 1, &beta, d_hq_z, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_p_g_x, k, d_one, 1, &beta, d_hp_x, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_p_g_y, k, d_one, 1, &beta, d_hp_y, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_p_g_z, k, d_one, 1, &beta, d_hp_z, 1) ); // clean up checkCudaErrors(hipFree(d_pi_pj_g1_dq_x)); checkCudaErrors(hipFree(d_pi_pj_g1_dq_y)); checkCudaErrors(hipFree(d_pi_pj_g1_dq_z)); checkCudaErrors(hipFree(d_p_g_x)); checkCudaErrors(hipFree(d_p_g_y)); checkCudaErrors(hipFree(d_p_g_z)); // TODO: copy the result back to host float3 * d_hq; float3 * d_hp; if (dataInDevice){ d_hq = h_hq; d_hp = h_hp; } else { checkCudaErrors(hipMalloc((void **)&d_hq, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_hp, k*sizeof(float3))); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( Float2Float3Kernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_hq_x, d_hq_y, d_hq_z, d_hq, k); hipLaunchKernelGGL(( Float2Float3Kernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_hp_x, d_hp_y, d_hp_z, d_hp, k); checkCudaErrors(hipFree(d_hq_x)); checkCudaErrors(hipFree(d_hq_y)); checkCudaErrors(hipFree(d_hq_z)); checkCudaErrors(hipFree(d_hp_x)); checkCudaErrors(hipFree(d_hp_y)); checkCudaErrors(hipFree(d_hp_z)); // Some memory control stuff if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { checkCudaErrors(hipMemcpy(h_hq, d_hq, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_hp, d_hp, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_hq)); checkCudaErrors(hipFree(d_hp)); } //printf("Done 1st derivative.\n"); // Calculate the 2nd derivatives if (flag_hessian){ //printf("Calculating 2nd derivative...\n"); //printf("Calculating hqq...\n"); /////////////////////////////////////////////////////////////////////////////////////////////////////// /* hqq */ float * d_hqq_xx; float * d_hqq_xy; float * d_hqq_xz; float * d_hqq_yx; float * d_hqq_yy; float * d_hqq_yz; float * d_hqq_zx; float * d_hqq_zy; float * d_hqq_zz; // Allocate memory //printf("hqq: Allocating mem...\n"); checkCudaErrors(hipMalloc((void **)&d_hqq_xx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_xy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_xz, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_yx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_yy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_yz, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_zx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_zy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_zz, k2*sizeof(float))); // Precompute the terms //printf("hqq: Precomputing...\n"); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( hqqPreComputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_pi_pj, d_g, d_dq, f, d_hqq_xx, d_hqq_xy, d_hqq_xz, d_hqq_yx, d_hqq_yy, d_hqq_yz, d_hqq_zx, d_hqq_zy, d_hqq_zz, k); // The diagonal terms need sum - again use cublas float * d_hqq_diag_xx; float * d_hqq_diag_xy; float * d_hqq_diag_xz; float * d_hqq_diag_yx; float * d_hqq_diag_yy; float * d_hqq_diag_yz; float * d_hqq_diag_zx; float * d_hqq_diag_zy; float * d_hqq_diag_zz; checkCudaErrors(hipMalloc((void **)&d_hqq_diag_xx, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_diag_xy, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_diag_xz, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_diag_yx, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_diag_yy, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_diag_yz, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_diag_zx, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_diag_zy, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_diag_zz, k*sizeof(float))); // cublas sum //printf("hqq: cublas sum...\n"); float * d_mone; checkCudaErrors(hipMalloc((void **)&d_mone, k*sizeof(float))); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( fillKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_mone, k, -1 ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_xx, k, d_mone, 1, &beta, d_hqq_diag_xx, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_xy, k, d_mone, 1, &beta, d_hqq_diag_xy, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_xz, k, d_mone, 1, &beta, d_hqq_diag_xz, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_yx, k, d_mone, 1, &beta, d_hqq_diag_yx, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_yy, k, d_mone, 1, &beta, d_hqq_diag_yy, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_yz, k, d_mone, 1, &beta, d_hqq_diag_yz, 1) ); 
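    // Note on the reduction trick used above and below: with trans == HIPBLAS_OP_T, each Sgemv
    // multiplies a k-by-k matrix of precomputed pairwise terms (filled by hqqPreComputeKernel)
    // by d_mone, a vector of -1's, i.e. it forms the negated per-landmark sums of the
    // off-diagonal contributions; these sums are written onto the diagonal of H_qq further down.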
checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_zx, k, d_mone, 1, &beta, d_hqq_diag_zx, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_zy, k, d_mone, 1, &beta, d_hqq_diag_zy, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_zz, k, d_mone, 1, &beta, d_hqq_diag_zz, 1) ); checkCudaErrors(hipFree(d_mone)); // Copy the diagonal terms into the matrix //printf("hqq: copy diagonal term...\n"); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_xx, d_hqq_diag_xx, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_xy, d_hqq_diag_xy, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_xz, d_hqq_diag_xz, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_yx, d_hqq_diag_yx, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_yy, d_hqq_diag_yy, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_yz, d_hqq_diag_yz, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_zx, d_hqq_diag_zx, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_zy, d_hqq_diag_zy, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_zz, d_hqq_diag_zz, k); checkCudaErrors(hipFree(d_hqq_diag_xx)); checkCudaErrors(hipFree(d_hqq_diag_xy)); checkCudaErrors(hipFree(d_hqq_diag_xz)); checkCudaErrors(hipFree(d_hqq_diag_yx)); checkCudaErrors(hipFree(d_hqq_diag_yy)); checkCudaErrors(hipFree(d_hqq_diag_yz)); checkCudaErrors(hipFree(d_hqq_diag_zx)); checkCudaErrors(hipFree(d_hqq_diag_zy)); checkCudaErrors(hipFree(d_hqq_diag_zz)); // copy the result back to host //printf("hqq: copy back the result...\n"); float9 * d_hqq; if (dataInDevice){ d_hqq = h_hqq; }else{ checkCudaErrors(hipMalloc((void **)&d_hqq, k2*sizeof(float9))); } //printf("hqq: Done allocate memory...\n"); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( Float2Float9Kernel2D) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_xx, d_hqq_xy, d_hqq_xz, d_hqq_yx, d_hqq_yy, d_hqq_yz, d_hqq_zx, d_hqq_zy, d_hqq_zz, d_hqq, k); //printf("hqq: Done copy 9 float to float9...\n"); checkCudaErrors(hipFree(d_hqq_xx)); checkCudaErrors(hipFree(d_hqq_xy)); checkCudaErrors(hipFree(d_hqq_xz)); checkCudaErrors(hipFree(d_hqq_yx)); checkCudaErrors(hipFree(d_hqq_yy)); checkCudaErrors(hipFree(d_hqq_yz)); checkCudaErrors(hipFree(d_hqq_zx)); checkCudaErrors(hipFree(d_hqq_zy)); checkCudaErrors(hipFree(d_hqq_zz)); if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code }else{ checkCudaErrors(hipMemcpy(h_hqq, d_hqq, k2 * sizeof(float9), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_hqq)); } //printf("hqq: Done copy back to host...\n"); //printf("Done hqq.\n"); //printf("Calculating hqp...\n"); //////////////////////////////////////////////////////////////////////////////////////////////////// /* hqp */ float * d_hqp_xx; float * d_hqp_xy; float * d_hqp_xz; float * d_hqp_yx; float * d_hqp_yy; float * d_hqp_yz; float * d_hqp_zx; float * d_hqp_zy; float * d_hqp_zz; float * d_hqp_ii_xx; float * d_hqp_ii_xy; float * d_hqp_ii_xz; float * d_hqp_ii_yx; float * d_hqp_ii_yy; float * d_hqp_ii_yz; float * d_hqp_ii_zx; float * d_hqp_ii_zy; float * d_hqp_ii_zz; // Allocate memory checkCudaErrors(hipMalloc((void **)&d_hqp_xx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_xy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_xz, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_yx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_yy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_yz, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_zx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_zy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_zz, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_xx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_xy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_xz, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_yx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_yy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_yz, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_zx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_zy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_zz, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( hqpPreComputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_p, d_g, f, d_dq, d_hqp_xx, d_hqp_xy, d_hqp_xz, d_hqp_yx, d_hqp_yy, d_hqp_yz, d_hqp_zx, d_hqp_zy, d_hqp_zz, d_hqp_ii_xx, d_hqp_ii_xy, d_hqp_ii_xz, d_hqp_ii_yx, d_hqp_ii_yy, d_hqp_ii_yz, d_hqp_ii_zx, d_hqp_ii_zy, d_hqp_ii_zz, k); // The diagonal terms need sum - again use cublas float * d_hqp_diag_xx; float * d_hqp_diag_xy; float * d_hqp_diag_xz; float * d_hqp_diag_yx; float * d_hqp_diag_yy; float * d_hqp_diag_yz; float * d_hqp_diag_zx; float * d_hqp_diag_zy; float * d_hqp_diag_zz; checkCudaErrors(hipMalloc((void **)&d_hqp_diag_xx, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_diag_xy, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_diag_xz, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_diag_yx, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_diag_yy, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_diag_yz, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_diag_zx, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_diag_zy, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_diag_zz, k*sizeof(float))); // cublas sum checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_xx, k, d_one, 1, &beta, d_hqp_diag_xx, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_xy, k, d_one, 1, &beta, d_hqp_diag_xy, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_xz, k, 
d_one, 1, &beta, d_hqp_diag_xz, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_yx, k, d_one, 1, &beta, d_hqp_diag_yx, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_yy, k, d_one, 1, &beta, d_hqp_diag_yy, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_yz, k, d_one, 1, &beta, d_hqp_diag_yz, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_zx, k, d_one, 1, &beta, d_hqp_diag_zx, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_zy, k, d_one, 1, &beta, d_hqp_diag_zy, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_zz, k, d_one, 1, &beta, d_hqp_diag_zz, 1) ); // Release checkCudaErrors(hipFree(d_hqp_ii_xx)); checkCudaErrors(hipFree(d_hqp_ii_xy)); checkCudaErrors(hipFree(d_hqp_ii_xz)); checkCudaErrors(hipFree(d_hqp_ii_yx)); checkCudaErrors(hipFree(d_hqp_ii_yy)); checkCudaErrors(hipFree(d_hqp_ii_yz)); checkCudaErrors(hipFree(d_hqp_ii_zx)); checkCudaErrors(hipFree(d_hqp_ii_zy)); checkCudaErrors(hipFree(d_hqp_ii_zz)); // copy the diagonal terms into the matrix threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_xx, d_hqp_diag_xx, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_xy, d_hqp_diag_xy, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_xz, d_hqp_diag_xz, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_yx, d_hqp_diag_yx, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_yy, d_hqp_diag_yy, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_yz, d_hqp_diag_yz, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_zx, d_hqp_diag_zx, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_zy, d_hqp_diag_zy, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_zz, d_hqp_diag_zz, k); checkCudaErrors(hipFree(d_hqp_diag_xx)); checkCudaErrors(hipFree(d_hqp_diag_xy)); checkCudaErrors(hipFree(d_hqp_diag_xz)); checkCudaErrors(hipFree(d_hqp_diag_yx)); checkCudaErrors(hipFree(d_hqp_diag_yy)); checkCudaErrors(hipFree(d_hqp_diag_yz)); checkCudaErrors(hipFree(d_hqp_diag_zx)); checkCudaErrors(hipFree(d_hqp_diag_zy)); checkCudaErrors(hipFree(d_hqp_diag_zz)); // copy the result back to host float9 * d_hqp; if (dataInDevice){ d_hqp = h_hqp; }else{ checkCudaErrors(hipMalloc((void **)&d_hqp, k2*sizeof(float9))); } threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( Float2Float9Kernel2D) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_xx, d_hqp_xy, d_hqp_xz, d_hqp_yx, d_hqp_yy, d_hqp_yz, d_hqp_zx, d_hqp_zy, d_hqp_zz, d_hqp, k); checkCudaErrors(hipFree(d_hqp_xx)); checkCudaErrors(hipFree(d_hqp_xy)); checkCudaErrors(hipFree(d_hqp_xz)); checkCudaErrors(hipFree(d_hqp_yx)); checkCudaErrors(hipFree(d_hqp_yy)); checkCudaErrors(hipFree(d_hqp_yz)); checkCudaErrors(hipFree(d_hqp_zx)); checkCudaErrors(hipFree(d_hqp_zy)); checkCudaErrors(hipFree(d_hqp_zz)); if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code }else{ checkCudaErrors(hipMemcpy(h_hqp, d_hqp, k2 * sizeof(float9), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_hqp)); } //printf("Done hqp.\n"); //printf("Calculating hpp...\n"); //////////////////////////////////////////////////////////////////////////////////////////// /* hpp */ float * d_hpp_xx; float * d_hpp_yy; float * d_hpp_zz; checkCudaErrors(hipMalloc((void **)&d_hpp_xx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hpp_yy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hpp_zz, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( hppPreComputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_g, d_hpp_xx, d_hpp_yy, d_hpp_zz, k); // copy the result back to host float * d_zero; checkCudaErrors(hipMalloc((void **)&d_zero, k2*sizeof(float))); checkCudaErrors(hipMemset(d_zero, 0, k2*sizeof(float))); float9 * d_hpp; if (dataInDevice){ d_hpp = h_hpp; }else{ checkCudaErrors(hipMalloc((void **)&d_hpp, k2*sizeof(float9))); } hipLaunchKernelGGL(( Float2Float9Kernel2D) , dim3(blocks), dim3(threads) , 0, 0, d_hpp_xx, d_zero, d_zero, d_zero, d_hpp_yy, d_zero, d_zero, d_zero, d_hpp_zz, d_hpp, k); checkCudaErrors(hipFree(d_zero)); if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code }else{ checkCudaErrors(hipMemcpy(h_hpp, d_hpp, k2 * sizeof(float9), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_hpp)); } //printf("Done hpp.\n"); //printf("Done 2nd derivative.\n"); } checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&hTimer); double hamiltonian_time = sdkGetTimerValue(&hTimer); //printf("Hamiltonian takes %f ms.\n", hamiltonian_time); // Clean up if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code } else { checkCudaErrors( hipFree(d_p) ); checkCudaErrors( hipFree(d_q) ); } checkCudaErrors(hipFree(d_pi_pj)); checkCudaErrors(hipFree(d_dq)); checkCudaErrors(hipFree(d_g)); checkCudaErrors(hipFree(d_one)); checkCublasErrors( hipblasDestroy(handle) ); checkCudaErrors(hipDeviceSynchronize()); return H; } void PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA3D(float3 * h_q, float3 * h_p, float3 * h_alpha, float3 * h_beta, float3 * h_dalpha, float3 * h_dbeta, float sigma, int k, bool dataInDevice ){ // Some variable float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; // variables float3 * d_q; float3 * d_p; float3 * d_alpha; float3 * d_beta; // Initialize cublas hipblasHandle_t handle; checkCublasErrors( hipblasCreate(&handle) ); // hipblasOperation_t trans = HIPBLAS_OP_N; // AT if transa == HIPBLAS_OP_T hipblasOperation_t trans = HIPBLAS_OP_T; // AT if transa == HIPBLAS_OP_T float alf=1.0; float beta=0; // Some memory control stuff if (dataInDevice){ d_q = h_q; d_p = h_p; d_alpha = h_alpha; d_beta = h_beta; } else { checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_alpha, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_beta, k*sizeof(float3))); checkCudaErrors(hipMemcpy(d_q, h_q, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, h_p, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_alpha, h_alpha, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_beta, h_beta, k * sizeof(float3), hipMemcpyHostToDevice)); } // Start timer StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); 
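    // Note: this routine applies the Hessian of H to (alpha, beta) directly from the pairwise
    // kernel terms; it never builds the float9 Hessian blocks that ComputeHamiltonianJet_CUDA3D
    // produces, only a few k-by-k scalar work buffers that are contracted with cublas gemv below.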
sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); // allocate the memory for these intermediate variables float * d_pi_pj; float3 * d_dq; float * d_g; float * d_one; checkCudaErrors(hipMalloc((void **)&d_one, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_pi_pj, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_dq, k2*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_g, k2*sizeof(float))); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( onesKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_one, k); // Calculate the pi_pj, dq, g and (pi_pj * g) threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( dqpipjKernel) , dim3(blocks), dim3(threads) , 0, 0, d_q, d_dq, d_g, f, d_p, d_pi_pj, k); // Calculate the dj-di threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); float3 * d_dbji; checkCudaErrors(hipMalloc((void **)&d_dbji, k2*sizeof(float3))); hipLaunchKernelGGL(( dbjiKernel) , dim3(blocks), dim3(threads) , 0, 0, d_beta, d_dbji, k ); //////////////////////////////////////////////////////////////////////////////////////////////////////////// /* dalpha */ // Precompute for the da and aa terms float * d_da_pre_x; float * d_da_pre_y; float * d_da_pre_z; checkCudaErrors(hipMalloc((void **)&d_da_pre_x, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_da_pre_y, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_da_pre_z, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( dalphaPrecomputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_pi_pj, d_dq, d_g, d_dbji, f, k, d_da_pre_x, d_da_pre_y, d_da_pre_z, d_p, d_alpha); // Use cublas to sum float * d_da_x; float * d_da_y; float * d_da_z; checkCudaErrors(hipMalloc((void **)&d_da_x, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_da_y, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_da_z, k*sizeof(float))); // cublas m * v checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_da_pre_x, k, d_one, 1, &beta, d_da_x, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_da_pre_y, k, d_one, 1, &beta, d_da_y, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_da_pre_z, k, d_one, 1, &beta, d_da_z, 1) ); checkCudaErrors( hipFree(d_da_pre_x) ); checkCudaErrors( hipFree(d_da_pre_y) ); checkCudaErrors( hipFree(d_da_pre_z) ); // 3 float to float3 float3 * d_dalpha; if (dataInDevice){ d_dalpha = h_dalpha; } else { checkCudaErrors(hipMalloc((void **)&d_dalpha, k*sizeof(float3))); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( Float2Float3Kernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_da_x, d_da_y, d_da_z, d_dalpha, k); // copy the result back to host mem if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { checkCudaErrors(hipMemcpy(h_dalpha, d_dalpha, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors( hipFree(d_dalpha) ); } checkCudaErrors( hipFree(d_da_x) ); checkCudaErrors( hipFree(d_da_y) ); checkCudaErrors( hipFree(d_da_z) ); //////////////////////////////////////////////////////////////////////////////////////////////////////////// /* dbeta */ // precompute float * d_db_pre_x; float * d_db_pre_y; float * d_db_pre_z; checkCudaErrors(hipMalloc((void **)&d_db_pre_x, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_db_pre_y, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_db_pre_z, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( dbetaPrecomputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_p, d_dq, d_g, d_dbji, f, k, d_db_pre_x, d_db_pre_y, d_db_pre_z, d_alpha); // Use cublas to sum float * d_db_x; float * d_db_y; float * d_db_z; checkCudaErrors(hipMalloc((void **)&d_db_x, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_db_y, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_db_z, k*sizeof(float))); // cublas m * v checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_db_pre_x, k, d_one, 1, &beta, d_db_x, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_db_pre_y, k, d_one, 1, &beta, d_db_y, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_db_pre_z, k, d_one, 1, &beta, d_db_z, 1) ); checkCudaErrors( hipFree(d_db_pre_x) ); checkCudaErrors( hipFree(d_db_pre_y) ); checkCudaErrors( hipFree(d_db_pre_z) ); // 3 float to float3 float3 * d_dbeta; if (dataInDevice){ d_dbeta = h_dbeta; } else { checkCudaErrors(hipMalloc((void **)&d_dbeta, k*sizeof(float3))); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( Float2Float3Kernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_db_x, d_db_y, d_db_z, d_dbeta, k); // add the alpha term threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( addKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_dbeta, d_alpha, d_dbeta, k ); // copy the result back to host mem if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code } else { checkCudaErrors(hipMemcpy(h_dbeta, d_dbeta, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors( hipFree(d_dbeta) ); } checkCudaErrors( hipFree(d_db_x) ); checkCudaErrors( hipFree(d_db_y) ); checkCudaErrors( hipFree(d_db_z) ); // stop timer checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&hTimer); double alpha_beta_time = sdkGetTimerValue(&hTimer); //printf("Alpha_beta takes %f ms.\n", alpha_beta_time); // clean up if (dataInDevice){ // Do nothing. 
    // Duty to manage memory relies on outside code
    } else {
        checkCudaErrors( hipFree(d_p) );
        checkCudaErrors( hipFree(d_q) );
        checkCudaErrors( hipFree(d_alpha) );
        checkCudaErrors( hipFree(d_beta) );
    }
    checkCudaErrors(hipFree(d_pi_pj));
    checkCudaErrors(hipFree(d_dq));
    checkCudaErrors(hipFree(d_g));
    checkCudaErrors(hipFree(d_dbji));
    checkCudaErrors(hipFree(d_one));

    checkCublasErrors( hipblasDestroy(handle) );
    checkCudaErrors(hipDeviceSynchronize());
}

float PointSetHamiltonianSystem_CUDA::landmarkError_CUDA3D(float3 * h_q1, float3 * h_qT, float3 * h_alpha,
    int k, bool dataInDevice){
    // Variables
    float3 * d_q1;
    float3 * d_qT;
    float3 * d_alpha;
    float * d_alpha_mag;
    dim3 threads;
    dim3 blocks;

    // Some memory control stuff
    checkCudaErrors(hipMalloc((void **)&d_alpha_mag, k*sizeof(float)));
    if (dataInDevice){
        d_q1 = h_q1;
        d_qT = h_qT;
        d_alpha = h_alpha;
    }else{
        checkCudaErrors(hipMalloc((void **)&d_q1, k*sizeof(float3)));
        checkCudaErrors(hipMalloc((void **)&d_qT, k*sizeof(float3)));
        checkCudaErrors(hipMalloc((void **)&d_alpha, k*sizeof(float3)));
        checkCudaErrors(hipMemcpy(d_q1, h_q1, k * sizeof(float3), hipMemcpyHostToDevice));
        checkCudaErrors(hipMemcpy(d_qT, h_qT, k * sizeof(float3), hipMemcpyHostToDevice));
    }

    // Calculate the difference and the magnitude
    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    hipLaunchKernelGGL(( minusAndMagKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_q1, d_qT, d_alpha, d_alpha_mag, k);
    float fnorm_sq = Reducer::reduce_sum_wrapper(k, d_alpha_mag);

    // Clean up
    checkCudaErrors(hipFree(d_alpha_mag));
    if (dataInDevice){
        // Do nothing. Memory control relies on outside code
    }else{
        checkCudaErrors(hipMemcpy(h_alpha, d_alpha, k * sizeof(float3), hipMemcpyDeviceToHost));
        checkCudaErrors(hipFree(d_q1));
        checkCudaErrors(hipFree(d_qT));
        checkCudaErrors(hipFree(d_alpha));
    }

    return fnorm_sq;
}

void PointSetHamiltonianSystem_CUDA::combineGradient_CUDA3D(float3 * h_grad, float3 * h_hp, int k, float lambda,
    bool dataInDevice){
    // Variables
    float3 * d_grad;
    float3 * d_hp;
    dim3 threads;
    dim3 blocks;

    // Some memory control stuff
    if (dataInDevice){
        d_grad = h_grad;
        d_hp = h_hp;
    }else{
        checkCudaErrors(hipMalloc((void **)&d_grad, k*sizeof(float3)));
        checkCudaErrors(hipMalloc((void **)&d_hp, k*sizeof(float3)));
        checkCudaErrors(hipMemcpy(d_grad, h_grad, k * sizeof(float3), hipMemcpyHostToDevice));
        checkCudaErrors(hipMemcpy(d_hp, h_hp, k * sizeof(float3), hipMemcpyHostToDevice));
    }

    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_hp, d_grad, lambda, d_grad, k);

    // Clean up
    if (dataInDevice){
        // Do nothing.
Memory control relies on outside code }else{ checkCudaErrors(hipMemcpy(h_grad, d_grad, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_grad)); checkCudaErrors(hipFree(d_hp)); } } void PointSetHamiltonianSystem_CUDA::initP_CUDA3D(float3 * h_q0, float3 * h_qT, float3 * h_p0, int N, int k, bool dataInDevice){ // Variables float3 * d_q0; float3 * d_qT; float3 * d_p0; dim3 threads; dim3 blocks; // Some memory control stuff if (dataInDevice){ d_q0 = h_q0; d_qT = h_qT; d_p0 = h_p0; }else{ checkCudaErrors(hipMalloc((void **)&d_q0, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_qT, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_p0, k*sizeof(float3))); checkCudaErrors(hipMemcpy(d_q0, h_q0, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_qT, h_qT, k * sizeof(float3), hipMemcpyHostToDevice)); } // Calculate the difference and the magnitude threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( minusAndDivideKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_qT, d_q0, d_p0, (float) N, k ); // Clean up if (dataInDevice){ // Do nothing. Memory control relies on outside code }else{ checkCudaErrors(hipMemcpy(h_p0, d_p0, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_q0)); checkCudaErrors(hipFree(d_qT)); checkCudaErrors(hipFree(d_p0)); } } void PointSetHamiltonianSystem_CUDA::GAlphaBeta_CUDA3D(float3 * h_q1, float3 * h_qT, float3 * h_p1, float3 * h_alpha, float3 * h_beta, float &Gnorm_sq, float &dsq, float lambda, int k, bool dataInDevice ){ // Variables float3 * d_q1; float3 * d_qT; float3 * d_p1; float3 * d_alpha; float3 * d_beta; float * d_gnsq; float * d_dsq; dim3 threads; dim3 blocks; // Some memory control stuff checkCudaErrors(hipMalloc((void **)&d_gnsq, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_dsq, k*sizeof(float))); if (dataInDevice){ d_q1 = h_q1; d_qT = h_qT; d_p1 = h_p1; d_alpha = h_alpha; d_beta = h_beta; }else{ checkCudaErrors(hipMalloc((void **)&d_q1, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_qT, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_p1, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_alpha, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_beta, k*sizeof(float3))); checkCudaErrors(hipMemcpy(d_q1, h_q1, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_qT, h_qT, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p1, h_p1, k * sizeof(float3), hipMemcpyHostToDevice)); } // Compute G and alpha/beta threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( GAlphaBetaKernel) , dim3(blocks), dim3(threads) , 0, 0, d_q1, d_qT, d_p1, d_alpha, d_beta, d_gnsq, d_dsq, lambda, k); Gnorm_sq = Reducer::reduce_sum_wrapper(k, d_gnsq); dsq = Reducer::reduce_sum_wrapper(k, d_dsq); // Clean up checkCudaErrors(hipFree(d_gnsq)); checkCudaErrors(hipFree(d_dsq)); if (dataInDevice){ // Do nothing. 
Duty of managing memory reply on outside code }else{ checkCudaErrors(hipMemcpy(h_alpha, d_alpha, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_beta, d_beta, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_q1)); checkCudaErrors(hipFree(d_qT)); checkCudaErrors(hipFree(d_p1)); checkCudaErrors(hipFree(d_alpha)); checkCudaErrors(hipFree(d_beta)); } } void PointSetHamiltonianSystem_CUDA::FlowHamiltonianWithGradient_CUDA3D( std::vector<float3*> &Qt, std::vector<float3*> &Pt, float3 * h_q0, float3 * h_p0, float3 * h_q, float3 * h_p, float9 * h_grad_q, float9 * h_grad_p, int N, int k, bool dataInDevice){ } float PointSetHamiltonianSystem_CUDA::FlowHamiltonian_CUDA3D(float3 * h_q0, float3 * h_p0, float3 * h_q1, float3 * h_p1, float3 * h_hq, float3 * h_hp, std::vector<float3*> &Qt, std::vector<float3*> &Pt, float sigma, int k, int N, bool saveIntermediate, bool dataInDevice){ float dt = 1.0 / (float)(N-1); // Initialize q and p // The return value float H, H0; float3 * d_q_t; float3 * d_p_t; float3 * d_hq; float3 * d_hp; dim3 threads; dim3 blocks; checkCudaErrors(hipMalloc((void **)&d_q_t, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_p_t, k*sizeof(float3))); // Some memory control stuff if (dataInDevice){ d_hq = h_hq; d_hp = h_hp; checkCudaErrors(hipMemcpy(d_q_t, h_q0, k * sizeof(float3), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(d_p_t, h_p0, k * sizeof(float3), hipMemcpyDeviceToDevice)); if (saveIntermediate){ checkCudaErrors(hipMemcpy(Qt[0], h_q0, k * sizeof(float3), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(Pt[0], h_p0, k * sizeof(float3), hipMemcpyDeviceToDevice)); } } else { checkCudaErrors(hipMalloc((void **)&d_hq, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_hp, k*sizeof(float3))); checkCudaErrors(hipMemcpy(d_q_t, h_q0, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p_t, h_p0, k * sizeof(float3), hipMemcpyHostToDevice)); if (saveIntermediate){ checkCudaErrors(hipMemcpy(Qt[0], h_q0, k * sizeof(float3), hipMemcpyHostToHost)); checkCudaErrors(hipMemcpy(Pt[0], h_p0, k * sizeof(float3), hipMemcpyHostToHost)); } } // Flow over time for(int t = 1; t < N; t++){ // Compute the hamiltonian H = PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA3D(d_q_t, d_p_t, d_hq, d_hp, NULL, NULL, NULL, sigma, k, false, true); // Euler update threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_q_t, d_hp, dt, d_q_t, k); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_p_t, d_hq, -dt, d_p_t, k); // Save intermediate result if necessary if (saveIntermediate){ if (dataInDevice){ checkCudaErrors(hipMemcpy(Qt[t], d_q_t, k * sizeof(float3), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(Pt[t], d_p_t, k * sizeof(float3), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(Qt[t], d_q_t, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(Pt[t], d_p_t, k * sizeof(float3), hipMemcpyDeviceToHost)); } } // store the first hamiltonian value if(t == 1) H0 = H; } // copy the final result out if (dataInDevice){ checkCudaErrors(hipMemcpy(h_q1, d_q_t, k * sizeof(float3), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(h_p1, d_p_t, k * sizeof(float3), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(h_q1, d_q_t, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_p1, d_p_t, k * sizeof(float3), hipMemcpyDeviceToHost)); 
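        // Also copy back the most recent dH/dq and dH/dp so the caller gets the gradients
        // that were used in the final Euler step (computed from the state before that update).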
checkCudaErrors(hipMemcpy(h_hq, d_hq, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_hp, d_hp, k * sizeof(float3), hipMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. Duty to manage mem replies on outside code }else{ checkCudaErrors(hipFree(d_hq)); checkCudaErrors(hipFree(d_hp)); } checkCudaErrors(hipFree(d_q_t)); checkCudaErrors(hipFree(d_p_t)); return H0; } void PointSetHamiltonianSystem_CUDA::FlowGradientBackward_CUDA3D( std::vector<float3*> &Qt, std::vector<float3*> &Pt, const float3 * d_alpha, const float3 * d_beta, float3 * d_result, float sigma, int k, int N, bool dataInDevice){ // Variables float3 * d_a; float3 * d_b; float3 * d_Da; float3 * d_Db; float3 * d_q; float3 * d_p; float dt = 1.0 / (float)(N-1); dim3 threads; dim3 blocks; // Some memory control stuff checkCudaErrors(hipMalloc((void **)&d_a, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_b, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_Da, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_Db, k*sizeof(float3))); if (dataInDevice){ checkCudaErrors(hipMemcpy(d_a, d_alpha, k * sizeof(float3), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(d_b, d_beta, k * sizeof(float3), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(d_a, d_alpha, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_b, d_beta, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float3))); } // Work our way backwards for(int t = N-1; t > 0; t--){ // Load intermediate q and p if (dataInDevice){ d_q = Qt[t - 1]; d_p = Pt[t - 1]; }else{ checkCudaErrors(hipMemcpy(d_q, Qt[t - 1], k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, Pt[t - 1], k * sizeof(float3), hipMemcpyHostToDevice)); } // Apply Hamiltonian Hessian to get an update in alpha/beta PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA3D(d_q, d_p, d_a, d_b, d_Da, d_Db, sigma, k, true ); // Update the vectors threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_a, d_Da, dt, d_a, k); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_b, d_Db, dt, d_b, k); } // Finally, what we are really after are the betas if (dataInDevice){ checkCudaErrors(hipMemcpy(d_result, d_b, k * sizeof(float3), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(d_result, d_b, k * sizeof(float3), hipMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. 
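        // When dataInDevice is true, d_q and d_p alias entries of Qt/Pt, which are owned by
        // the caller, so there is nothing to free here.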
}else{ checkCudaErrors(hipFree(d_q)); checkCudaErrors(hipFree(d_p)); } checkCudaErrors(hipFree(d_a)); checkCudaErrors(hipFree(d_b)); checkCudaErrors(hipFree(d_Da)); checkCudaErrors(hipFree(d_Db)); } void PointSetHamiltonianSystem_CUDA::InterpolateVelocity_CUDA3D(int t, const float3 x, float3 &v, std::vector<float3*> &Qt, std::vector<float3*> &Pt, float sigma, int k, bool dataInDevice){ // Variables float f = -0.5 / (sigma * sigma); dim3 threads; dim3 blocks; float3 * d_q; float3 * d_p; float * d_KqPt_x; float * d_KqPt_y; float * d_KqPt_z; // Some memory control stuff checkCudaErrors(hipMalloc((void **)&d_KqPt_x, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_KqPt_y, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_KqPt_z, k*sizeof(float))); if (dataInDevice){ d_q = Qt[t]; d_p = Pt[t]; }else{ checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float3))); checkCudaErrors(hipMemcpy(d_q, Qt[t], k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, Pt[t], k * sizeof(float3), hipMemcpyHostToDevice)); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( KqPtKernel) , dim3(blocks), dim3(threads) , 0, 0, d_q, d_p, x, f, d_KqPt_x, d_KqPt_y, d_KqPt_z, k); v.x = Reducer::reduce_sum_wrapper(k, d_KqPt_x); v.y = Reducer::reduce_sum_wrapper(k, d_KqPt_y); v.z = Reducer::reduce_sum_wrapper(k, d_KqPt_z); // Clean up if (dataInDevice){ // Do nothing } else { checkCudaErrors(hipFree(d_q)); checkCudaErrors(hipFree(d_p)); } checkCudaErrors(hipFree(d_KqPt_x)); checkCudaErrors(hipFree(d_KqPt_y)); checkCudaErrors(hipFree(d_KqPt_z)); } ///////////////////////////////////////// 2D functions////////////////////////////////////////////// float PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA2D(float2 * h_q, float2 * h_p, float2 * h_hq, float2 * h_hp, float4 * h_hqq, float4 * h_hqp, float4 * h_hpp, float sigma, int k, bool flag_hessian, bool dataInDevice ){ // Parameters float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; // Initialize cublas hipblasHandle_t handle; checkCublasErrors( hipblasCreate(&handle) ); // hipblasOperation_t trans = HIPBLAS_OP_N; // AT if transa == HIPBLAS_OP_T hipblasOperation_t trans = HIPBLAS_OP_T; // AT if transa == HIPBLAS_OP_T float alf=1.0; float beta=0; // Some memory control stuff float2 * d_q; float2 * d_p; if (dataInDevice){ d_q = h_q; d_p = h_p; } else { checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float2))); checkCudaErrors(hipMemcpy(d_q, h_q, k*sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, h_p, k*sizeof(float2), hipMemcpyHostToDevice)); } // Start timer StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); // Initialize hamiltonian float H = 0.0; // allocate the memory float * d_pi_pj; float * d_pi_pj_g; float2 * d_dq; float * d_g; checkCudaErrors(hipMalloc((void **)&d_pi_pj, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_pi_pj_g, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_dq, k2*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_g, k2*sizeof(float))); // Calculate the pi_pj, dq, g and (pi_pj * g) threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( dqpipjKernel) , dim3(blocks), dim3(threads) , 0, 0, d_q, d_dq, d_g, f, d_p, d_pi_pj, k); hipLaunchKernelGGL(( multiplyKernel2D) , dim3(blocks), dim3(threads) , 0, 0, 
        d_pi_pj, d_g, d_pi_pj_g, k, k);

    // Calculate H
    H = 0.5 * Reducer::reduce_sum_wrapper(k2, d_pi_pj_g);
    checkCudaErrors(hipFree(d_pi_pj_g));

    // Calculate the 1st derivative
    //printf("Calculating 1st derivative...\n");
    float * d_pi_pj_g1_dq_x;
    float * d_pi_pj_g1_dq_y;
    float * d_p_g_x;
    float * d_p_g_y;
    checkCudaErrors(hipMalloc((void **)&d_pi_pj_g1_dq_x, k2*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_pi_pj_g1_dq_y, k2*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_p_g_x, k2*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_p_g_y, k2*sizeof(float)));

    // Precompute the terms that need to be added up
    threads = dim3(16, 16, 1);
    blocks = dim3( (k+15)/16, (k+15)/16, 1);
    hipLaunchKernelGGL(( hqhpPreComputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_pi_pj, d_dq, d_g, d_p, f,
        d_pi_pj_g1_dq_x, d_pi_pj_g1_dq_y, d_p_g_x, d_p_g_y, k);

    float * d_one;
    checkCudaErrors(hipMalloc((void **)&d_one, k*sizeof(float)));
    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    hipLaunchKernelGGL(( onesKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_one, k);

    // Allocate the memory (only the x and y components are needed in 2D)
    float * d_hq_x;
    float * d_hq_y;
    float * d_hp_x;
    float * d_hp_y;
    checkCudaErrors(hipMalloc((void **)&d_hq_x, k*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_hq_y, k*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_hp_x, k*sizeof(float)));
    checkCudaErrors(hipMalloc((void **)&d_hp_y, k*sizeof(float)));

    // Use CUBLAS to multiply the terms by one vector to add up
    checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_pi_pj_g1_dq_x, k, d_one, 1, &beta, d_hq_x, 1) );
    checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_pi_pj_g1_dq_y, k, d_one, 1, &beta, d_hq_y, 1) );
    checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_p_g_x, k, d_one, 1, &beta, d_hp_x, 1) );
    checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_p_g_y, k, d_one, 1, &beta, d_hp_y, 1) );

    // clean up
    checkCudaErrors(hipFree(d_pi_pj_g1_dq_x));
    checkCudaErrors(hipFree(d_pi_pj_g1_dq_y));
    checkCudaErrors(hipFree(d_p_g_x));
    checkCudaErrors(hipFree(d_p_g_y));

    // Pack the per-component sums into float2 and copy the result back to host if needed
    float2 * d_hq;
    float2 * d_hp;
    if (dataInDevice){
        d_hq = h_hq;
        d_hp = h_hp;
    } else {
        checkCudaErrors(hipMalloc((void **)&d_hq, k*sizeof(float2)));
        checkCudaErrors(hipMalloc((void **)&d_hp, k*sizeof(float2)));
    }
    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    hipLaunchKernelGGL(( Float2Float2Kernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_hq_x, d_hq_y, d_hq, k);
    hipLaunchKernelGGL(( Float2Float2Kernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_hp_x, d_hp_y, d_hp, k);
    checkCudaErrors(hipFree(d_hq_x));
    checkCudaErrors(hipFree(d_hq_y));
    checkCudaErrors(hipFree(d_hp_x));
    checkCudaErrors(hipFree(d_hp_y));

    // Some memory control stuff
    if (dataInDevice){
        // Do nothing.
Duty to manage memory relies on outside code } else { checkCudaErrors(hipMemcpy(h_hq, d_hq, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_hp, d_hp, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_hq)); checkCudaErrors(hipFree(d_hp)); } //printf("Done 1st derivative.\n"); // Calculate the 2nd derivatives if (flag_hessian){ //printf("Calculating 2nd derivative...\n"); //printf("Calculating hqq...\n"); /////////////////////////////////////////////////////////////////////////////////////////////////////// /* hqq */ float * d_hqq_xx; float * d_hqq_xy; float * d_hqq_yx; float * d_hqq_yy; // Allocate memory //printf("hqq: Allocating mem...\n"); checkCudaErrors(hipMalloc((void **)&d_hqq_xx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_xy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_yx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_yy, k2*sizeof(float))); // Precompute the terms //printf("hqq: Precomputing...\n"); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( hqqPreComputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_pi_pj, d_g, d_dq, f, d_hqq_xx, d_hqq_xy, d_hqq_yx, d_hqq_yy, k); // The diagonal terms need sum - again use cublas float * d_hqq_diag_xx; float * d_hqq_diag_xy; float * d_hqq_diag_yx; float * d_hqq_diag_yy; checkCudaErrors(hipMalloc((void **)&d_hqq_diag_xx, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_diag_xy, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_diag_yx, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqq_diag_yy, k*sizeof(float))); // cublas sum //printf("hqq: cublas sum...\n"); float * d_mone; checkCudaErrors(hipMalloc((void **)&d_mone, k*sizeof(float))); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( fillKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_mone, k, -1 ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_xx, k, d_mone, 1, &beta, d_hqq_diag_xx, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_xy, k, d_mone, 1, &beta, d_hqq_diag_xy, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_yx, k, d_mone, 1, &beta, d_hqq_diag_yx, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqq_yy, k, d_mone, 1, &beta, d_hqq_diag_yy, 1) ); checkCudaErrors(hipFree(d_mone)); // Copy the diagonal terms into the matrix //printf("hqq: copy diagonal term...\n"); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_xx, d_hqq_diag_xx, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_xy, d_hqq_diag_xy, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_yx, d_hqq_diag_yx, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_yy, d_hqq_diag_yy, k); checkCudaErrors(hipFree(d_hqq_diag_xx)); checkCudaErrors(hipFree(d_hqq_diag_xy)); checkCudaErrors(hipFree(d_hqq_diag_yx)); checkCudaErrors(hipFree(d_hqq_diag_yy)); // copy the result back to host //printf("hqq: copy back the result...\n"); float4 * d_hqq; if (dataInDevice){ d_hqq = h_hqq; }else{ checkCudaErrors(hipMalloc((void **)&d_hqq, k2*sizeof(float4))); } //printf("hqq: Done allocate memory...\n"); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( Float2Float4Kernel2D) , dim3(blocks), dim3(threads) , 0, 0, d_hqq_xx, d_hqq_xy, 
d_hqq_yx, d_hqq_yy, d_hqq, k); //printf("hqq: Done copy 9 float to float9...\n"); checkCudaErrors(hipFree(d_hqq_xx)); checkCudaErrors(hipFree(d_hqq_xy)); checkCudaErrors(hipFree(d_hqq_yx)); checkCudaErrors(hipFree(d_hqq_yy)); if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code }else{ checkCudaErrors(hipMemcpy(h_hqq, d_hqq, k2 * sizeof(float4), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_hqq)); } //printf("hqq: Done copy back to host...\n"); //printf("Done hqq.\n"); //printf("Calculating hqp...\n"); //////////////////////////////////////////////////////////////////////////////////////////////////// /* hqp */ float * d_hqp_xx; float * d_hqp_xy; float * d_hqp_yx; float * d_hqp_yy; float * d_hqp_ii_xx; float * d_hqp_ii_xy; float * d_hqp_ii_yx; float * d_hqp_ii_yy; // Allocate memory checkCudaErrors(hipMalloc((void **)&d_hqp_xx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_xy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_yx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_yy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_xx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_xy, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_yx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_ii_yy, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( hqpPreComputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_p, d_g, f, d_dq, d_hqp_xx, d_hqp_xy, d_hqp_yx, d_hqp_yy, d_hqp_ii_xx, d_hqp_ii_xy, d_hqp_ii_yx, d_hqp_ii_yy, k); // The diagonal terms need sum - again use cublas float * d_hqp_diag_xx; float * d_hqp_diag_xy; float * d_hqp_diag_yx; float * d_hqp_diag_yy; checkCudaErrors(hipMalloc((void **)&d_hqp_diag_xx, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_diag_xy, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_diag_yx, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hqp_diag_yy, k*sizeof(float))); // cublas sum checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_xx, k, d_one, 1, &beta, d_hqp_diag_xx, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_xy, k, d_one, 1, &beta, d_hqp_diag_xy, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_yx, k, d_one, 1, &beta, d_hqp_diag_yx, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_hqp_ii_yy, k, d_one, 1, &beta, d_hqp_diag_yy, 1) ); // Release checkCudaErrors(hipFree(d_hqp_ii_xx)); checkCudaErrors(hipFree(d_hqp_ii_xy)); checkCudaErrors(hipFree(d_hqp_ii_yx)); checkCudaErrors(hipFree(d_hqp_ii_yy)); // copy the diagonal terms into the matrix threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_xx, d_hqp_diag_xx, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_xy, d_hqp_diag_xy, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_yx, d_hqp_diag_yx, k); hipLaunchKernelGGL(( copyToDiagonal) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_yy, d_hqp_diag_yy, k); checkCudaErrors(hipFree(d_hqp_diag_xx)); checkCudaErrors(hipFree(d_hqp_diag_xy)); checkCudaErrors(hipFree(d_hqp_diag_yx)); checkCudaErrors(hipFree(d_hqp_diag_yy)); // copy the result back to host float4 * d_hqp; if (dataInDevice){ d_hqp = h_hqp; }else{ checkCudaErrors(hipMalloc((void **)&d_hqp, k2*sizeof(float4))); } threads = dim3(16, 16, 1); 
blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( Float2Float4Kernel2D) , dim3(blocks), dim3(threads) , 0, 0, d_hqp_xx, d_hqp_xy, d_hqp_yx, d_hqp_yy, d_hqp, k); checkCudaErrors(hipFree(d_hqp_xx)); checkCudaErrors(hipFree(d_hqp_xy)); checkCudaErrors(hipFree(d_hqp_yx)); checkCudaErrors(hipFree(d_hqp_yy)); if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code }else{ checkCudaErrors(hipMemcpy(h_hqp, d_hqp, k2 * sizeof(float4), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_hqp)); } //printf("Done hqp.\n"); //printf("Calculating hpp...\n"); //////////////////////////////////////////////////////////////////////////////////////////// /* hpp */ float * d_hpp_xx; float * d_hpp_yy; checkCudaErrors(hipMalloc((void **)&d_hpp_xx, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_hpp_yy, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( hppPreComputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_g, d_hpp_xx, d_hpp_yy, k); // copy the result back to host float * d_zero; checkCudaErrors(hipMalloc((void **)&d_zero, k2*sizeof(float))); checkCudaErrors(hipMemset(d_zero, 0, k2*sizeof(float))); float4 * d_hpp; if (dataInDevice){ d_hpp = h_hpp; }else{ checkCudaErrors(hipMalloc((void **)&d_hpp, k2*sizeof(float4))); } hipLaunchKernelGGL(( Float2Float4Kernel2D) , dim3(blocks), dim3(threads) , 0, 0, d_hpp_xx, d_zero, d_zero, d_hpp_yy, d_hpp, k); checkCudaErrors(hipFree(d_zero)); if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code }else{ checkCudaErrors(hipMemcpy(h_hpp, d_hpp, k2 * sizeof(float4), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_hpp)); } //printf("Done hpp.\n"); //printf("Done 2nd derivative.\n"); } checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&hTimer); double hamiltonian_time = sdkGetTimerValue(&hTimer); //printf("Hamiltonian takes %f ms.\n", hamiltonian_time); // Clean up if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { checkCudaErrors( hipFree(d_p) ); checkCudaErrors( hipFree(d_q) ); } checkCudaErrors(hipFree(d_pi_pj)); checkCudaErrors(hipFree(d_dq)); checkCudaErrors(hipFree(d_g)); checkCudaErrors(hipFree(d_one)); checkCublasErrors( hipblasDestroy(handle) ); checkCudaErrors(hipDeviceSynchronize()); return H; } void PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA2D(float2 * h_q, float2 * h_p, float2 * h_alpha, float2 * h_beta, float2 * h_dalpha, float2 * h_dbeta, float sigma, int k, bool dataInDevice ){ // Some variable float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; float * d_one; checkCudaErrors(hipMalloc((void **)&d_one, k*sizeof(float))); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( onesKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_one, k); // Initialize cublas hipblasHandle_t handle; checkCublasErrors( hipblasCreate(&handle) ); // hipblasOperation_t trans = HIPBLAS_OP_N; // AT if transa == HIPBLAS_OP_T hipblasOperation_t trans = HIPBLAS_OP_T; // AT if transa == HIPBLAS_OP_T float alf=1.0; float beta=0; // Some memory control stuff float2 * d_q; float2 * d_p; float2 * d_alpha; float2 * d_beta; if (dataInDevice){ d_q = h_q; d_p = h_p; d_alpha = h_alpha; d_beta = h_beta; } else { checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_alpha, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_beta, k*sizeof(float2))); checkCudaErrors(hipMemcpy(d_q, h_q, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, h_p, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_alpha, h_alpha, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_beta, h_beta, k * sizeof(float2), hipMemcpyHostToDevice)); } // Start timer StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); // allocate the memory float * d_pi_pj; float2 * d_dq; float * d_g; checkCudaErrors(hipMalloc((void **)&d_pi_pj, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_dq, k2*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_g, k2*sizeof(float))); // Calculate the pi_pj, dq, g and (pi_pj * g) threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( dqpipjKernel) , dim3(blocks), dim3(threads) , 0, 0, d_q, d_dq, d_g, f, d_p, d_pi_pj, k); // Calculate the dj-di threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); float2 * d_dbji; checkCudaErrors(hipMalloc((void **)&d_dbji, k2*sizeof(float2))); hipLaunchKernelGGL(( dbjiKernel) , dim3(blocks), dim3(threads) , 0, 0, d_beta, d_dbji, k ); //////////////////////////////////////////////////////////////////////////////////////////////////////////// /* dalpha */ // Precompute for the da and aa terms float * d_da_pre_x; float * d_da_pre_y; checkCudaErrors(hipMalloc((void **)&d_da_pre_x, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_da_pre_y, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( dalphaPrecomputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_pi_pj, d_dq, d_g, d_dbji, f, k, d_da_pre_x, d_da_pre_y, d_p, d_alpha); // Use cublas to sum float * d_da_x; float * d_da_y; checkCudaErrors(hipMalloc((void **)&d_da_x, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_da_y, k*sizeof(float))); // cublas m * v 
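    // A sketch of what each Sgemv below computes (assuming the precompute kernels use the
    // column-major layout implied by lda == k; illustration only, not part of the original code):
    //
    //     for (int i = 0; i < k; ++i) {
    //         float acc = 0.0f;
    //         for (int j = 0; j < k; ++j)
    //             acc += d_da_pre_x[j + i * k];  // sum the j-th contributions to landmark i
    //         d_da_x[i] = acc;                   // trans == HIPBLAS_OP_T, alf == 1, beta == 0
    //     }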
checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_da_pre_x, k, d_one, 1, &beta, d_da_x, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_da_pre_y, k, d_one, 1, &beta, d_da_y, 1) ); checkCudaErrors( hipFree(d_da_pre_x) ); checkCudaErrors( hipFree(d_da_pre_y) ); // 2 float to float2 float2 * d_dalpha; if (dataInDevice){ d_dalpha = h_dalpha; } else { checkCudaErrors(hipMalloc((void **)&d_dalpha, k*sizeof(float2))); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( Float2Float2Kernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_da_x, d_da_y, d_dalpha, k); // copy the result back to host mem if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code } else { checkCudaErrors(hipMemcpy(h_dalpha, d_dalpha, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors( hipFree(d_dalpha) ); } checkCudaErrors( hipFree(d_da_x) ); checkCudaErrors( hipFree(d_da_y) ); //////////////////////////////////////////////////////////////////////////////////////////////////////////// /* dbeta */ // precompute float * d_db_pre_x; float * d_db_pre_y; checkCudaErrors(hipMalloc((void **)&d_db_pre_x, k2*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_db_pre_y, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hipLaunchKernelGGL(( dbetaPrecomputeKernel) , dim3(blocks), dim3(threads) , 0, 0, d_p, d_dq, d_g, d_dbji, f, k, d_db_pre_x, d_db_pre_y, d_alpha); // Use cublas to sum float * d_db_x; float * d_db_y; checkCudaErrors(hipMalloc((void **)&d_db_x, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_db_y, k*sizeof(float))); // cublas m * v checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_db_pre_x, k, d_one, 1, &beta, d_db_x, 1) ); checkCublasErrors( hipblasSgemv(handle, trans, k, k, &alf, d_db_pre_y, k, d_one, 1, &beta, d_db_y, 1) ); checkCudaErrors( hipFree(d_db_pre_x) ); checkCudaErrors( hipFree(d_db_pre_y) ); // 3 float to float3 float2 * d_dbeta; if (dataInDevice){ d_dbeta = h_dbeta; } else { checkCudaErrors(hipMalloc((void **)&d_dbeta, k*sizeof(float2))); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( Float2Float2Kernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_db_x, d_db_y, d_dbeta, k); // add the alpha term threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( addKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_dbeta, d_alpha, d_dbeta, k ); // copy the result back to host mem if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code } else { checkCudaErrors(hipMemcpy(h_dbeta, d_dbeta, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors( hipFree(d_dbeta) ); } checkCudaErrors( hipFree(d_db_x) ); checkCudaErrors( hipFree(d_db_y) ); // stop timer checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&hTimer); double alpha_beta_time = sdkGetTimerValue(&hTimer); //printf("Alpha_beta takes %f ms.\n", alpha_beta_time); // clean up if (dataInDevice){ // Do nothing. 
    // Duty to manage memory relies on outside code
    } else {
        checkCudaErrors( hipFree(d_p) );
        checkCudaErrors( hipFree(d_q) );
        checkCudaErrors( hipFree(d_alpha) );
        checkCudaErrors( hipFree(d_beta) );
    }
    checkCudaErrors(hipFree(d_pi_pj));
    checkCudaErrors(hipFree(d_dq));
    checkCudaErrors(hipFree(d_g));
    checkCudaErrors(hipFree(d_dbji));
    checkCudaErrors(hipFree(d_one));

    checkCublasErrors( hipblasDestroy(handle) );
    checkCudaErrors(hipDeviceSynchronize());
}

float PointSetHamiltonianSystem_CUDA::landmarkError_CUDA2D(float2 * h_q1, float2 * h_qT, float2 * h_alpha,
    int k, bool dataInDevice){
    // Variables
    float2 * d_q1;
    float2 * d_qT;
    float2 * d_alpha;
    float * d_alpha_mag;
    dim3 threads;
    dim3 blocks;

    // Some memory control stuff
    checkCudaErrors(hipMalloc((void **)&d_alpha_mag, k*sizeof(float)));
    if (dataInDevice){
        d_q1 = h_q1;
        d_qT = h_qT;
        d_alpha = h_alpha;
    }else{
        checkCudaErrors(hipMalloc((void **)&d_q1, k*sizeof(float2)));
        checkCudaErrors(hipMalloc((void **)&d_qT, k*sizeof(float2)));
        checkCudaErrors(hipMalloc((void **)&d_alpha, k*sizeof(float2)));
        checkCudaErrors(hipMemcpy(d_q1, h_q1, k * sizeof(float2), hipMemcpyHostToDevice));
        checkCudaErrors(hipMemcpy(d_qT, h_qT, k * sizeof(float2), hipMemcpyHostToDevice));
    }

    // Calculate the difference and the magnitude
    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    hipLaunchKernelGGL(( minusAndMagKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_q1, d_qT, d_alpha, d_alpha_mag, k);
    float fnorm_sq = Reducer::reduce_sum_wrapper(k, d_alpha_mag);

    checkCudaErrors(hipFree(d_alpha_mag));
    if (dataInDevice){
        // Do nothing. Memory control relies on outside code
    }else{
        checkCudaErrors(hipMemcpy(h_alpha, d_alpha, k * sizeof(float2), hipMemcpyDeviceToHost));
        checkCudaErrors(hipFree(d_q1));
        checkCudaErrors(hipFree(d_qT));
        checkCudaErrors(hipFree(d_alpha));
    }

    return fnorm_sq;
}

void PointSetHamiltonianSystem_CUDA::combineGradient_CUDA2D(float2 * h_grad, float2 * h_hp, int k, float lambda,
    bool dataInDevice){
    // Variables
    float2 * d_grad;
    float2 * d_hp;
    dim3 threads;
    dim3 blocks;

    // Some memory control stuff
    if (dataInDevice){
        d_grad = h_grad;
        d_hp = h_hp;
    }else{
        checkCudaErrors(hipMalloc((void **)&d_grad, k*sizeof(float2)));
        checkCudaErrors(hipMalloc((void **)&d_hp, k*sizeof(float2)));
        checkCudaErrors(hipMemcpy(d_grad, h_grad, k * sizeof(float2), hipMemcpyHostToDevice));
        checkCudaErrors(hipMemcpy(d_hp, h_hp, k * sizeof(float2), hipMemcpyHostToDevice));
    }

    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_hp, d_grad, lambda, d_grad, k);

    // Clean up
    if (dataInDevice){
        // Do nothing.
Memory control relies on outside code }else{ checkCudaErrors(hipMemcpy(h_grad, d_grad, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_grad)); checkCudaErrors(hipFree(d_hp)); } } void PointSetHamiltonianSystem_CUDA::initP_CUDA2D(float2 * h_q0, float2 * h_qT, float2 * h_p0, int N, int k, bool dataInDevice){ // Variables float2 * d_q0; float2 * d_qT; float2 * d_p0; dim3 threads; dim3 blocks; // Some memory control stuff if (dataInDevice){ d_q0 = h_q0; d_qT = h_qT; d_p0 = h_p0; }else{ checkCudaErrors(hipMalloc((void **)&d_q0, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_qT, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_p0, k*sizeof(float2))); checkCudaErrors(hipMemcpy(d_q0, h_q0, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_qT, h_qT, k * sizeof(float2), hipMemcpyHostToDevice)); } // Calculate the difference and the magnitude threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( minusAndDivideKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_qT, d_q0, d_p0, (float) N, k ); // Clean up if (dataInDevice){ // Do nothing. Memory control relies on outside code }else{ checkCudaErrors(hipMemcpy(h_p0, d_p0, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_q0)); checkCudaErrors(hipFree(d_qT)); checkCudaErrors(hipFree(d_p0)); } } void PointSetHamiltonianSystem_CUDA::GAlphaBeta_CUDA2D(float2 * h_q1, float2 * h_qT, float2 * h_p1, float2 * h_alpha, float2 * h_beta, float &Gnorm_sq, float &dsq, float lambda, int k, bool dataInDevice ){ // Variables float2 * d_q1; float2 * d_qT; float2 * d_p1; float2 * d_alpha; float2 * d_beta; float * d_gnsq; float * d_dsq; dim3 threads; dim3 blocks; // Some memory control stuff checkCudaErrors(hipMalloc((void **)&d_gnsq, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_dsq, k*sizeof(float))); if (dataInDevice){ d_q1 = h_q1; d_qT = h_qT; d_p1 = h_p1; d_alpha = h_alpha; d_beta = h_beta; }else{ checkCudaErrors(hipMalloc((void **)&d_q1, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_qT, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_p1, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_alpha, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_beta, k*sizeof(float2))); checkCudaErrors(hipMemcpy(d_q1, h_q1, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_qT, h_qT, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p1, h_p1, k * sizeof(float2), hipMemcpyHostToDevice)); } // Compute G and alpha/beta threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( GAlphaBetaKernel) , dim3(blocks), dim3(threads) , 0, 0, d_q1, d_qT, d_p1, d_alpha, d_beta, d_gnsq, d_dsq, lambda, k); Gnorm_sq = Reducer::reduce_sum_wrapper(k, d_gnsq); dsq = Reducer::reduce_sum_wrapper(k, d_dsq); // Clean up checkCudaErrors(hipFree(d_gnsq)); checkCudaErrors(hipFree(d_dsq)); if (dataInDevice){ // Do nothing. 
// Duty of managing memory relies on outside code
    }else{
        checkCudaErrors(hipMemcpy(h_alpha, d_alpha, k * sizeof(float2), hipMemcpyDeviceToHost));
        checkCudaErrors(hipMemcpy(h_beta, d_beta, k * sizeof(float2), hipMemcpyDeviceToHost));
        checkCudaErrors(hipFree(d_q1));
        checkCudaErrors(hipFree(d_qT));
        checkCudaErrors(hipFree(d_p1));
        checkCudaErrors(hipFree(d_alpha));
        checkCudaErrors(hipFree(d_beta));
    }
}

void PointSetHamiltonianSystem_CUDA::FlowHamiltonianWithGradient_CUDA2D( std::vector<float2*> &Qt, std::vector<float2*> &Pt,
    float2 * h_q0, float2 * h_p0, float2 * h_q, float2 * h_p,
    float4 * h_grad_q, float4 * h_grad_p, int N, int k, bool dataInDevice){

}

float PointSetHamiltonianSystem_CUDA::FlowHamiltonian_CUDA2D(float2 * h_q0, float2 * h_p0, float2 * h_q1, float2 * h_p1,
    float2 * h_hq, float2 * h_hp, std::vector<float2*> &Qt, std::vector<float2*> &Pt,
    float sigma, int k, int N, bool saveIntermediate, bool dataInDevice){
    float dt = 1.0 / (float)(N-1);

    // Initialize q and p
    // The return value
    float H, H0;
    float2 * d_q_t;
    float2 * d_p_t;
    float2 * d_hq;
    float2 * d_hp;
    dim3 threads;
    dim3 blocks;

    checkCudaErrors(hipMalloc((void **)&d_q_t, k*sizeof(float2)));
    checkCudaErrors(hipMalloc((void **)&d_p_t, k*sizeof(float2)));
    checkCudaErrors(hipMalloc((void **)&d_hq, k*sizeof(float2)));
    checkCudaErrors(hipMalloc((void **)&d_hp, k*sizeof(float2)));

    // Some memory control stuff
    if (dataInDevice){
        checkCudaErrors(hipMemcpy(d_q_t, h_q0, k * sizeof(float2), hipMemcpyDeviceToDevice));
        checkCudaErrors(hipMemcpy(d_p_t, h_p0, k * sizeof(float2), hipMemcpyDeviceToDevice));
        if (saveIntermediate){
            checkCudaErrors(hipMemcpy(Qt[0], h_q0, k * sizeof(float2), hipMemcpyDeviceToDevice));
            checkCudaErrors(hipMemcpy(Pt[0], h_p0, k * sizeof(float2), hipMemcpyDeviceToDevice));
        }
    } else {
        checkCudaErrors(hipMemcpy(d_q_t, h_q0, k * sizeof(float2), hipMemcpyHostToDevice));
        checkCudaErrors(hipMemcpy(d_p_t, h_p0, k * sizeof(float2), hipMemcpyHostToDevice));
        if (saveIntermediate){
            checkCudaErrors(hipMemcpy(Qt[0], h_q0, k * sizeof(float2), hipMemcpyHostToHost));
            checkCudaErrors(hipMemcpy(Pt[0], h_p0, k * sizeof(float2), hipMemcpyHostToHost));
        }
    }

    // Flow over time
    for(int t = 1; t < N; t++){
        // Compute the hamiltonian
        H = PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA2D(d_q_t, d_p_t, d_hq, d_hp,
            NULL, NULL, NULL, sigma, k, false, true);

        // Euler update
        threads = dim3(256, 1, 1);
        blocks = dim3( (k+255)/256, 1, 1);
        hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_q_t, d_hp, dt, d_q_t, k);
        hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_p_t, d_hq, -dt, d_p_t, k);

        // Save intermediate result if necessary
        if (saveIntermediate){
            if (dataInDevice){
                checkCudaErrors(hipMemcpy(Qt[t], d_q_t, k * sizeof(float2), hipMemcpyDeviceToDevice));
                checkCudaErrors(hipMemcpy(Pt[t], d_p_t, k * sizeof(float2), hipMemcpyDeviceToDevice));
            }else{
                checkCudaErrors(hipMemcpy(Qt[t], d_q_t, k * sizeof(float2), hipMemcpyDeviceToHost));
                checkCudaErrors(hipMemcpy(Pt[t], d_p_t, k * sizeof(float2), hipMemcpyDeviceToHost));
            }
        }

        // store the first hamiltonian value
        if(t == 1)
            H0 = H;
    }

    // copy the final result out
    if (dataInDevice){
        checkCudaErrors(hipMemcpy(h_q1, d_q_t, k * sizeof(float2), hipMemcpyDeviceToDevice));
        checkCudaErrors(hipMemcpy(h_p1, d_p_t, k * sizeof(float2), hipMemcpyDeviceToDevice));
    }else{
        checkCudaErrors(hipMemcpy(h_q1, d_q_t, k * sizeof(float2), hipMemcpyDeviceToHost));
        checkCudaErrors(hipMemcpy(h_p1, d_p_t, k * sizeof(float2), hipMemcpyDeviceToHost));
    }

    // Clean up
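    // Note (added): the time loop above is an explicit Euler discretization of
    // Hamilton's equations,
    //     q_{t+1} = q_t + dt * dH/dp,   p_{t+1} = p_t - dt * dH/dq,
    // with dt = 1/(N-1). Under exact integration H would be conserved along the
    // flow, so the value H0 captured at the first step is the Hamiltonian that
    // gets returned to the caller. The temporaries d_q_t/d_p_t/d_hq/d_hp are the
    // only device state this function owns and are released below.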
checkCudaErrors(hipFree(d_q_t)); checkCudaErrors(hipFree(d_p_t)); checkCudaErrors(hipFree(d_hq)); checkCudaErrors(hipFree(d_hp)); return H0; } void PointSetHamiltonianSystem_CUDA::FlowGradientBackward_CUDA2D(std::vector<float2*> &Qt, std::vector<float2*> &Pt, const float2 * d_alpha, const float2 * d_beta, float2 * d_result, float sigma, int k, int N, bool dataInDevice){ // Variables float2 * d_a; float2 * d_b; float2 * d_Da; float2 * d_Db; float2 * d_q; float2 * d_p; float dt = 1.0 / (float)(N-1); dim3 threads; dim3 blocks; // Some memory control stuff checkCudaErrors(hipMalloc((void **)&d_a, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_b, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_Da, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_Db, k*sizeof(float2))); if (dataInDevice){ checkCudaErrors(hipMemcpy(d_a, d_alpha, k * sizeof(float2), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(d_b, d_beta, k * sizeof(float2), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(d_a, d_alpha, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_b, d_beta, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float2))); } // Work our way backwards for(int t = N-1; t > 0; t--){ // Load intermediate q and p if (dataInDevice){ d_q = Qt[t - 1]; d_p = Pt[t - 1]; }else{ checkCudaErrors(hipMemcpy(d_q, Qt[t - 1], k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, Pt[t - 1], k * sizeof(float2), hipMemcpyHostToDevice)); } // Apply Hamiltonian Hessian to get an update in alpha/beta PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA2D(d_q, d_p, d_a, d_b, d_Da, d_Db, sigma, k, true ); // Update the vectors threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_a, d_Da, dt, d_a, k); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_b, d_Db, dt, d_b, k); } // Finally, what we are really after are the betas if (dataInDevice){ checkCudaErrors(hipMemcpy(d_result, d_b, k * sizeof(float2), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(d_result, d_b, k * sizeof(float2), hipMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. 
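        // (Added note) The backward loop above applies the Hamiltonian Hessian at
        // (q_{t-1}, p_{t-1}) to the adjoint pair and takes a +dt Euler step,
        //     (alpha, beta) <- (alpha, beta) + dt * (Dalpha, Dbeta),
        // walking t from N-1 down to 1; the final beta written to d_result is what
        // callers use as the gradient with respect to p0. When dataInDevice is
        // true, d_q and d_p alias the caller-owned Qt/Pt buffers, so there is
        // nothing to free in this branch.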
}else{ checkCudaErrors(hipFree(d_q)); checkCudaErrors(hipFree(d_p)); } checkCudaErrors(hipFree(d_a)); checkCudaErrors(hipFree(d_b)); checkCudaErrors(hipFree(d_Da)); checkCudaErrors(hipFree(d_Db)); } void PointSetHamiltonianSystem_CUDA::InterpolateVelocity_CUDA2D(unsigned int t, const float2 x, float2 &v, std::vector<float2*> &Qt, std::vector<float2*> &Pt, float sigma, int k, bool dataInDevice){ // Variables float f = -0.5 / (sigma * sigma); dim3 threads; dim3 blocks; float2 * d_q; float2 * d_p; float * d_KqPt_x; float * d_KqPt_y; // Some memory control stuff checkCudaErrors(hipMalloc((void **)&d_KqPt_x, k*sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_KqPt_y, k*sizeof(float))); if (dataInDevice){ d_q = Qt[t]; d_p = Pt[t]; }else{ checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float2))); checkCudaErrors(hipMemcpy(d_q, Qt[t], k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, Pt[t], k * sizeof(float2), hipMemcpyHostToDevice)); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( KqPtKernel) , dim3(blocks), dim3(threads) , 0, 0, d_q, d_p, x, f, d_KqPt_x, d_KqPt_y, k); v.x = Reducer::reduce_sum_wrapper(k, d_KqPt_x); v.y = Reducer::reduce_sum_wrapper(k, d_KqPt_y); // Clean up if (dataInDevice){ // Do nothing } else { checkCudaErrors(hipFree(d_q)); checkCudaErrors(hipFree(d_p)); } checkCudaErrors(hipFree(d_KqPt_x)); checkCudaErrors(hipFree(d_KqPt_y)); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Accelerated hqhp and alphabeta void hqhpRestrictedKernel_wrapper(float3 * d_q, float3 * d_p, float3 * d_hq, float3 * d_hp, float * d_ham, float f, int k, int blockSize){ dim3 dimBlock( blockSize, 1, 1 ); dim3 dimGrid( (k+2*blockSize-1) / (2*blockSize), k, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = 7 * blockSize * sizeof(float); // Debug: check the cover- cover is actually normal. Then the reduce process must have bug switch (blockSize){ case 1024: hipLaunchKernelGGL(( hqhpRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 512: hipLaunchKernelGGL(( hqhpRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 256: hipLaunchKernelGGL(( hqhpRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 128: hipLaunchKernelGGL(( hqhpRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 64: hipLaunchKernelGGL(( hqhpRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; default: printf("From function hqhpRestrictedKernel_wrapper: The number of thread is not a power of 2 or it is" " smaller than 64 or larger than 1024. 
Check your code!\n"); } } void hqhpRestrictedKernel_wrapper(float2 * d_q, float2 * d_p, float2 * d_hq, float2 * d_hp, float * d_ham, float f, int k, int blockSize){ dim3 dimBlock( blockSize, 1, 1 ); dim3 dimGrid( (k+2*blockSize-1) / (2*blockSize), k, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = 5 * blockSize * sizeof(float); // Debug: check the cover- cover is actually normal. Then the reduce process must have bug switch (blockSize){ case 1024: hipLaunchKernelGGL(( hqhpRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 512: hipLaunchKernelGGL(( hqhpRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 256: hipLaunchKernelGGL(( hqhpRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 128: hipLaunchKernelGGL(( hqhpRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 64: hipLaunchKernelGGL(( hqhpRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; default: printf("From function hqhpRestrictedKernel_wrapper: The number of thread is not a power of 2 or it is" " smaller than 64 or larger than 1024. Check your code!\n"); } } float PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA3D_Restricted(float3 * h_q, float3 * h_p, float3 * h_hq, float3 * h_hp, float sigma, int k, int blockSize, bool dataInDevice ){ // Variables float f = -0.5 / (sigma * sigma); float3 * d_q; float3 * d_p; float3 * d_hq; float3 * d_hp; float * d_ham; // Some memory control stuff if (dataInDevice){ d_q = h_q; d_p = h_p; d_hq = h_hq; d_hp = h_hp; } else { checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_hq, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_hp, k*sizeof(float3))); checkCudaErrors(hipMemcpy(d_q, h_q, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, h_p, k * sizeof(float3), hipMemcpyHostToDevice)); } checkCudaErrors(hipMalloc((void **)&d_ham, k*sizeof(float))); // Start timer // StopWatchInterface *hTimer = NULL; // sdkCreateTimer(&hTimer); // sdkResetTimer(&hTimer); // sdkStartTimer(&hTimer); // sdkStopTimer(&hTimer); // double hamiltonian_time = sdkGetTimerValue(&hTimer); // //printf("Restricted Hamiltonian takes %f ms.\n", hamiltonian_time); // checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemset(d_hq, 0, k*sizeof(float3))); checkCudaErrors(hipMemset(d_hp, 0, k*sizeof(float3))); checkCudaErrors(hipMemset(d_ham, 0, k*sizeof(float))); hqhpRestrictedKernel_wrapper(d_q, d_p, d_hq, d_hp, d_ham, f, k, blockSize); // Calculate hamiltonian float H = 0.5 * Reducer::reduce_sum_wrapper(k, d_ham); // Clean up if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { checkCudaErrors(hipMemcpy(h_hq, d_hq, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_hp, d_hp, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_p)); checkCudaErrors(hipFree(d_q)); checkCudaErrors(hipFree(d_hq)); checkCudaErrors(hipFree(d_hp)); } checkCudaErrors(hipFree(d_ham)); checkCudaErrors(hipDeviceSynchronize()); return H; } float PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA2D_Restricted(float2 * h_q, float2 * h_p, float2 * h_hq, float2 * h_hp, float sigma, int k, int blockSize, bool dataInDevice ){ // Variables float f = -0.5 / (sigma * sigma); float2 * d_q; float2 * d_p; float2 * d_hq; float2 * d_hp; float * d_ham; // Some memory control stuff if (dataInDevice){ d_q = h_q; d_p = h_p; d_hq = h_hq; d_hp = h_hp; } else { checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_hq, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_hp, k*sizeof(float2))); checkCudaErrors(hipMemcpy(d_q, h_q, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, h_p, k * sizeof(float2), hipMemcpyHostToDevice)); } checkCudaErrors(hipMalloc((void **)&d_ham, k*sizeof(float))); // Start timer // StopWatchInterface *hTimer = NULL; // sdkCreateTimer(&hTimer); // sdkResetTimer(&hTimer); // sdkStartTimer(&hTimer); // sdkStopTimer(&hTimer); // double hamiltonian_time = sdkGetTimerValue(&hTimer); // //printf("Restricted Hamiltonian takes %f ms.\n", hamiltonian_time); // checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemset(d_hq, 0, k*sizeof(float2))); checkCudaErrors(hipMemset(d_hp, 0, k*sizeof(float2))); checkCudaErrors(hipMemset(d_ham, 0, k*sizeof(float))); hqhpRestrictedKernel_wrapper(d_q, d_p, d_hq, d_hp, d_ham, f, k, blockSize); // Calculate hamiltonian float H = 0.5 * Reducer::reduce_sum_wrapper(k, d_ham); // Clean up if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code } else { checkCudaErrors(hipMemcpy(h_hq, d_hq, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_hp, d_hp, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_p)); checkCudaErrors(hipFree(d_q)); checkCudaErrors(hipFree(d_hq)); checkCudaErrors(hipFree(d_hp)); } checkCudaErrors(hipFree(d_ham)); checkCudaErrors(hipDeviceSynchronize()); return H; } void alphaBetaRestrictedKernel_wrapper(float3 * d_q, float3 * d_p, float3 * d_alpha, float3 * d_beta, float3 * d_dalpha, float3 * d_dbeta, float f, int k, int blockSize){ dim3 dimBlock( blockSize, 1, 1 ); dim3 dimGrid( (k+2*blockSize-1) / (2*blockSize), k, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = 6 * blockSize * sizeof(float); // Debug: check the cover- cover is actually normal. 
Then the reduce process must have bug switch (blockSize){ case 1024: hipLaunchKernelGGL(( alphaBetaRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 512: hipLaunchKernelGGL(( alphaBetaRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 256: hipLaunchKernelGGL(( alphaBetaRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 128: hipLaunchKernelGGL(( alphaBetaRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 64: hipLaunchKernelGGL(( alphaBetaRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; default: printf("From function hqhpRestrictedKernel_wrapper: The number of thread is not a power of 2 or it is" " smaller than 64 or larger than 1024. Check your code!\n"); } } void alphaBetaRestrictedKernel_wrapper(float2 * d_q, float2 * d_p, float2 * d_alpha, float2 * d_beta, float2 * d_dalpha, float2 * d_dbeta, float f, int k, int blockSize){ dim3 dimBlock( blockSize, 1, 1 ); dim3 dimGrid( (k+2*blockSize-1) / (2*blockSize), k, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = 4 * blockSize * sizeof(float); // Debug: check the cover- cover is actually normal. Then the reduce process must have bug switch (blockSize){ case 1024: hipLaunchKernelGGL(( alphaBetaRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 512: hipLaunchKernelGGL(( alphaBetaRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 256: hipLaunchKernelGGL(( alphaBetaRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 128: hipLaunchKernelGGL(( alphaBetaRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 64: hipLaunchKernelGGL(( alphaBetaRestrictedKernel) , dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; default: printf("From function hqhpRestrictedKernel_wrapper: The number of thread is not a power of 2 or it is" " smaller than 64 or larger than 1024. 
Check your code!\n"); } } void PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA3D_Restricted(float3 * h_q, float3 * h_p, float3 * h_alpha, float3 * h_beta, float3 * h_dalpha, float3 * h_dbeta, float sigma, int k, int blockSize, bool dataInDevice ){ // Some variable float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; // variables float3 * d_q; float3 * d_p; float3 * d_alpha; float3 * d_beta; float3 * d_dalpha; float3 * d_dbeta; // Some memory control stuff if (dataInDevice){ d_q = h_q; d_p = h_p; d_alpha = h_alpha; d_beta = h_beta; d_dalpha = h_dalpha; d_dbeta = h_dbeta; } else { checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_alpha, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_beta, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_dalpha, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_dbeta, k*sizeof(float3))); checkCudaErrors(hipMemcpy(d_q, h_q, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, h_p, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_alpha, h_alpha, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_beta, h_beta, k * sizeof(float3), hipMemcpyHostToDevice)); } // Start timer // StopWatchInterface *hTimer = NULL; // sdkCreateTimer(&hTimer); // sdkResetTimer(&hTimer); // sdkStartTimer(&hTimer); // sdkStopTimer(&hTimer); // double alpha_beta_time = sdkGetTimerValue(&hTimer); //printf("Restricted alpha_beta takes %f ms.\n", alpha_beta_time); // // stop timer // checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemset(d_dalpha, 0, k*sizeof(float3))); checkCudaErrors(hipMemset(d_dbeta, 0, k*sizeof(float3))); ///////////////////////////////////////////////////////////////////////////////////////// // run the wrapper alphaBetaRestrictedKernel_wrapper(d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); // clean up if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { // copy the result back checkCudaErrors(hipMemcpy(h_dalpha, d_dalpha, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_dbeta, d_dbeta, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors( hipFree(d_p) ); checkCudaErrors( hipFree(d_q) ); checkCudaErrors( hipFree(d_alpha) ); checkCudaErrors( hipFree(d_beta) ); checkCudaErrors( hipFree(d_dalpha) ); checkCudaErrors( hipFree(d_dbeta) ); } checkCudaErrors(hipDeviceSynchronize()); } void PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA2D_Restricted(float2 * h_q, float2 * h_p, float2 * h_alpha, float2 * h_beta, float2 * h_dalpha, float2 * h_dbeta, float sigma, int k, int blockSize, bool dataInDevice ){ // Some variable float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; // variables float2 * d_q; float2 * d_p; float2 * d_alpha; float2 * d_beta; float2 * d_dalpha; float2 * d_dbeta; // Some memory control stuff if (dataInDevice){ d_q = h_q; d_p = h_p; d_alpha = h_alpha; d_beta = h_beta; d_dalpha = h_dalpha; d_dbeta = h_dbeta; } else { checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_alpha, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_beta, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_dalpha, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_dbeta, k*sizeof(float2))); checkCudaErrors(hipMemcpy(d_q, h_q, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, h_p, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_alpha, h_alpha, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_beta, h_beta, k * sizeof(float2), hipMemcpyHostToDevice)); } // Start timer // StopWatchInterface *hTimer = NULL; // sdkCreateTimer(&hTimer); // sdkResetTimer(&hTimer); // sdkStartTimer(&hTimer); // sdkStopTimer(&hTimer); // double alpha_beta_time = sdkGetTimerValue(&hTimer); //printf("Restricted alpha_beta takes %f ms.\n", alpha_beta_time); // // stop timer // checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemset(d_dalpha, 0, k*sizeof(float2))); checkCudaErrors(hipMemset(d_dbeta, 0, k*sizeof(float2))); ///////////////////////////////////////////////////////////////////////////////////////// // run the wrapper alphaBetaRestrictedKernel_wrapper(d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); // clean up if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { // copy the result back checkCudaErrors(hipMemcpy(h_dalpha, d_dalpha, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_dbeta, d_dbeta, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors( hipFree(d_p) ); checkCudaErrors( hipFree(d_q) ); checkCudaErrors( hipFree(d_alpha) ); checkCudaErrors( hipFree(d_beta) ); checkCudaErrors( hipFree(d_dalpha) ); checkCudaErrors( hipFree(d_dbeta) ); } checkCudaErrors(hipDeviceSynchronize()); } float PointSetHamiltonianSystem_CUDA::FlowHamiltonian_CUDA3D_Restricted(float3 * h_q0, float3 * h_p0, float3 * h_q1, float3 * h_p1, float3 * h_hq, float3 * h_hp, std::vector<float3*> &Qt, std::vector<float3*> &Pt, float sigma, int k, int N, int blockSize, bool saveIntermediate, bool dataInDevice){ float dt = 1.0 / (float)(N-1); // Initialize q and p // The return value float H, H0; float3 * d_q_t; float3 * d_p_t; float3 * d_hq; float3 * d_hp; dim3 threads; dim3 blocks; checkCudaErrors(hipMalloc((void **)&d_q_t, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_p_t, k*sizeof(float3))); // Some memory control stuff if (dataInDevice){ d_hq = h_hq; d_hp = h_hp; checkCudaErrors(hipMemcpy(d_q_t, h_q0, k * sizeof(float3), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(d_p_t, h_p0, k * sizeof(float3), hipMemcpyDeviceToDevice)); if (saveIntermediate){ checkCudaErrors(hipMemcpy(Qt[0], h_q0, k * sizeof(float3), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(Pt[0], h_p0, k * sizeof(float3), hipMemcpyDeviceToDevice)); } } else { checkCudaErrors(hipMalloc((void **)&d_hq, k*sizeof(float3))); checkCudaErrors(hipMalloc((void **)&d_hp, k*sizeof(float3))); checkCudaErrors(hipMemcpy(d_q_t, h_q0, k * sizeof(float3), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p_t, h_p0, k * sizeof(float3), hipMemcpyHostToDevice)); if (saveIntermediate){ checkCudaErrors(hipMemcpy(Qt[0], h_q0, k * sizeof(float3), hipMemcpyHostToHost)); checkCudaErrors(hipMemcpy(Pt[0], h_p0, k * sizeof(float3), hipMemcpyHostToHost)); } } // Flow over time for(int t = 1; t < N; t++){ // Compute the hamiltonian H = PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA3D_Restricted(d_q_t, d_p_t, d_hq, d_hp, sigma, k, blockSize, true); // Euler update threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_q_t, d_hp, dt, d_q_t, k); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_p_t, d_hq, -dt, d_p_t, k); // Save intermediate result if necessary if (saveIntermediate){ if (dataInDevice){ checkCudaErrors(hipMemcpy(Qt[t], d_q_t, k * sizeof(float3), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(Pt[t], d_p_t, k * sizeof(float3), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(Qt[t], d_q_t, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(Pt[t], d_p_t, k * sizeof(float3), hipMemcpyDeviceToHost)); } } // store the first hamiltonian value if(t == 1) H0 = H; } // copy the final result out if (dataInDevice){ checkCudaErrors(hipMemcpy(h_q1, d_q_t, k * sizeof(float3), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(h_p1, d_p_t, k * sizeof(float3), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(h_q1, d_q_t, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_p1, d_p_t, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_hq, d_hq, k * sizeof(float3), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_hp, d_hp, k 
* sizeof(float3), hipMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. Duty to manage mem replies on outside code }else{ checkCudaErrors(hipFree(d_hq)); checkCudaErrors(hipFree(d_hp)); } checkCudaErrors(hipFree(d_q_t)); checkCudaErrors(hipFree(d_p_t)); checkCudaErrors(hipDeviceSynchronize()); return H0; } float PointSetHamiltonianSystem_CUDA::FlowHamiltonian_CUDA2D_Restricted(float2 * h_q0, float2 * h_p0, float2 * h_q1, float2 * h_p1, float2 * h_hq, float2 * h_hp, std::vector<float2*> &Qt, std::vector<float2*> &Pt, float sigma, int k, int N, int blockSize, bool saveIntermediate, bool dataInDevice){ float dt = 1.0 / (float)(N-1); // Initialize q and p // The return value float H, H0; float2 * d_q_t; float2 * d_p_t; float2 * d_hq; float2 * d_hp; dim3 threads; dim3 blocks; checkCudaErrors(hipMalloc((void **)&d_q_t, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_p_t, k*sizeof(float2))); // Some memory control stuff if (dataInDevice){ d_hq = h_hq; d_hp = h_hp; checkCudaErrors(hipMemcpy(d_q_t, h_q0, k * sizeof(float2), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(d_p_t, h_p0, k * sizeof(float2), hipMemcpyDeviceToDevice)); if (saveIntermediate){ checkCudaErrors(hipMemcpy(Qt[0], h_q0, k * sizeof(float2), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(Pt[0], h_p0, k * sizeof(float2), hipMemcpyDeviceToDevice)); } } else { checkCudaErrors(hipMalloc((void **)&d_hq, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_hp, k*sizeof(float2))); checkCudaErrors(hipMemcpy(d_q_t, h_q0, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p_t, h_p0, k * sizeof(float2), hipMemcpyHostToDevice)); if (saveIntermediate){ checkCudaErrors(hipMemcpy(Qt[0], h_q0, k * sizeof(float2), hipMemcpyHostToHost)); checkCudaErrors(hipMemcpy(Pt[0], h_p0, k * sizeof(float2), hipMemcpyHostToHost)); } } // Flow over time for(int t = 1; t < N; t++){ // Compute the hamiltonian H = PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA2D_Restricted(d_q_t, d_p_t, d_hq, d_hp, sigma, k, blockSize, true); // Euler update threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_q_t, d_hp, dt, d_q_t, k); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_p_t, d_hq, -dt, d_p_t, k); // Save intermediate result if necessary if (saveIntermediate){ if (dataInDevice){ checkCudaErrors(hipMemcpy(Qt[t], d_q_t, k * sizeof(float2), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(Pt[t], d_p_t, k * sizeof(float2), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(Qt[t], d_q_t, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(Pt[t], d_p_t, k * sizeof(float2), hipMemcpyDeviceToHost)); } } // store the first hamiltonian value if(t == 1) H0 = H; } // copy the final result out if (dataInDevice){ checkCudaErrors(hipMemcpy(h_q1, d_q_t, k * sizeof(float2), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(h_p1, d_p_t, k * sizeof(float2), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(h_q1, d_q_t, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_p1, d_p_t, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_hq, d_hq, k * sizeof(float2), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_hp, d_hp, k * sizeof(float2), hipMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. 
// Duty to manage mem relies on outside code
    }else{
        checkCudaErrors(hipFree(d_hq));
        checkCudaErrors(hipFree(d_hp));
    }
    checkCudaErrors(hipFree(d_q_t));
    checkCudaErrors(hipFree(d_p_t));

    checkCudaErrors(hipDeviceSynchronize());
    return H0;
}

void PointSetHamiltonianSystem_CUDA::FlowGradientBackward_CUDA3D_Restricted( std::vector<float3*> &Qt, std::vector<float3*> &Pt,
    const float3 * d_alpha, const float3 * d_beta, float3 * d_result,
    float sigma, int k, int N, int blockSize, bool dataInDevice){
    // Variables
    float3 * d_a;
    float3 * d_b;
    float3 * d_Da;
    float3 * d_Db;
    float3 * d_q;
    float3 * d_p;
    float dt = 1.0 / (float)(N-1);
    dim3 threads;
    dim3 blocks;

    // Some memory control stuff
    checkCudaErrors(hipMalloc((void **)&d_a, k*sizeof(float3)));
    checkCudaErrors(hipMalloc((void **)&d_b, k*sizeof(float3)));
    checkCudaErrors(hipMalloc((void **)&d_Da, k*sizeof(float3)));
    checkCudaErrors(hipMalloc((void **)&d_Db, k*sizeof(float3)));

    if (dataInDevice){
        checkCudaErrors(hipMemcpy(d_a, d_alpha, k * sizeof(float3), hipMemcpyDeviceToDevice));
        checkCudaErrors(hipMemcpy(d_b, d_beta, k * sizeof(float3), hipMemcpyDeviceToDevice));
    }else{
        checkCudaErrors(hipMemcpy(d_a, d_alpha, k * sizeof(float3), hipMemcpyHostToDevice));
        checkCudaErrors(hipMemcpy(d_b, d_beta, k * sizeof(float3), hipMemcpyHostToDevice));
        checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float3)));
        checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float3)));
    }

    // Work our way backwards
    for(int t = N-1; t > 0; t--){
        // Load intermediate q and p
        if (dataInDevice){
            d_q = Qt[t - 1];
            d_p = Pt[t - 1];
        }else{
            checkCudaErrors(hipMemcpy(d_q, Qt[t - 1], k * sizeof(float3), hipMemcpyHostToDevice));
            checkCudaErrors(hipMemcpy(d_p, Pt[t - 1], k * sizeof(float3), hipMemcpyHostToDevice));
        }

        // Apply Hamiltonian Hessian to get an update in alpha/beta
        PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA3D_Restricted(d_q, d_p, d_a, d_b,
            d_Da, d_Db, sigma, k, blockSize, true );

        // Update the vectors
        threads = dim3(256, 1, 1);
        blocks = dim3( (k+255)/256, 1, 1);
        hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_a, d_Da, dt, d_a, k);
        hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_b, d_Db, dt, d_b, k);
    }

    // Finally, what we are really after are the betas
    if (dataInDevice){
        checkCudaErrors(hipMemcpy(d_result, d_b, k * sizeof(float3), hipMemcpyDeviceToDevice));
    }else{
        checkCudaErrors(hipMemcpy(d_result, d_b, k * sizeof(float3), hipMemcpyDeviceToHost));
    }

    // Clean up
    if (dataInDevice){
        // Do nothing.
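        // (Added note) In this branch d_q and d_p were never allocated here: they
        // alias entries of the caller-owned Qt/Pt vectors, so only the else branch
        // (which staged host data into its own device buffers) has anything to
        // free. The adjoint buffers d_a/d_b/d_Da/d_Db are always owned by this
        // function and are released after the branch.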
}else{ checkCudaErrors(hipFree(d_q)); checkCudaErrors(hipFree(d_p)); } checkCudaErrors(hipFree(d_a)); checkCudaErrors(hipFree(d_b)); checkCudaErrors(hipFree(d_Da)); checkCudaErrors(hipFree(d_Db)); checkCudaErrors(hipDeviceSynchronize()); } void PointSetHamiltonianSystem_CUDA::FlowGradientBackward_CUDA2D_Restricted( std::vector<float2*> &Qt, std::vector<float2*> &Pt, const float2 * d_alpha, const float2 * d_beta, float2 * d_result, float sigma, int k, int N, int blockSize, bool dataInDevice){ // Variables float2 * d_a; float2 * d_b; float2 * d_Da; float2 * d_Db; float2 * d_q; float2 * d_p; float dt = 1.0 / (float)(N-1); dim3 threads; dim3 blocks; // Some memory control stuff checkCudaErrors(hipMalloc((void **)&d_a, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_b, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_Da, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_Db, k*sizeof(float2))); if (dataInDevice){ checkCudaErrors(hipMemcpy(d_a, d_alpha, k * sizeof(float2), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(d_b, d_beta, k * sizeof(float2), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(d_a, d_alpha, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_b, d_beta, k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(hipMalloc((void **)&d_p, k*sizeof(float2))); } // Work our way backwards for(int t = N-1; t > 0; t--){ // Load intermediate q and p if (dataInDevice){ d_q = Qt[t - 1]; d_p = Pt[t - 1]; }else{ checkCudaErrors(hipMemcpy(d_q, Qt[t - 1], k * sizeof(float2), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, Pt[t - 1], k * sizeof(float2), hipMemcpyHostToDevice)); } // Apply Hamiltonian Hessian to get an update in alpha/beta PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA2D_Restricted(d_q, d_p, d_a, d_b, d_Da, d_Db, sigma, k, blockSize, true ); // Update the vectors threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_a, d_Da, dt, d_a, k); hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_b, d_Db, dt, d_b, k); } // Finally, what we are really after are the betas if (dataInDevice){ checkCudaErrors(hipMemcpy(d_result, d_b, k * sizeof(float2), hipMemcpyDeviceToDevice)); }else{ checkCudaErrors(hipMemcpy(d_result, d_b, k * sizeof(float2), hipMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. }else{ checkCudaErrors(hipFree(d_q)); checkCudaErrors(hipFree(d_p)); } checkCudaErrors(hipFree(d_a)); checkCudaErrors(hipFree(d_b)); checkCudaErrors(hipFree(d_Da)); checkCudaErrors(hipFree(d_Db)); checkCudaErrors(hipDeviceSynchronize()); }
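
// ---------------------------------------------------------------------------
// Illustrative usage sketch (added; not part of the original interface).
// It shows one plausible way to chain the restricted 2D routines above into a
// single gradient step of landmark matching. The function name, the stepSize
// argument, and the way the data term is weighted by lambda are assumptions
// made for illustration only; all pointers are taken to be device-resident
// (dataInDevice = true) and Qt/Pt must hold N buffers of k float2 each.
// ---------------------------------------------------------------------------
static void exampleGradientStep_CUDA2D_Restricted(float2 * d_q0, float2 * d_qT, float2 * d_p0,
    std::vector<float2*> &Qt, std::vector<float2*> &Pt,
    float sigma, float lambda, float stepSize, int k, int N, int blockSize){
    float2 *d_q1, *d_p1, *d_hq, *d_hp, *d_alpha, *d_beta, *d_grad;
    checkCudaErrors(hipMalloc((void **)&d_q1, k*sizeof(float2)));
    checkCudaErrors(hipMalloc((void **)&d_p1, k*sizeof(float2)));
    checkCudaErrors(hipMalloc((void **)&d_hq, k*sizeof(float2)));
    checkCudaErrors(hipMalloc((void **)&d_hp, k*sizeof(float2)));
    checkCudaErrors(hipMalloc((void **)&d_alpha, k*sizeof(float2)));
    checkCudaErrors(hipMalloc((void **)&d_beta, k*sizeof(float2)));
    checkCudaErrors(hipMalloc((void **)&d_grad, k*sizeof(float2)));

    // 1. Flow (q0, p0) forward, keeping the trajectory for the backward pass.
    PointSetHamiltonianSystem_CUDA::FlowHamiltonian_CUDA2D_Restricted(d_q0, d_p0, d_q1, d_p1,
        d_hq, d_hp, Qt, Pt, sigma, k, N, blockSize, true, true);

    // 2. Turn the landmark mismatch at t=1 into the adjoint pair alpha/beta.
    float Gnorm_sq = 0, dsq = 0;
    PointSetHamiltonianSystem_CUDA::GAlphaBeta_CUDA2D(d_q1, d_qT, d_p1, d_alpha, d_beta,
        Gnorm_sq, dsq, lambda, k, true);

    // 3. Flow the adjoints backward through the saved trajectory.
    PointSetHamiltonianSystem_CUDA::FlowGradientBackward_CUDA2D_Restricted(Qt, Pt, d_alpha, d_beta,
        d_grad, sigma, k, N, blockSize, true);

    // 4. Combine with the Hamiltonian term and take a plain gradient step on p0.
    PointSetHamiltonianSystem_CUDA::combineGradient_CUDA2D(d_grad, d_hp, k, lambda, true);
    dim3 threads(256, 1, 1);
    dim3 blocks( (k+255)/256, 1, 1);
    hipLaunchKernelGGL(( updateKernel1D) , dim3(blocks), dim3(threads) , 0, 0, d_p0, d_grad, -stepSize, d_p0, k);

    checkCudaErrors(hipFree(d_q1)); checkCudaErrors(hipFree(d_p1));
    checkCudaErrors(hipFree(d_hq)); checkCudaErrors(hipFree(d_hp));
    checkCudaErrors(hipFree(d_alpha)); checkCudaErrors(hipFree(d_beta));
    checkCudaErrors(hipFree(d_grad));
}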
3d8a5bdb6b3d6935c398106f1dcea125520a75a7.cu
#include <cuda.h> #include <cuda_runtime.h> #include <cublas_v2.h> #include "helper_functions.h" #include "helper_cuda.h" #include <stdio.h> #include "../include/hamiltonian.h" #include "hamiltonian_kernel.cu" #include "../include/reducer.h" void checkCublasErrors(cublasStatus_t ret){ if (ret != CUBLAS_STATUS_SUCCESS){ printf("cublasCreate returned error code %d, line(%d)\n", ret, __LINE__); exit(-1); } } /* Actual CUDA functions */ ///////////////////////////////////////// 3D functions////////////////////////////////////////////// float PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA3D(float3 * h_q, float3 * h_p, float3 * h_hq, float3 * h_hp, float9 * h_hqq, float9 * h_hqp, float9 * h_hpp, float sigma, int k, bool flag_hessian, bool dataInDevice ){ // Parameters float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; // Initialize cublas cublasHandle_t handle; checkCublasErrors( cublasCreate(&handle) ); // cublasOperation_t trans = CUBLAS_OP_N; // AT if transa == CUBLAS_OP_T cublasOperation_t trans = CUBLAS_OP_T; // AT if transa == CUBLAS_OP_T float alf=1.0; float beta=0; // Some memory control stuff float3 * d_q; float3 * d_p; if (dataInDevice){ d_q = h_q; d_p = h_p; } else { checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float3))); checkCudaErrors(cudaMemcpy(d_q, h_q, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, h_p, k * sizeof(float3), cudaMemcpyHostToDevice)); } // Start timer StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); // Initialize hamiltonian float H = 0.0; // allocate the memory float * d_pi_pj; float * d_pi_pj_g; float3 * d_dq; float * d_g; checkCudaErrors(cudaMalloc((void **)&d_pi_pj, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_pi_pj_g, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_dq, k2*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_g, k2*sizeof(float))); // Calculate the pi_pj, dq, g and (pi_pj * g) threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); dqpipjKernel <<< blocks, threads >>> (d_q, d_dq, d_g, f, d_p, d_pi_pj, k); multiplyKernel2D <<< blocks, threads >>> (d_pi_pj, d_g, d_pi_pj_g, k, k); float * h_pi_pj_g = new float[k2]; // Calculate H H = 0.5 * Reducer::reduce_sum_wrapper(k2, d_pi_pj_g); checkCudaErrors(cudaFree(d_pi_pj_g)); // Calculate the 1st derivative //printf("Calculating 1st derivative...\n"); float * d_pi_pj_g1_dq_x; float * d_pi_pj_g1_dq_y; float * d_pi_pj_g1_dq_z; float * d_p_g_x; float * d_p_g_y; float * d_p_g_z; checkCudaErrors(cudaMalloc((void **)&d_pi_pj_g1_dq_x, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_pi_pj_g1_dq_y, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_pi_pj_g1_dq_z, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_p_g_x, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_p_g_y, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_p_g_z, k2*sizeof(float))); // Precompute the terms that need to be added up threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hqhpPreComputeKernel <<< blocks, threads >>> ( d_pi_pj, d_dq, d_g, d_p, f, d_pi_pj_g1_dq_x, d_pi_pj_g1_dq_y, d_pi_pj_g1_dq_z, d_p_g_x, d_p_g_y, d_p_g_z, k); float * d_one; checkCudaErrors(cudaMalloc((void **)&d_one, k*sizeof(float))); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); onesKernel1D <<< blocks, threads >>> (d_one, k); // Allocate the memory float * 
d_hq_x; float * d_hq_y; float * d_hq_z; float * d_hp_x; float * d_hp_y; float * d_hp_z; checkCudaErrors(cudaMalloc((void **)&d_hq_x, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hq_y, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hq_z, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hp_x, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hp_y, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hp_z, k*sizeof(float))); // Use CUBLAS to multiply the terms by one vector to add up checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_pi_pj_g1_dq_x, k, d_one, 1, &beta, d_hq_x, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_pi_pj_g1_dq_y, k, d_one, 1, &beta, d_hq_y, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_pi_pj_g1_dq_z, k, d_one, 1, &beta, d_hq_z, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_p_g_x, k, d_one, 1, &beta, d_hp_x, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_p_g_y, k, d_one, 1, &beta, d_hp_y, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_p_g_z, k, d_one, 1, &beta, d_hp_z, 1) ); // clean up checkCudaErrors(cudaFree(d_pi_pj_g1_dq_x)); checkCudaErrors(cudaFree(d_pi_pj_g1_dq_y)); checkCudaErrors(cudaFree(d_pi_pj_g1_dq_z)); checkCudaErrors(cudaFree(d_p_g_x)); checkCudaErrors(cudaFree(d_p_g_y)); checkCudaErrors(cudaFree(d_p_g_z)); // TODO: copy the result back to host float3 * d_hq; float3 * d_hp; if (dataInDevice){ d_hq = h_hq; d_hp = h_hp; } else { checkCudaErrors(cudaMalloc((void **)&d_hq, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_hp, k*sizeof(float3))); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); Float2Float3Kernel1D <<< blocks, threads >>> ( d_hq_x, d_hq_y, d_hq_z, d_hq, k); Float2Float3Kernel1D <<< blocks, threads >>> ( d_hp_x, d_hp_y, d_hp_z, d_hp, k); checkCudaErrors(cudaFree(d_hq_x)); checkCudaErrors(cudaFree(d_hq_y)); checkCudaErrors(cudaFree(d_hq_z)); checkCudaErrors(cudaFree(d_hp_x)); checkCudaErrors(cudaFree(d_hp_y)); checkCudaErrors(cudaFree(d_hp_z)); // Some memory control stuff if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { checkCudaErrors(cudaMemcpy(h_hq, d_hq, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_hp, d_hp, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_hq)); checkCudaErrors(cudaFree(d_hp)); } //printf("Done 1st derivative.\n"); // Calculate the 2nd derivatives if (flag_hessian){ //printf("Calculating 2nd derivative...\n"); //printf("Calculating hqq...\n"); /////////////////////////////////////////////////////////////////////////////////////////////////////// /* hqq */ float * d_hqq_xx; float * d_hqq_xy; float * d_hqq_xz; float * d_hqq_yx; float * d_hqq_yy; float * d_hqq_yz; float * d_hqq_zx; float * d_hqq_zy; float * d_hqq_zz; // Allocate memory //printf("hqq: Allocating mem...\n"); checkCudaErrors(cudaMalloc((void **)&d_hqq_xx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_xy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_xz, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_yx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_yy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_yz, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_zx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_zy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_zz, k2*sizeof(float))); // Precompute the terms //printf("hqq: Precomputing...\n"); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hqqPreComputeKernel <<< blocks, threads >>> (d_pi_pj, d_g, d_dq, f, d_hqq_xx, d_hqq_xy, d_hqq_xz, d_hqq_yx, d_hqq_yy, d_hqq_yz, d_hqq_zx, d_hqq_zy, d_hqq_zz, k); // The diagonal terms need sum - again use cublas float * d_hqq_diag_xx; float * d_hqq_diag_xy; float * d_hqq_diag_xz; float * d_hqq_diag_yx; float * d_hqq_diag_yy; float * d_hqq_diag_yz; float * d_hqq_diag_zx; float * d_hqq_diag_zy; float * d_hqq_diag_zz; checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_xx, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_xy, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_xz, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_yx, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_yy, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_yz, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_zx, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_zy, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_zz, k*sizeof(float))); // cublas sum //printf("hqq: cublas sum...\n"); float * d_mone; checkCudaErrors(cudaMalloc((void **)&d_mone, k*sizeof(float))); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); fillKernel1D <<< blocks, threads >>> (d_mone, k, -1 ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_xx, k, d_mone, 1, &beta, d_hqq_diag_xx, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_xy, k, d_mone, 1, &beta, d_hqq_diag_xy, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_xz, k, d_mone, 1, &beta, d_hqq_diag_xz, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_yx, k, d_mone, 1, &beta, d_hqq_diag_yx, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_yy, k, d_mone, 1, &beta, d_hqq_diag_yy, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_yz, k, d_mone, 1, &beta, d_hqq_diag_yz, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_zx, 
k, d_mone, 1, &beta, d_hqq_diag_zx, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_zy, k, d_mone, 1, &beta, d_hqq_diag_zy, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_zz, k, d_mone, 1, &beta, d_hqq_diag_zz, 1) ); checkCudaErrors(cudaFree(d_mone)); // Copy the diagonal terms into the matrix //printf("hqq: copy diagonal term...\n"); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); copyToDiagonal <<< blocks, threads >>> (d_hqq_xx, d_hqq_diag_xx, k); copyToDiagonal <<< blocks, threads >>> (d_hqq_xy, d_hqq_diag_xy, k); copyToDiagonal <<< blocks, threads >>> (d_hqq_xz, d_hqq_diag_xz, k); copyToDiagonal <<< blocks, threads >>> (d_hqq_yx, d_hqq_diag_yx, k); copyToDiagonal <<< blocks, threads >>> (d_hqq_yy, d_hqq_diag_yy, k); copyToDiagonal <<< blocks, threads >>> (d_hqq_yz, d_hqq_diag_yz, k); copyToDiagonal <<< blocks, threads >>> (d_hqq_zx, d_hqq_diag_zx, k); copyToDiagonal <<< blocks, threads >>> (d_hqq_zy, d_hqq_diag_zy, k); copyToDiagonal <<< blocks, threads >>> (d_hqq_zz, d_hqq_diag_zz, k); checkCudaErrors(cudaFree(d_hqq_diag_xx)); checkCudaErrors(cudaFree(d_hqq_diag_xy)); checkCudaErrors(cudaFree(d_hqq_diag_xz)); checkCudaErrors(cudaFree(d_hqq_diag_yx)); checkCudaErrors(cudaFree(d_hqq_diag_yy)); checkCudaErrors(cudaFree(d_hqq_diag_yz)); checkCudaErrors(cudaFree(d_hqq_diag_zx)); checkCudaErrors(cudaFree(d_hqq_diag_zy)); checkCudaErrors(cudaFree(d_hqq_diag_zz)); // copy the result back to host //printf("hqq: copy back the result...\n"); float9 * d_hqq; if (dataInDevice){ d_hqq = h_hqq; }else{ checkCudaErrors(cudaMalloc((void **)&d_hqq, k2*sizeof(float9))); } //printf("hqq: Done allocate memory...\n"); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); Float2Float9Kernel2D <<< blocks, threads >>> ( d_hqq_xx, d_hqq_xy, d_hqq_xz, d_hqq_yx, d_hqq_yy, d_hqq_yz, d_hqq_zx, d_hqq_zy, d_hqq_zz, d_hqq, k); //printf("hqq: Done copy 9 float to float9...\n"); checkCudaErrors(cudaFree(d_hqq_xx)); checkCudaErrors(cudaFree(d_hqq_xy)); checkCudaErrors(cudaFree(d_hqq_xz)); checkCudaErrors(cudaFree(d_hqq_yx)); checkCudaErrors(cudaFree(d_hqq_yy)); checkCudaErrors(cudaFree(d_hqq_yz)); checkCudaErrors(cudaFree(d_hqq_zx)); checkCudaErrors(cudaFree(d_hqq_zy)); checkCudaErrors(cudaFree(d_hqq_zz)); if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code }else{ checkCudaErrors(cudaMemcpy(h_hqq, d_hqq, k2 * sizeof(float9), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_hqq)); } //printf("hqq: Done copy back to host...\n"); //printf("Done hqq.\n"); //printf("Calculating hqp...\n"); //////////////////////////////////////////////////////////////////////////////////////////////////// /* hqp */ float * d_hqp_xx; float * d_hqp_xy; float * d_hqp_xz; float * d_hqp_yx; float * d_hqp_yy; float * d_hqp_yz; float * d_hqp_zx; float * d_hqp_zy; float * d_hqp_zz; float * d_hqp_ii_xx; float * d_hqp_ii_xy; float * d_hqp_ii_xz; float * d_hqp_ii_yx; float * d_hqp_ii_yy; float * d_hqp_ii_yz; float * d_hqp_ii_zx; float * d_hqp_ii_zy; float * d_hqp_ii_zz; // Allocate memory checkCudaErrors(cudaMalloc((void **)&d_hqp_xx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_xy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_xz, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_yx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_yy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_yz, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_zx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_zy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_zz, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_xx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_xy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_xz, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_yx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_yy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_yz, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_zx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_zy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_zz, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hqpPreComputeKernel <<< blocks, threads >>> (d_p, d_g, f, d_dq, d_hqp_xx, d_hqp_xy, d_hqp_xz, d_hqp_yx, d_hqp_yy, d_hqp_yz, d_hqp_zx, d_hqp_zy, d_hqp_zz, d_hqp_ii_xx, d_hqp_ii_xy, d_hqp_ii_xz, d_hqp_ii_yx, d_hqp_ii_yy, d_hqp_ii_yz, d_hqp_ii_zx, d_hqp_ii_zy, d_hqp_ii_zz, k); // The diagonal terms need sum - again use cublas float * d_hqp_diag_xx; float * d_hqp_diag_xy; float * d_hqp_diag_xz; float * d_hqp_diag_yx; float * d_hqp_diag_yy; float * d_hqp_diag_yz; float * d_hqp_diag_zx; float * d_hqp_diag_zy; float * d_hqp_diag_zz; checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_xx, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_xy, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_xz, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_yx, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_yy, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_yz, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_zx, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_zy, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_zz, k*sizeof(float))); // cublas sum checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_xx, k, d_one, 1, &beta, d_hqp_diag_xx, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_xy, k, d_one, 1, &beta, d_hqp_diag_xy, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_xz, k, d_one, 
1, &beta, d_hqp_diag_xz, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_yx, k, d_one, 1, &beta, d_hqp_diag_yx, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_yy, k, d_one, 1, &beta, d_hqp_diag_yy, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_yz, k, d_one, 1, &beta, d_hqp_diag_yz, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_zx, k, d_one, 1, &beta, d_hqp_diag_zx, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_zy, k, d_one, 1, &beta, d_hqp_diag_zy, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_zz, k, d_one, 1, &beta, d_hqp_diag_zz, 1) ); // Release checkCudaErrors(cudaFree(d_hqp_ii_xx)); checkCudaErrors(cudaFree(d_hqp_ii_xy)); checkCudaErrors(cudaFree(d_hqp_ii_xz)); checkCudaErrors(cudaFree(d_hqp_ii_yx)); checkCudaErrors(cudaFree(d_hqp_ii_yy)); checkCudaErrors(cudaFree(d_hqp_ii_yz)); checkCudaErrors(cudaFree(d_hqp_ii_zx)); checkCudaErrors(cudaFree(d_hqp_ii_zy)); checkCudaErrors(cudaFree(d_hqp_ii_zz)); // copy the diagonal terms into the matrix threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); copyToDiagonal <<< blocks, threads >>> (d_hqp_xx, d_hqp_diag_xx, k); copyToDiagonal <<< blocks, threads >>> (d_hqp_xy, d_hqp_diag_xy, k); copyToDiagonal <<< blocks, threads >>> (d_hqp_xz, d_hqp_diag_xz, k); copyToDiagonal <<< blocks, threads >>> (d_hqp_yx, d_hqp_diag_yx, k); copyToDiagonal <<< blocks, threads >>> (d_hqp_yy, d_hqp_diag_yy, k); copyToDiagonal <<< blocks, threads >>> (d_hqp_yz, d_hqp_diag_yz, k); copyToDiagonal <<< blocks, threads >>> (d_hqp_zx, d_hqp_diag_zx, k); copyToDiagonal <<< blocks, threads >>> (d_hqp_zy, d_hqp_diag_zy, k); copyToDiagonal <<< blocks, threads >>> (d_hqp_zz, d_hqp_diag_zz, k); checkCudaErrors(cudaFree(d_hqp_diag_xx)); checkCudaErrors(cudaFree(d_hqp_diag_xy)); checkCudaErrors(cudaFree(d_hqp_diag_xz)); checkCudaErrors(cudaFree(d_hqp_diag_yx)); checkCudaErrors(cudaFree(d_hqp_diag_yy)); checkCudaErrors(cudaFree(d_hqp_diag_yz)); checkCudaErrors(cudaFree(d_hqp_diag_zx)); checkCudaErrors(cudaFree(d_hqp_diag_zy)); checkCudaErrors(cudaFree(d_hqp_diag_zz)); // copy the result back to host float9 * d_hqp; if (dataInDevice){ d_hqp = h_hqp; }else{ checkCudaErrors(cudaMalloc((void **)&d_hqp, k2*sizeof(float9))); } threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); Float2Float9Kernel2D <<< blocks, threads >>> ( d_hqp_xx, d_hqp_xy, d_hqp_xz, d_hqp_yx, d_hqp_yy, d_hqp_yz, d_hqp_zx, d_hqp_zy, d_hqp_zz, d_hqp, k); checkCudaErrors(cudaFree(d_hqp_xx)); checkCudaErrors(cudaFree(d_hqp_xy)); checkCudaErrors(cudaFree(d_hqp_xz)); checkCudaErrors(cudaFree(d_hqp_yx)); checkCudaErrors(cudaFree(d_hqp_yy)); checkCudaErrors(cudaFree(d_hqp_yz)); checkCudaErrors(cudaFree(d_hqp_zx)); checkCudaErrors(cudaFree(d_hqp_zy)); checkCudaErrors(cudaFree(d_hqp_zz)); if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code }else{ checkCudaErrors(cudaMemcpy(h_hqp, d_hqp, k2 * sizeof(float9), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_hqp)); } //printf("Done hqp.\n"); //printf("Calculating hpp...\n"); //////////////////////////////////////////////////////////////////////////////////////////// /* hpp */ float * d_hpp_xx; float * d_hpp_yy; float * d_hpp_zz; checkCudaErrors(cudaMalloc((void **)&d_hpp_xx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hpp_yy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hpp_zz, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hppPreComputeKernel <<< blocks, threads >>> (d_g, d_hpp_xx, d_hpp_yy, d_hpp_zz, k); // copy the result back to host float * d_zero; checkCudaErrors(cudaMalloc((void **)&d_zero, k2*sizeof(float))); checkCudaErrors(cudaMemset(d_zero, 0, k2*sizeof(float))); float9 * d_hpp; if (dataInDevice){ d_hpp = h_hpp; }else{ checkCudaErrors(cudaMalloc((void **)&d_hpp, k2*sizeof(float9))); } Float2Float9Kernel2D <<< blocks, threads >>> ( d_hpp_xx, d_zero, d_zero, d_zero, d_hpp_yy, d_zero, d_zero, d_zero, d_hpp_zz, d_hpp, k); checkCudaErrors(cudaFree(d_zero)); if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code }else{ checkCudaErrors(cudaMemcpy(h_hpp, d_hpp, k2 * sizeof(float9), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_hpp)); } //printf("Done hpp.\n"); //printf("Done 2nd derivative.\n"); } checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&hTimer); double hamiltonian_time = sdkGetTimerValue(&hTimer); //printf("Hamiltonian takes %f ms.\n", hamiltonian_time); // Clean up if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code } else { checkCudaErrors( cudaFree(d_p) ); checkCudaErrors( cudaFree(d_q) ); } checkCudaErrors(cudaFree(d_pi_pj)); checkCudaErrors(cudaFree(d_dq)); checkCudaErrors(cudaFree(d_g)); checkCudaErrors(cudaFree(d_one)); checkCublasErrors( cublasDestroy(handle) ); checkCudaErrors(cudaDeviceSynchronize()); return H; } void PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA3D(float3 * h_q, float3 * h_p, float3 * h_alpha, float3 * h_beta, float3 * h_dalpha, float3 * h_dbeta, float sigma, int k, bool dataInDevice ){ // Some variable float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; // variables float3 * d_q; float3 * d_p; float3 * d_alpha; float3 * d_beta; // Initialize cublas cublasHandle_t handle; checkCublasErrors( cublasCreate(&handle) ); // cublasOperation_t trans = CUBLAS_OP_N; // AT if transa == CUBLAS_OP_T cublasOperation_t trans = CUBLAS_OP_T; // AT if transa == CUBLAS_OP_T float alf=1.0; float beta=0; // Some memory control stuff if (dataInDevice){ d_q = h_q; d_p = h_p; d_alpha = h_alpha; d_beta = h_beta; } else { checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_alpha, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_beta, k*sizeof(float3))); checkCudaErrors(cudaMemcpy(d_q, h_q, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, h_p, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_alpha, h_alpha, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_beta, h_beta, k * sizeof(float3), cudaMemcpyHostToDevice)); } // Start timer StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); 
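
    // (Added note) Strategy of the code below: every pairwise contribution to
    // dalpha_i and dbeta_i is first written into a k-by-k matrix (one entry per
    // point pair), and each matrix is then collapsed to a k-vector by a
    // cublasSgemv product with the all-ones vector d_one, which sums out the
    // paired index. This uses O(k^2) temporary storage in exchange for a single
    // well-optimized reduction per coordinate instead of atomic accumulation.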
// allocate the memory for these intermediate variables float * d_pi_pj; float3 * d_dq; float * d_g; float * d_one; checkCudaErrors(cudaMalloc((void **)&d_one, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_pi_pj, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_dq, k2*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_g, k2*sizeof(float))); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); onesKernel1D <<< blocks, threads >>> (d_one, k); // Calculate the pi_pj, dq, g and (pi_pj * g) threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); dqpipjKernel <<< blocks, threads >>> (d_q, d_dq, d_g, f, d_p, d_pi_pj, k); // Calculate the dj-di threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); float3 * d_dbji; checkCudaErrors(cudaMalloc((void **)&d_dbji, k2*sizeof(float3))); dbjiKernel <<< blocks, threads >>> ( d_beta, d_dbji, k ); //////////////////////////////////////////////////////////////////////////////////////////////////////////// /* dalpha */ // Precompute for the da and aa terms float * d_da_pre_x; float * d_da_pre_y; float * d_da_pre_z; checkCudaErrors(cudaMalloc((void **)&d_da_pre_x, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_da_pre_y, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_da_pre_z, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); dalphaPrecomputeKernel <<< blocks, threads >>> (d_pi_pj, d_dq, d_g, d_dbji, f, k, d_da_pre_x, d_da_pre_y, d_da_pre_z, d_p, d_alpha); // Use cublas to sum float * d_da_x; float * d_da_y; float * d_da_z; checkCudaErrors(cudaMalloc((void **)&d_da_x, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_da_y, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_da_z, k*sizeof(float))); // cublas m * v checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_da_pre_x, k, d_one, 1, &beta, d_da_x, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_da_pre_y, k, d_one, 1, &beta, d_da_y, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_da_pre_z, k, d_one, 1, &beta, d_da_z, 1) ); checkCudaErrors( cudaFree(d_da_pre_x) ); checkCudaErrors( cudaFree(d_da_pre_y) ); checkCudaErrors( cudaFree(d_da_pre_z) ); // 3 float to float3 float3 * d_dalpha; if (dataInDevice){ d_dalpha = h_dalpha; } else { checkCudaErrors(cudaMalloc((void **)&d_dalpha, k*sizeof(float3))); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); Float2Float3Kernel1D <<< blocks, threads >>> ( d_da_x, d_da_y, d_da_z, d_dalpha, k); // copy the result back to host mem if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { checkCudaErrors(cudaMemcpy(h_dalpha, d_dalpha, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaFree(d_dalpha) ); } checkCudaErrors( cudaFree(d_da_x) ); checkCudaErrors( cudaFree(d_da_y) ); checkCudaErrors( cudaFree(d_da_z) ); //////////////////////////////////////////////////////////////////////////////////////////////////////////// /* dbeta */ // precompute float * d_db_pre_x; float * d_db_pre_y; float * d_db_pre_z; checkCudaErrors(cudaMalloc((void **)&d_db_pre_x, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_db_pre_y, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_db_pre_z, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); dbetaPrecomputeKernel <<< blocks, threads >>> ( d_p, d_dq, d_g, d_dbji, f, k, d_db_pre_x, d_db_pre_y, d_db_pre_z, d_alpha); // Use cublas to sum float * d_db_x; float * d_db_y; float * d_db_z; checkCudaErrors(cudaMalloc((void **)&d_db_x, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_db_y, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_db_z, k*sizeof(float))); // cublas m * v checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_db_pre_x, k, d_one, 1, &beta, d_db_x, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_db_pre_y, k, d_one, 1, &beta, d_db_y, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_db_pre_z, k, d_one, 1, &beta, d_db_z, 1) ); checkCudaErrors( cudaFree(d_db_pre_x) ); checkCudaErrors( cudaFree(d_db_pre_y) ); checkCudaErrors( cudaFree(d_db_pre_z) ); // 3 float to float3 float3 * d_dbeta; if (dataInDevice){ d_dbeta = h_dbeta; } else { checkCudaErrors(cudaMalloc((void **)&d_dbeta, k*sizeof(float3))); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); Float2Float3Kernel1D <<< blocks, threads >>> ( d_db_x, d_db_y, d_db_z, d_dbeta, k); // add the alpha term threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); addKernel1D <<< blocks, threads >>> (d_dbeta, d_alpha, d_dbeta, k ); // copy the result back to host mem if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code } else { checkCudaErrors(cudaMemcpy(h_dbeta, d_dbeta, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaFree(d_dbeta) ); } checkCudaErrors( cudaFree(d_db_x) ); checkCudaErrors( cudaFree(d_db_y) ); checkCudaErrors( cudaFree(d_db_z) ); // stop timer checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&hTimer); double alpha_beta_time = sdkGetTimerValue(&hTimer); //printf("Alpha_beta takes %f ms.\n", alpha_beta_time); // clean up if (dataInDevice){ // Do nothing. 
        // Duty to manage memory relies on outside code
    } else {
        checkCudaErrors( cudaFree(d_p) );
        checkCudaErrors( cudaFree(d_q) );
        checkCudaErrors( cudaFree(d_alpha) );
        checkCudaErrors( cudaFree(d_beta) );
    }

    checkCudaErrors(cudaFree(d_pi_pj));
    checkCudaErrors(cudaFree(d_dq));
    checkCudaErrors(cudaFree(d_g));
    checkCudaErrors(cudaFree(d_dbji));
    checkCudaErrors(cudaFree(d_one));

    checkCublasErrors( cublasDestroy(handle) );
    checkCudaErrors(cudaDeviceSynchronize());
}

float PointSetHamiltonianSystem_CUDA::landmarkError_CUDA3D(float3 * h_q1, float3 * h_qT, float3 * h_alpha, int k, bool dataInDevice){
    // Variables
    float3 * d_q1;
    float3 * d_qT;
    float3 * d_alpha;
    float * d_alpha_mag;
    dim3 threads;
    dim3 blocks;

    // Some memory control stuff
    checkCudaErrors(cudaMalloc((void **)&d_alpha_mag, k*sizeof(float)));
    if (dataInDevice){
        d_q1 = h_q1;
        d_qT = h_qT;
        d_alpha = h_alpha;
    }else{
        checkCudaErrors(cudaMalloc((void **)&d_q1, k*sizeof(float3)));
        checkCudaErrors(cudaMalloc((void **)&d_qT, k*sizeof(float3)));
        checkCudaErrors(cudaMalloc((void **)&d_alpha, k*sizeof(float3)));
        checkCudaErrors(cudaMemcpy(d_q1, h_q1, k * sizeof(float3), cudaMemcpyHostToDevice));
        checkCudaErrors(cudaMemcpy(d_qT, h_qT, k * sizeof(float3), cudaMemcpyHostToDevice));
    }

    // Calculate the difference and the magnitude
    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    minusAndMagKernel1D <<< blocks, threads >>> (d_q1, d_qT, d_alpha, d_alpha_mag, k);
    float fnorm_sq = Reducer::reduce_sum_wrapper(k, d_alpha_mag);

    // Clean up
    checkCudaErrors(cudaFree(d_alpha_mag));
    if (dataInDevice){
        // Do nothing. Memory control relies on outside code
    }else{
        checkCudaErrors(cudaMemcpy(h_alpha, d_alpha, k * sizeof(float3), cudaMemcpyDeviceToHost));
        checkCudaErrors(cudaFree(d_q1));
        checkCudaErrors(cudaFree(d_qT));
        checkCudaErrors(cudaFree(d_alpha));
    }

    return fnorm_sq;
}

void PointSetHamiltonianSystem_CUDA::combineGradient_CUDA3D(float3 * h_grad, float3 * h_hp, int k, float lambda, bool dataInDevice){
    // Variables
    float3 * d_grad;
    float3 * d_hp;
    dim3 threads;
    dim3 blocks;

    // Some memory control stuff
    if (dataInDevice){
        d_grad = h_grad;
        d_hp = h_hp;
    }else{
        checkCudaErrors(cudaMalloc((void **)&d_grad, k*sizeof(float3)));
        checkCudaErrors(cudaMalloc((void **)&d_hp, k*sizeof(float3)));
        checkCudaErrors(cudaMemcpy(d_grad, h_grad, k * sizeof(float3), cudaMemcpyHostToDevice));
        checkCudaErrors(cudaMemcpy(d_hp, h_hp, k * sizeof(float3), cudaMemcpyHostToDevice));
    }

    // Combine the two gradient terms: grad = hp + lambda * grad
    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    updateKernel1D <<< blocks, threads >>> (d_hp, d_grad, lambda, d_grad, k);

    // Clean up
    if (dataInDevice){
        // Do nothing.
Memory control relies on outside code }else{ checkCudaErrors(cudaMemcpy(h_grad, d_grad, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_grad)); checkCudaErrors(cudaFree(d_hp)); } } void PointSetHamiltonianSystem_CUDA::initP_CUDA3D(float3 * h_q0, float3 * h_qT, float3 * h_p0, int N, int k, bool dataInDevice){ // Variables float3 * d_q0; float3 * d_qT; float3 * d_p0; dim3 threads; dim3 blocks; // Some memory control stuff if (dataInDevice){ d_q0 = h_q0; d_qT = h_qT; d_p0 = h_p0; }else{ checkCudaErrors(cudaMalloc((void **)&d_q0, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_qT, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_p0, k*sizeof(float3))); checkCudaErrors(cudaMemcpy(d_q0, h_q0, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_qT, h_qT, k * sizeof(float3), cudaMemcpyHostToDevice)); } // Calculate the difference and the magnitude threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); minusAndDivideKernel1D <<< blocks, threads >>> (d_qT, d_q0, d_p0, (float) N, k ); // Clean up if (dataInDevice){ // Do nothing. Memory control relies on outside code }else{ checkCudaErrors(cudaMemcpy(h_p0, d_p0, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_q0)); checkCudaErrors(cudaFree(d_qT)); checkCudaErrors(cudaFree(d_p0)); } } void PointSetHamiltonianSystem_CUDA::GAlphaBeta_CUDA3D(float3 * h_q1, float3 * h_qT, float3 * h_p1, float3 * h_alpha, float3 * h_beta, float &Gnorm_sq, float &dsq, float lambda, int k, bool dataInDevice ){ // Variables float3 * d_q1; float3 * d_qT; float3 * d_p1; float3 * d_alpha; float3 * d_beta; float * d_gnsq; float * d_dsq; dim3 threads; dim3 blocks; // Some memory control stuff checkCudaErrors(cudaMalloc((void **)&d_gnsq, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_dsq, k*sizeof(float))); if (dataInDevice){ d_q1 = h_q1; d_qT = h_qT; d_p1 = h_p1; d_alpha = h_alpha; d_beta = h_beta; }else{ checkCudaErrors(cudaMalloc((void **)&d_q1, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_qT, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_p1, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_alpha, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_beta, k*sizeof(float3))); checkCudaErrors(cudaMemcpy(d_q1, h_q1, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_qT, h_qT, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p1, h_p1, k * sizeof(float3), cudaMemcpyHostToDevice)); } // Compute G and alpha/beta threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); GAlphaBetaKernel <<< blocks, threads >>> (d_q1, d_qT, d_p1, d_alpha, d_beta, d_gnsq, d_dsq, lambda, k); Gnorm_sq = Reducer::reduce_sum_wrapper(k, d_gnsq); dsq = Reducer::reduce_sum_wrapper(k, d_dsq); // Clean up checkCudaErrors(cudaFree(d_gnsq)); checkCudaErrors(cudaFree(d_dsq)); if (dataInDevice){ // Do nothing. 
Duty of managing memory reply on outside code }else{ checkCudaErrors(cudaMemcpy(h_alpha, d_alpha, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_beta, d_beta, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_q1)); checkCudaErrors(cudaFree(d_qT)); checkCudaErrors(cudaFree(d_p1)); checkCudaErrors(cudaFree(d_alpha)); checkCudaErrors(cudaFree(d_beta)); } } void PointSetHamiltonianSystem_CUDA::FlowHamiltonianWithGradient_CUDA3D( std::vector<float3*> &Qt, std::vector<float3*> &Pt, float3 * h_q0, float3 * h_p0, float3 * h_q, float3 * h_p, float9 * h_grad_q, float9 * h_grad_p, int N, int k, bool dataInDevice){ } float PointSetHamiltonianSystem_CUDA::FlowHamiltonian_CUDA3D(float3 * h_q0, float3 * h_p0, float3 * h_q1, float3 * h_p1, float3 * h_hq, float3 * h_hp, std::vector<float3*> &Qt, std::vector<float3*> &Pt, float sigma, int k, int N, bool saveIntermediate, bool dataInDevice){ float dt = 1.0 / (float)(N-1); // Initialize q and p // The return value float H, H0; float3 * d_q_t; float3 * d_p_t; float3 * d_hq; float3 * d_hp; dim3 threads; dim3 blocks; checkCudaErrors(cudaMalloc((void **)&d_q_t, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_p_t, k*sizeof(float3))); // Some memory control stuff if (dataInDevice){ d_hq = h_hq; d_hp = h_hp; checkCudaErrors(cudaMemcpy(d_q_t, h_q0, k * sizeof(float3), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(d_p_t, h_p0, k * sizeof(float3), cudaMemcpyDeviceToDevice)); if (saveIntermediate){ checkCudaErrors(cudaMemcpy(Qt[0], h_q0, k * sizeof(float3), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(Pt[0], h_p0, k * sizeof(float3), cudaMemcpyDeviceToDevice)); } } else { checkCudaErrors(cudaMalloc((void **)&d_hq, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_hp, k*sizeof(float3))); checkCudaErrors(cudaMemcpy(d_q_t, h_q0, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p_t, h_p0, k * sizeof(float3), cudaMemcpyHostToDevice)); if (saveIntermediate){ checkCudaErrors(cudaMemcpy(Qt[0], h_q0, k * sizeof(float3), cudaMemcpyHostToHost)); checkCudaErrors(cudaMemcpy(Pt[0], h_p0, k * sizeof(float3), cudaMemcpyHostToHost)); } } // Flow over time for(int t = 1; t < N; t++){ // Compute the hamiltonian H = PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA3D(d_q_t, d_p_t, d_hq, d_hp, NULL, NULL, NULL, sigma, k, false, true); // Euler update threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); updateKernel1D <<< blocks, threads >>> (d_q_t, d_hp, dt, d_q_t, k); updateKernel1D <<< blocks, threads >>> (d_p_t, d_hq, -dt, d_p_t, k); // Save intermediate result if necessary if (saveIntermediate){ if (dataInDevice){ checkCudaErrors(cudaMemcpy(Qt[t], d_q_t, k * sizeof(float3), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(Pt[t], d_p_t, k * sizeof(float3), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(Qt[t], d_q_t, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(Pt[t], d_p_t, k * sizeof(float3), cudaMemcpyDeviceToHost)); } } // store the first hamiltonian value if(t == 1) H0 = H; } // copy the final result out if (dataInDevice){ checkCudaErrors(cudaMemcpy(h_q1, d_q_t, k * sizeof(float3), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(h_p1, d_p_t, k * sizeof(float3), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(h_q1, d_q_t, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_p1, d_p_t, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_hq, 
d_hq, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_hp, d_hp, k * sizeof(float3), cudaMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. Duty to manage mem replies on outside code }else{ checkCudaErrors(cudaFree(d_hq)); checkCudaErrors(cudaFree(d_hp)); } checkCudaErrors(cudaFree(d_q_t)); checkCudaErrors(cudaFree(d_p_t)); return H0; } void PointSetHamiltonianSystem_CUDA::FlowGradientBackward_CUDA3D( std::vector<float3*> &Qt, std::vector<float3*> &Pt, const float3 * d_alpha, const float3 * d_beta, float3 * d_result, float sigma, int k, int N, bool dataInDevice){ // Variables float3 * d_a; float3 * d_b; float3 * d_Da; float3 * d_Db; float3 * d_q; float3 * d_p; float dt = 1.0 / (float)(N-1); dim3 threads; dim3 blocks; // Some memory control stuff checkCudaErrors(cudaMalloc((void **)&d_a, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_b, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_Da, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_Db, k*sizeof(float3))); if (dataInDevice){ checkCudaErrors(cudaMemcpy(d_a, d_alpha, k * sizeof(float3), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(d_b, d_beta, k * sizeof(float3), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(d_a, d_alpha, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_b, d_beta, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float3))); } // Work our way backwards for(int t = N-1; t > 0; t--){ // Load intermediate q and p if (dataInDevice){ d_q = Qt[t - 1]; d_p = Pt[t - 1]; }else{ checkCudaErrors(cudaMemcpy(d_q, Qt[t - 1], k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, Pt[t - 1], k * sizeof(float3), cudaMemcpyHostToDevice)); } // Apply Hamiltonian Hessian to get an update in alpha/beta PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA3D(d_q, d_p, d_a, d_b, d_Da, d_Db, sigma, k, true ); // Update the vectors threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); updateKernel1D <<< blocks, threads >>> (d_a, d_Da, dt, d_a, k); updateKernel1D <<< blocks, threads >>> (d_b, d_Db, dt, d_b, k); } // Finally, what we are really after are the betas if (dataInDevice){ checkCudaErrors(cudaMemcpy(d_result, d_b, k * sizeof(float3), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(d_result, d_b, k * sizeof(float3), cudaMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. 
}else{ checkCudaErrors(cudaFree(d_q)); checkCudaErrors(cudaFree(d_p)); } checkCudaErrors(cudaFree(d_a)); checkCudaErrors(cudaFree(d_b)); checkCudaErrors(cudaFree(d_Da)); checkCudaErrors(cudaFree(d_Db)); } void PointSetHamiltonianSystem_CUDA::InterpolateVelocity_CUDA3D(int t, const float3 x, float3 &v, std::vector<float3*> &Qt, std::vector<float3*> &Pt, float sigma, int k, bool dataInDevice){ // Variables float f = -0.5 / (sigma * sigma); dim3 threads; dim3 blocks; float3 * d_q; float3 * d_p; float * d_KqPt_x; float * d_KqPt_y; float * d_KqPt_z; // Some memory control stuff checkCudaErrors(cudaMalloc((void **)&d_KqPt_x, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_KqPt_y, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_KqPt_z, k*sizeof(float))); if (dataInDevice){ d_q = Qt[t]; d_p = Pt[t]; }else{ checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float3))); checkCudaErrors(cudaMemcpy(d_q, Qt[t], k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, Pt[t], k * sizeof(float3), cudaMemcpyHostToDevice)); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); KqPtKernel <<< blocks, threads >>> ( d_q, d_p, x, f, d_KqPt_x, d_KqPt_y, d_KqPt_z, k); v.x = Reducer::reduce_sum_wrapper(k, d_KqPt_x); v.y = Reducer::reduce_sum_wrapper(k, d_KqPt_y); v.z = Reducer::reduce_sum_wrapper(k, d_KqPt_z); // Clean up if (dataInDevice){ // Do nothing } else { checkCudaErrors(cudaFree(d_q)); checkCudaErrors(cudaFree(d_p)); } checkCudaErrors(cudaFree(d_KqPt_x)); checkCudaErrors(cudaFree(d_KqPt_y)); checkCudaErrors(cudaFree(d_KqPt_z)); } ///////////////////////////////////////// 2D functions////////////////////////////////////////////// float PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA2D(float2 * h_q, float2 * h_p, float2 * h_hq, float2 * h_hp, float4 * h_hqq, float4 * h_hqp, float4 * h_hpp, float sigma, int k, bool flag_hessian, bool dataInDevice ){ // Parameters float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; // Initialize cublas cublasHandle_t handle; checkCublasErrors( cublasCreate(&handle) ); // cublasOperation_t trans = CUBLAS_OP_N; // AT if transa == CUBLAS_OP_T cublasOperation_t trans = CUBLAS_OP_T; // AT if transa == CUBLAS_OP_T float alf=1.0; float beta=0; // Some memory control stuff float2 * d_q; float2 * d_p; if (dataInDevice){ d_q = h_q; d_p = h_p; } else { checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float2))); checkCudaErrors(cudaMemcpy(d_q, h_q, k*sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, h_p, k*sizeof(float2), cudaMemcpyHostToDevice)); } // Start timer StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); // Initialize hamiltonian float H = 0.0; // allocate the memory float * d_pi_pj; float * d_pi_pj_g; float2 * d_dq; float * d_g; checkCudaErrors(cudaMalloc((void **)&d_pi_pj, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_pi_pj_g, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_dq, k2*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_g, k2*sizeof(float))); // Calculate the pi_pj, dq, g and (pi_pj * g) threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); dqpipjKernel <<< blocks, threads >>> (d_q, d_dq, d_g, f, d_p, d_pi_pj, k); multiplyKernel2D <<< blocks, threads >>> (d_pi_pj, d_g, d_pi_pj_g, k, k); float * h_pi_pj_g = new float[k2]; // Calculate H 
    H = 0.5 * Reducer::reduce_sum_wrapper(k2, d_pi_pj_g);
    checkCudaErrors(cudaFree(d_pi_pj_g));

    // Calculate the 1st derivative
    //printf("Calculating 1st derivative...\n");
    float * d_pi_pj_g1_dq_x;
    float * d_pi_pj_g1_dq_y;
    float * d_p_g_x;
    float * d_p_g_y;

    checkCudaErrors(cudaMalloc((void **)&d_pi_pj_g1_dq_x, k2*sizeof(float)));
    checkCudaErrors(cudaMalloc((void **)&d_pi_pj_g1_dq_y, k2*sizeof(float)));
    checkCudaErrors(cudaMalloc((void **)&d_p_g_x, k2*sizeof(float)));
    checkCudaErrors(cudaMalloc((void **)&d_p_g_y, k2*sizeof(float)));

    // Precompute the terms that need to be added up
    threads = dim3(16, 16, 1);
    blocks = dim3( (k+15)/16, (k+15)/16, 1);
    hqhpPreComputeKernel <<< blocks, threads >>> ( d_pi_pj, d_dq, d_g, d_p, f, d_pi_pj_g1_dq_x, d_pi_pj_g1_dq_y, d_p_g_x, d_p_g_y, k);

    float * d_one;
    checkCudaErrors(cudaMalloc((void **)&d_one, k*sizeof(float)));
    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    onesKernel1D <<< blocks, threads >>> (d_one, k);

    // Allocate the memory
    float * d_hq_x;
    float * d_hq_y;
    float * d_hp_x;
    float * d_hp_y;

    checkCudaErrors(cudaMalloc((void **)&d_hq_x, k*sizeof(float)));
    checkCudaErrors(cudaMalloc((void **)&d_hq_y, k*sizeof(float)));
    checkCudaErrors(cudaMalloc((void **)&d_hp_x, k*sizeof(float)));
    checkCudaErrors(cudaMalloc((void **)&d_hp_y, k*sizeof(float)));

    // Use CUBLAS to multiply the terms by one vector to add up
    checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_pi_pj_g1_dq_x, k, d_one, 1, &beta, d_hq_x, 1) );
    checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_pi_pj_g1_dq_y, k, d_one, 1, &beta, d_hq_y, 1) );
    checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_p_g_x, k, d_one, 1, &beta, d_hp_x, 1) );
    checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_p_g_y, k, d_one, 1, &beta, d_hp_y, 1) );

    // clean up
    checkCudaErrors(cudaFree(d_pi_pj_g1_dq_x));
    checkCudaErrors(cudaFree(d_pi_pj_g1_dq_y));
    checkCudaErrors(cudaFree(d_p_g_x));
    checkCudaErrors(cudaFree(d_p_g_y));

    // copy the result back to host
    float2 * d_hq;
    float2 * d_hp;
    if (dataInDevice){
        d_hq = h_hq;
        d_hp = h_hp;
    } else {
        checkCudaErrors(cudaMalloc((void **)&d_hq, k*sizeof(float2)));
        checkCudaErrors(cudaMalloc((void **)&d_hp, k*sizeof(float2)));
    }

    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    Float2Float2Kernel1D <<< blocks, threads >>> ( d_hq_x, d_hq_y, d_hq, k);
    Float2Float2Kernel1D <<< blocks, threads >>> ( d_hp_x, d_hp_y, d_hp, k);

    checkCudaErrors(cudaFree(d_hq_x));
    checkCudaErrors(cudaFree(d_hq_y));
    checkCudaErrors(cudaFree(d_hp_x));
    checkCudaErrors(cudaFree(d_hp_y));

    // Some memory control stuff
    if (dataInDevice){
        // Do nothing.
Duty to manage memory relies on outside code } else { checkCudaErrors(cudaMemcpy(h_hq, d_hq, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_hp, d_hp, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_hq)); checkCudaErrors(cudaFree(d_hp)); } //printf("Done 1st derivative.\n"); // Calculate the 2nd derivatives if (flag_hessian){ //printf("Calculating 2nd derivative...\n"); //printf("Calculating hqq...\n"); /////////////////////////////////////////////////////////////////////////////////////////////////////// /* hqq */ float * d_hqq_xx; float * d_hqq_xy; float * d_hqq_yx; float * d_hqq_yy; // Allocate memory //printf("hqq: Allocating mem...\n"); checkCudaErrors(cudaMalloc((void **)&d_hqq_xx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_xy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_yx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_yy, k2*sizeof(float))); // Precompute the terms //printf("hqq: Precomputing...\n"); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hqqPreComputeKernel <<< blocks, threads >>> (d_pi_pj, d_g, d_dq, f, d_hqq_xx, d_hqq_xy, d_hqq_yx, d_hqq_yy, k); // The diagonal terms need sum - again use cublas float * d_hqq_diag_xx; float * d_hqq_diag_xy; float * d_hqq_diag_yx; float * d_hqq_diag_yy; checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_xx, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_xy, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_yx, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqq_diag_yy, k*sizeof(float))); // cublas sum //printf("hqq: cublas sum...\n"); float * d_mone; checkCudaErrors(cudaMalloc((void **)&d_mone, k*sizeof(float))); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); fillKernel1D <<< blocks, threads >>> (d_mone, k, -1 ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_xx, k, d_mone, 1, &beta, d_hqq_diag_xx, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_xy, k, d_mone, 1, &beta, d_hqq_diag_xy, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_yx, k, d_mone, 1, &beta, d_hqq_diag_yx, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqq_yy, k, d_mone, 1, &beta, d_hqq_diag_yy, 1) ); checkCudaErrors(cudaFree(d_mone)); // Copy the diagonal terms into the matrix //printf("hqq: copy diagonal term...\n"); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); copyToDiagonal <<< blocks, threads >>> (d_hqq_xx, d_hqq_diag_xx, k); copyToDiagonal <<< blocks, threads >>> (d_hqq_xy, d_hqq_diag_xy, k); copyToDiagonal <<< blocks, threads >>> (d_hqq_yx, d_hqq_diag_yx, k); copyToDiagonal <<< blocks, threads >>> (d_hqq_yy, d_hqq_diag_yy, k); checkCudaErrors(cudaFree(d_hqq_diag_xx)); checkCudaErrors(cudaFree(d_hqq_diag_xy)); checkCudaErrors(cudaFree(d_hqq_diag_yx)); checkCudaErrors(cudaFree(d_hqq_diag_yy)); // copy the result back to host //printf("hqq: copy back the result...\n"); float4 * d_hqq; if (dataInDevice){ d_hqq = h_hqq; }else{ checkCudaErrors(cudaMalloc((void **)&d_hqq, k2*sizeof(float4))); } //printf("hqq: Done allocate memory...\n"); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); Float2Float4Kernel2D <<< blocks, threads >>> ( d_hqq_xx, d_hqq_xy, d_hqq_yx, d_hqq_yy, d_hqq, k); //printf("hqq: Done copy 9 float to float9...\n"); checkCudaErrors(cudaFree(d_hqq_xx)); checkCudaErrors(cudaFree(d_hqq_xy)); checkCudaErrors(cudaFree(d_hqq_yx)); 
checkCudaErrors(cudaFree(d_hqq_yy)); if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code }else{ checkCudaErrors(cudaMemcpy(h_hqq, d_hqq, k2 * sizeof(float4), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_hqq)); } //printf("hqq: Done copy back to host...\n"); //printf("Done hqq.\n"); //printf("Calculating hqp...\n"); //////////////////////////////////////////////////////////////////////////////////////////////////// /* hqp */ float * d_hqp_xx; float * d_hqp_xy; float * d_hqp_yx; float * d_hqp_yy; float * d_hqp_ii_xx; float * d_hqp_ii_xy; float * d_hqp_ii_yx; float * d_hqp_ii_yy; // Allocate memory checkCudaErrors(cudaMalloc((void **)&d_hqp_xx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_xy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_yx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_yy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_xx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_xy, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_yx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_ii_yy, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hqpPreComputeKernel <<< blocks, threads >>> (d_p, d_g, f, d_dq, d_hqp_xx, d_hqp_xy, d_hqp_yx, d_hqp_yy, d_hqp_ii_xx, d_hqp_ii_xy, d_hqp_ii_yx, d_hqp_ii_yy, k); // The diagonal terms need sum - again use cublas float * d_hqp_diag_xx; float * d_hqp_diag_xy; float * d_hqp_diag_yx; float * d_hqp_diag_yy; checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_xx, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_xy, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_yx, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hqp_diag_yy, k*sizeof(float))); // cublas sum checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_xx, k, d_one, 1, &beta, d_hqp_diag_xx, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_xy, k, d_one, 1, &beta, d_hqp_diag_xy, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_yx, k, d_one, 1, &beta, d_hqp_diag_yx, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_hqp_ii_yy, k, d_one, 1, &beta, d_hqp_diag_yy, 1) ); // Release checkCudaErrors(cudaFree(d_hqp_ii_xx)); checkCudaErrors(cudaFree(d_hqp_ii_xy)); checkCudaErrors(cudaFree(d_hqp_ii_yx)); checkCudaErrors(cudaFree(d_hqp_ii_yy)); // copy the diagonal terms into the matrix threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); copyToDiagonal <<< blocks, threads >>> (d_hqp_xx, d_hqp_diag_xx, k); copyToDiagonal <<< blocks, threads >>> (d_hqp_xy, d_hqp_diag_xy, k); copyToDiagonal <<< blocks, threads >>> (d_hqp_yx, d_hqp_diag_yx, k); copyToDiagonal <<< blocks, threads >>> (d_hqp_yy, d_hqp_diag_yy, k); checkCudaErrors(cudaFree(d_hqp_diag_xx)); checkCudaErrors(cudaFree(d_hqp_diag_xy)); checkCudaErrors(cudaFree(d_hqp_diag_yx)); checkCudaErrors(cudaFree(d_hqp_diag_yy)); // copy the result back to host float4 * d_hqp; if (dataInDevice){ d_hqp = h_hqp; }else{ checkCudaErrors(cudaMalloc((void **)&d_hqp, k2*sizeof(float4))); } threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); Float2Float4Kernel2D <<< blocks, threads >>> ( d_hqp_xx, d_hqp_xy, d_hqp_yx, d_hqp_yy, d_hqp, k); checkCudaErrors(cudaFree(d_hqp_xx)); checkCudaErrors(cudaFree(d_hqp_xy)); checkCudaErrors(cudaFree(d_hqp_yx)); checkCudaErrors(cudaFree(d_hqp_yy)); if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code }else{ checkCudaErrors(cudaMemcpy(h_hqp, d_hqp, k2 * sizeof(float4), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_hqp)); } //printf("Done hqp.\n"); //printf("Calculating hpp...\n"); //////////////////////////////////////////////////////////////////////////////////////////// /* hpp */ float * d_hpp_xx; float * d_hpp_yy; checkCudaErrors(cudaMalloc((void **)&d_hpp_xx, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_hpp_yy, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); hppPreComputeKernel <<< blocks, threads >>> (d_g, d_hpp_xx, d_hpp_yy, k); // copy the result back to host float * d_zero; checkCudaErrors(cudaMalloc((void **)&d_zero, k2*sizeof(float))); checkCudaErrors(cudaMemset(d_zero, 0, k2*sizeof(float))); float4 * d_hpp; if (dataInDevice){ d_hpp = h_hpp; }else{ checkCudaErrors(cudaMalloc((void **)&d_hpp, k2*sizeof(float4))); } Float2Float4Kernel2D <<< blocks, threads >>> ( d_hpp_xx, d_zero, d_zero, d_hpp_yy, d_hpp, k); checkCudaErrors(cudaFree(d_zero)); if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code }else{ checkCudaErrors(cudaMemcpy(h_hpp, d_hpp, k2 * sizeof(float4), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_hpp)); } //printf("Done hpp.\n"); //printf("Done 2nd derivative.\n"); } checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&hTimer); double hamiltonian_time = sdkGetTimerValue(&hTimer); //printf("Hamiltonian takes %f ms.\n", hamiltonian_time); // Clean up if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code } else { checkCudaErrors( cudaFree(d_p) ); checkCudaErrors( cudaFree(d_q) ); } checkCudaErrors(cudaFree(d_pi_pj)); checkCudaErrors(cudaFree(d_dq)); checkCudaErrors(cudaFree(d_g)); checkCudaErrors(cudaFree(d_one)); checkCublasErrors( cublasDestroy(handle) ); checkCudaErrors(cudaDeviceSynchronize()); return H; } void PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA2D(float2 * h_q, float2 * h_p, float2 * h_alpha, float2 * h_beta, float2 * h_dalpha, float2 * h_dbeta, float sigma, int k, bool dataInDevice ){ // Some variable float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; float * d_one; checkCudaErrors(cudaMalloc((void **)&d_one, k*sizeof(float))); threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); onesKernel1D <<< blocks, threads >>> (d_one, k); // Initialize cublas cublasHandle_t handle; checkCublasErrors( cublasCreate(&handle) ); // cublasOperation_t trans = CUBLAS_OP_N; // AT if transa == CUBLAS_OP_T cublasOperation_t trans = CUBLAS_OP_T; // AT if transa == CUBLAS_OP_T float alf=1.0; float beta=0; // Some memory control stuff float2 * d_q; float2 * d_p; float2 * d_alpha; float2 * d_beta; if (dataInDevice){ d_q = h_q; d_p = h_p; d_alpha = h_alpha; d_beta = h_beta; } else { checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_alpha, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_beta, k*sizeof(float2))); checkCudaErrors(cudaMemcpy(d_q, h_q, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, h_p, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_alpha, h_alpha, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_beta, h_beta, k * sizeof(float2), cudaMemcpyHostToDevice)); } // Start timer StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); 
sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); // allocate the memory float * d_pi_pj; float2 * d_dq; float * d_g; checkCudaErrors(cudaMalloc((void **)&d_pi_pj, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_dq, k2*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_g, k2*sizeof(float))); // Calculate the pi_pj, dq, g and (pi_pj * g) threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); dqpipjKernel <<< blocks, threads >>> (d_q, d_dq, d_g, f, d_p, d_pi_pj, k); // Calculate the dj-di threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); float2 * d_dbji; checkCudaErrors(cudaMalloc((void **)&d_dbji, k2*sizeof(float2))); dbjiKernel <<< blocks, threads >>> ( d_beta, d_dbji, k ); //////////////////////////////////////////////////////////////////////////////////////////////////////////// /* dalpha */ // Precompute for the da and aa terms float * d_da_pre_x; float * d_da_pre_y; checkCudaErrors(cudaMalloc((void **)&d_da_pre_x, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_da_pre_y, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); dalphaPrecomputeKernel <<< blocks, threads >>> (d_pi_pj, d_dq, d_g, d_dbji, f, k, d_da_pre_x, d_da_pre_y, d_p, d_alpha); // Use cublas to sum float * d_da_x; float * d_da_y; checkCudaErrors(cudaMalloc((void **)&d_da_x, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_da_y, k*sizeof(float))); // cublas m * v checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_da_pre_x, k, d_one, 1, &beta, d_da_x, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_da_pre_y, k, d_one, 1, &beta, d_da_y, 1) ); checkCudaErrors( cudaFree(d_da_pre_x) ); checkCudaErrors( cudaFree(d_da_pre_y) ); // 2 float to float2 float2 * d_dalpha; if (dataInDevice){ d_dalpha = h_dalpha; } else { checkCudaErrors(cudaMalloc((void **)&d_dalpha, k*sizeof(float2))); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); Float2Float2Kernel1D <<< blocks, threads >>> ( d_da_x, d_da_y, d_dalpha, k); // copy the result back to host mem if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { checkCudaErrors(cudaMemcpy(h_dalpha, d_dalpha, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaFree(d_dalpha) ); } checkCudaErrors( cudaFree(d_da_x) ); checkCudaErrors( cudaFree(d_da_y) ); //////////////////////////////////////////////////////////////////////////////////////////////////////////// /* dbeta */ // precompute float * d_db_pre_x; float * d_db_pre_y; checkCudaErrors(cudaMalloc((void **)&d_db_pre_x, k2*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_db_pre_y, k2*sizeof(float))); threads = dim3(16, 16, 1); blocks = dim3( (k+15)/16, (k+15)/16, 1); dbetaPrecomputeKernel <<< blocks, threads >>> ( d_p, d_dq, d_g, d_dbji, f, k, d_db_pre_x, d_db_pre_y, d_alpha); // Use cublas to sum float * d_db_x; float * d_db_y; checkCudaErrors(cudaMalloc((void **)&d_db_x, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_db_y, k*sizeof(float))); // cublas m * v checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_db_pre_x, k, d_one, 1, &beta, d_db_x, 1) ); checkCublasErrors( cublasSgemv(handle, trans, k, k, &alf, d_db_pre_y, k, d_one, 1, &beta, d_db_y, 1) ); checkCudaErrors( cudaFree(d_db_pre_x) ); checkCudaErrors( cudaFree(d_db_pre_y) ); // 3 float to float3 float2 * d_dbeta; if (dataInDevice){ d_dbeta = h_dbeta; } else { checkCudaErrors(cudaMalloc((void **)&d_dbeta, k*sizeof(float2))); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); Float2Float2Kernel1D <<< blocks, threads >>> ( d_db_x, d_db_y, d_dbeta, k); // add the alpha term threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); addKernel1D <<< blocks, threads >>> (d_dbeta, d_alpha, d_dbeta, k ); // copy the result back to host mem if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code } else { checkCudaErrors(cudaMemcpy(h_dbeta, d_dbeta, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaFree(d_dbeta) ); } checkCudaErrors( cudaFree(d_db_x) ); checkCudaErrors( cudaFree(d_db_y) ); // stop timer checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&hTimer); double alpha_beta_time = sdkGetTimerValue(&hTimer); //printf("Alpha_beta takes %f ms.\n", alpha_beta_time); // clean up if (dataInDevice){ // Do nothing. 
        // Duty to manage memory relies on outside code
    } else {
        checkCudaErrors( cudaFree(d_p) );
        checkCudaErrors( cudaFree(d_q) );
        checkCudaErrors( cudaFree(d_alpha) );
        checkCudaErrors( cudaFree(d_beta) );
    }

    checkCudaErrors(cudaFree(d_pi_pj));
    checkCudaErrors(cudaFree(d_dq));
    checkCudaErrors(cudaFree(d_g));
    checkCudaErrors(cudaFree(d_dbji));
    checkCudaErrors(cudaFree(d_one));

    checkCublasErrors( cublasDestroy(handle) );
    checkCudaErrors(cudaDeviceSynchronize());
}

float PointSetHamiltonianSystem_CUDA::landmarkError_CUDA2D(float2 * h_q1, float2 * h_qT, float2 * h_alpha, int k, bool dataInDevice){
    // Variables
    float2 * d_q1;
    float2 * d_qT;
    float2 * d_alpha;
    float * d_alpha_mag;
    dim3 threads;
    dim3 blocks;

    // Some memory control stuff
    checkCudaErrors(cudaMalloc((void **)&d_alpha_mag, k*sizeof(float)));
    if (dataInDevice){
        d_q1 = h_q1;
        d_qT = h_qT;
        d_alpha = h_alpha;
    }else{
        checkCudaErrors(cudaMalloc((void **)&d_q1, k*sizeof(float2)));
        checkCudaErrors(cudaMalloc((void **)&d_qT, k*sizeof(float2)));
        checkCudaErrors(cudaMalloc((void **)&d_alpha, k*sizeof(float2)));
        checkCudaErrors(cudaMemcpy(d_q1, h_q1, k * sizeof(float2), cudaMemcpyHostToDevice));
        checkCudaErrors(cudaMemcpy(d_qT, h_qT, k * sizeof(float2), cudaMemcpyHostToDevice));
    }

    // Calculate the difference and the magnitude
    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    minusAndMagKernel1D <<< blocks, threads >>> (d_q1, d_qT, d_alpha, d_alpha_mag, k);
    float fnorm_sq = Reducer::reduce_sum_wrapper(k, d_alpha_mag);

    // Clean up
    checkCudaErrors(cudaFree(d_alpha_mag));
    if (dataInDevice){
        // Do nothing. Memory control relies on outside code
    }else{
        checkCudaErrors(cudaMemcpy(h_alpha, d_alpha, k * sizeof(float2), cudaMemcpyDeviceToHost));
        checkCudaErrors(cudaFree(d_q1));
        checkCudaErrors(cudaFree(d_qT));
        checkCudaErrors(cudaFree(d_alpha));
    }

    return fnorm_sq;
}

void PointSetHamiltonianSystem_CUDA::combineGradient_CUDA2D(float2 * h_grad, float2 * h_hp, int k, float lambda, bool dataInDevice){
    // Variables
    float2 * d_grad;
    float2 * d_hp;
    dim3 threads;
    dim3 blocks;

    // Some memory control stuff
    if (dataInDevice){
        d_grad = h_grad;
        d_hp = h_hp;
    }else{
        checkCudaErrors(cudaMalloc((void **)&d_grad, k*sizeof(float2)));
        checkCudaErrors(cudaMalloc((void **)&d_hp, k*sizeof(float2)));
        checkCudaErrors(cudaMemcpy(d_grad, h_grad, k * sizeof(float2), cudaMemcpyHostToDevice));
        checkCudaErrors(cudaMemcpy(d_hp, h_hp, k * sizeof(float2), cudaMemcpyHostToDevice));
    }

    // Combine the two gradient terms: grad = hp + lambda * grad
    threads = dim3(256, 1, 1);
    blocks = dim3( (k+255)/256, 1, 1);
    updateKernel1D <<< blocks, threads >>> (d_hp, d_grad, lambda, d_grad, k);

    // Clean up
    if (dataInDevice){
        // Do nothing.
Memory control relies on outside code }else{ checkCudaErrors(cudaMemcpy(h_grad, d_grad, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_grad)); checkCudaErrors(cudaFree(d_hp)); } } void PointSetHamiltonianSystem_CUDA::initP_CUDA2D(float2 * h_q0, float2 * h_qT, float2 * h_p0, int N, int k, bool dataInDevice){ // Variables float2 * d_q0; float2 * d_qT; float2 * d_p0; dim3 threads; dim3 blocks; // Some memory control stuff if (dataInDevice){ d_q0 = h_q0; d_qT = h_qT; d_p0 = h_p0; }else{ checkCudaErrors(cudaMalloc((void **)&d_q0, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_qT, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_p0, k*sizeof(float2))); checkCudaErrors(cudaMemcpy(d_q0, h_q0, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_qT, h_qT, k * sizeof(float2), cudaMemcpyHostToDevice)); } // Calculate the difference and the magnitude threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); minusAndDivideKernel1D <<< blocks, threads >>> (d_qT, d_q0, d_p0, (float) N, k ); // Clean up if (dataInDevice){ // Do nothing. Memory control relies on outside code }else{ checkCudaErrors(cudaMemcpy(h_p0, d_p0, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_q0)); checkCudaErrors(cudaFree(d_qT)); checkCudaErrors(cudaFree(d_p0)); } } void PointSetHamiltonianSystem_CUDA::GAlphaBeta_CUDA2D(float2 * h_q1, float2 * h_qT, float2 * h_p1, float2 * h_alpha, float2 * h_beta, float &Gnorm_sq, float &dsq, float lambda, int k, bool dataInDevice ){ // Variables float2 * d_q1; float2 * d_qT; float2 * d_p1; float2 * d_alpha; float2 * d_beta; float * d_gnsq; float * d_dsq; dim3 threads; dim3 blocks; // Some memory control stuff checkCudaErrors(cudaMalloc((void **)&d_gnsq, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_dsq, k*sizeof(float))); if (dataInDevice){ d_q1 = h_q1; d_qT = h_qT; d_p1 = h_p1; d_alpha = h_alpha; d_beta = h_beta; }else{ checkCudaErrors(cudaMalloc((void **)&d_q1, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_qT, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_p1, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_alpha, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_beta, k*sizeof(float2))); checkCudaErrors(cudaMemcpy(d_q1, h_q1, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_qT, h_qT, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p1, h_p1, k * sizeof(float2), cudaMemcpyHostToDevice)); } // Compute G and alpha/beta threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); GAlphaBetaKernel <<< blocks, threads >>> (d_q1, d_qT, d_p1, d_alpha, d_beta, d_gnsq, d_dsq, lambda, k); Gnorm_sq = Reducer::reduce_sum_wrapper(k, d_gnsq); dsq = Reducer::reduce_sum_wrapper(k, d_dsq); // Clean up checkCudaErrors(cudaFree(d_gnsq)); checkCudaErrors(cudaFree(d_dsq)); if (dataInDevice){ // Do nothing. 
Duty of managing memory reply on outside code }else{ checkCudaErrors(cudaMemcpy(h_alpha, d_alpha, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_beta, d_beta, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_q1)); checkCudaErrors(cudaFree(d_qT)); checkCudaErrors(cudaFree(d_p1)); checkCudaErrors(cudaFree(d_alpha)); checkCudaErrors(cudaFree(d_beta)); } } void PointSetHamiltonianSystem_CUDA::FlowHamiltonianWithGradient_CUDA2D( std::vector<float2*> &Qt, std::vector<float2*> &Pt, float2 * h_q0, float2 * h_p0, float2 * h_q, float2 * h_p, float4 * h_grad_q, float4 * h_grad_p, int N, int k, bool dataInDevice){ } float PointSetHamiltonianSystem_CUDA::FlowHamiltonian_CUDA2D(float2 * h_q0, float2 * h_p0, float2 * h_q1, float2 * h_p1, float2 * h_hq, float2 * h_hp, std::vector<float2*> &Qt, std::vector<float2*> &Pt, float sigma, int k, int N, bool saveIntermediate, bool dataInDevice){ float dt = 1.0 / (float)(N-1); // Initialize q and p // The return value float H, H0; float2 * d_q_t; float2 * d_p_t; float2 * d_hq; float2 * d_hp; dim3 threads; dim3 blocks; checkCudaErrors(cudaMalloc((void **)&d_q_t, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_p_t, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_hq, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_hp, k*sizeof(float2))); // Some memory control stuff if (dataInDevice){ checkCudaErrors(cudaMemcpy(d_q_t, h_q0, k * sizeof(float2), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(d_p_t, h_p0, k * sizeof(float2), cudaMemcpyDeviceToDevice)); if (saveIntermediate){ checkCudaErrors(cudaMemcpy(Qt[0], h_q0, k * sizeof(float2), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(Pt[0], h_p0, k * sizeof(float2), cudaMemcpyDeviceToDevice)); } } else { checkCudaErrors(cudaMemcpy(d_q_t, h_q0, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p_t, h_p0, k * sizeof(float2), cudaMemcpyHostToDevice)); if (saveIntermediate){ checkCudaErrors(cudaMemcpy(Qt[0], h_q0, k * sizeof(float2), cudaMemcpyHostToHost)); checkCudaErrors(cudaMemcpy(Pt[0], h_p0, k * sizeof(float2), cudaMemcpyHostToHost)); } } // Flow over time for(int t = 1; t < N; t++){ // Compute the hamiltonian H = PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA2D(d_q_t, d_p_t, d_hq, d_hp, NULL, NULL, NULL, sigma, k, false, true); // Euler update threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); updateKernel1D <<< blocks, threads >>> (d_q_t, d_hp, dt, d_q_t, k); updateKernel1D <<< blocks, threads >>> (d_p_t, d_hq, -dt, d_p_t, k); // Save intermediate result if necessary if (saveIntermediate){ if (dataInDevice){ checkCudaErrors(cudaMemcpy(Qt[t], d_q_t, k * sizeof(float2), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(Pt[t], d_p_t, k * sizeof(float2), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(Qt[t], d_q_t, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(Pt[t], d_p_t, k * sizeof(float2), cudaMemcpyDeviceToHost)); } } // store the first hamiltonian value if(t == 1) H0 = H; } // copy the final result out if (dataInDevice){ checkCudaErrors(cudaMemcpy(h_q1, d_q_t, k * sizeof(float2), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(h_p1, d_p_t, k * sizeof(float2), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(h_q1, d_q_t, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_p1, d_p_t, k * sizeof(float2), cudaMemcpyDeviceToHost)); } // Clean up checkCudaErrors(cudaFree(d_q_t)); 
checkCudaErrors(cudaFree(d_p_t)); checkCudaErrors(cudaFree(d_hq)); checkCudaErrors(cudaFree(d_hp)); return H0; } void PointSetHamiltonianSystem_CUDA::FlowGradientBackward_CUDA2D(std::vector<float2*> &Qt, std::vector<float2*> &Pt, const float2 * d_alpha, const float2 * d_beta, float2 * d_result, float sigma, int k, int N, bool dataInDevice){ // Variables float2 * d_a; float2 * d_b; float2 * d_Da; float2 * d_Db; float2 * d_q; float2 * d_p; float dt = 1.0 / (float)(N-1); dim3 threads; dim3 blocks; // Some memory control stuff checkCudaErrors(cudaMalloc((void **)&d_a, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_b, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_Da, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_Db, k*sizeof(float2))); if (dataInDevice){ checkCudaErrors(cudaMemcpy(d_a, d_alpha, k * sizeof(float2), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(d_b, d_beta, k * sizeof(float2), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(d_a, d_alpha, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_b, d_beta, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float2))); } // Work our way backwards for(int t = N-1; t > 0; t--){ // Load intermediate q and p if (dataInDevice){ d_q = Qt[t - 1]; d_p = Pt[t - 1]; }else{ checkCudaErrors(cudaMemcpy(d_q, Qt[t - 1], k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, Pt[t - 1], k * sizeof(float2), cudaMemcpyHostToDevice)); } // Apply Hamiltonian Hessian to get an update in alpha/beta PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA2D(d_q, d_p, d_a, d_b, d_Da, d_Db, sigma, k, true ); // Update the vectors threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); updateKernel1D <<< blocks, threads >>> (d_a, d_Da, dt, d_a, k); updateKernel1D <<< blocks, threads >>> (d_b, d_Db, dt, d_b, k); } // Finally, what we are really after are the betas if (dataInDevice){ checkCudaErrors(cudaMemcpy(d_result, d_b, k * sizeof(float2), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(d_result, d_b, k * sizeof(float2), cudaMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. 
}else{ checkCudaErrors(cudaFree(d_q)); checkCudaErrors(cudaFree(d_p)); } checkCudaErrors(cudaFree(d_a)); checkCudaErrors(cudaFree(d_b)); checkCudaErrors(cudaFree(d_Da)); checkCudaErrors(cudaFree(d_Db)); } void PointSetHamiltonianSystem_CUDA::InterpolateVelocity_CUDA2D(unsigned int t, const float2 x, float2 &v, std::vector<float2*> &Qt, std::vector<float2*> &Pt, float sigma, int k, bool dataInDevice){ // Variables float f = -0.5 / (sigma * sigma); dim3 threads; dim3 blocks; float2 * d_q; float2 * d_p; float * d_KqPt_x; float * d_KqPt_y; // Some memory control stuff checkCudaErrors(cudaMalloc((void **)&d_KqPt_x, k*sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_KqPt_y, k*sizeof(float))); if (dataInDevice){ d_q = Qt[t]; d_p = Pt[t]; }else{ checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float2))); checkCudaErrors(cudaMemcpy(d_q, Qt[t], k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, Pt[t], k * sizeof(float2), cudaMemcpyHostToDevice)); } threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); KqPtKernel <<< blocks, threads >>> ( d_q, d_p, x, f, d_KqPt_x, d_KqPt_y, k); v.x = Reducer::reduce_sum_wrapper(k, d_KqPt_x); v.y = Reducer::reduce_sum_wrapper(k, d_KqPt_y); // Clean up if (dataInDevice){ // Do nothing } else { checkCudaErrors(cudaFree(d_q)); checkCudaErrors(cudaFree(d_p)); } checkCudaErrors(cudaFree(d_KqPt_x)); checkCudaErrors(cudaFree(d_KqPt_y)); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Accelerated hqhp and alphabeta void hqhpRestrictedKernel_wrapper(float3 * d_q, float3 * d_p, float3 * d_hq, float3 * d_hp, float * d_ham, float f, int k, int blockSize){ dim3 dimBlock( blockSize, 1, 1 ); dim3 dimGrid( (k+2*blockSize-1) / (2*blockSize), k, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = 7 * blockSize * sizeof(float); // Debug: check the cover- cover is actually normal. Then the reduce process must have bug switch (blockSize){ case 1024: hqhpRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 512: hqhpRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 256: hqhpRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 128: hqhpRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 64: hqhpRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; default: printf("From function hqhpRestrictedKernel_wrapper: The number of thread is not a power of 2 or it is" " smaller than 64 or larger than 1024. Check your code!\n"); } } void hqhpRestrictedKernel_wrapper(float2 * d_q, float2 * d_p, float2 * d_hq, float2 * d_hp, float * d_ham, float f, int k, int blockSize){ dim3 dimBlock( blockSize, 1, 1 ); dim3 dimGrid( (k+2*blockSize-1) / (2*blockSize), k, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = 5 * blockSize * sizeof(float); // Debug: check the cover- cover is actually normal. 
Then the reduce process must have bug switch (blockSize){ case 1024: hqhpRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 512: hqhpRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 256: hqhpRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 128: hqhpRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; case 64: hqhpRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, f, d_hq, d_hp, d_ham, k, blockSize); break; default: printf("From function hqhpRestrictedKernel_wrapper: The number of thread is not a power of 2 or it is" " smaller than 64 or larger than 1024. Check your code!\n"); } } float PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA3D_Restricted(float3 * h_q, float3 * h_p, float3 * h_hq, float3 * h_hp, float sigma, int k, int blockSize, bool dataInDevice ){ // Variables float f = -0.5 / (sigma * sigma); float3 * d_q; float3 * d_p; float3 * d_hq; float3 * d_hp; float * d_ham; // Some memory control stuff if (dataInDevice){ d_q = h_q; d_p = h_p; d_hq = h_hq; d_hp = h_hp; } else { checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_hq, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_hp, k*sizeof(float3))); checkCudaErrors(cudaMemcpy(d_q, h_q, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, h_p, k * sizeof(float3), cudaMemcpyHostToDevice)); } checkCudaErrors(cudaMalloc((void **)&d_ham, k*sizeof(float))); // Start timer // StopWatchInterface *hTimer = NULL; // sdkCreateTimer(&hTimer); // sdkResetTimer(&hTimer); // sdkStartTimer(&hTimer); // sdkStopTimer(&hTimer); // double hamiltonian_time = sdkGetTimerValue(&hTimer); // //printf("Restricted Hamiltonian takes %f ms.\n", hamiltonian_time); // checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemset(d_hq, 0, k*sizeof(float3))); checkCudaErrors(cudaMemset(d_hp, 0, k*sizeof(float3))); checkCudaErrors(cudaMemset(d_ham, 0, k*sizeof(float))); hqhpRestrictedKernel_wrapper(d_q, d_p, d_hq, d_hp, d_ham, f, k, blockSize); // Calculate hamiltonian float H = 0.5 * Reducer::reduce_sum_wrapper(k, d_ham); // Clean up if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { checkCudaErrors(cudaMemcpy(h_hq, d_hq, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_hp, d_hp, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_p)); checkCudaErrors(cudaFree(d_q)); checkCudaErrors(cudaFree(d_hq)); checkCudaErrors(cudaFree(d_hp)); } checkCudaErrors(cudaFree(d_ham)); checkCudaErrors(cudaDeviceSynchronize()); return H; } float PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA2D_Restricted(float2 * h_q, float2 * h_p, float2 * h_hq, float2 * h_hp, float sigma, int k, int blockSize, bool dataInDevice ){ // Variables float f = -0.5 / (sigma * sigma); float2 * d_q; float2 * d_p; float2 * d_hq; float2 * d_hp; float * d_ham; // Some memory control stuff if (dataInDevice){ d_q = h_q; d_p = h_p; d_hq = h_hq; d_hp = h_hp; } else { checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_hq, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_hp, k*sizeof(float2))); checkCudaErrors(cudaMemcpy(d_q, h_q, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, h_p, k * sizeof(float2), cudaMemcpyHostToDevice)); } checkCudaErrors(cudaMalloc((void **)&d_ham, k*sizeof(float))); // Start timer // StopWatchInterface *hTimer = NULL; // sdkCreateTimer(&hTimer); // sdkResetTimer(&hTimer); // sdkStartTimer(&hTimer); // sdkStopTimer(&hTimer); // double hamiltonian_time = sdkGetTimerValue(&hTimer); // //printf("Restricted Hamiltonian takes %f ms.\n", hamiltonian_time); // checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemset(d_hq, 0, k*sizeof(float2))); checkCudaErrors(cudaMemset(d_hp, 0, k*sizeof(float2))); checkCudaErrors(cudaMemset(d_ham, 0, k*sizeof(float))); hqhpRestrictedKernel_wrapper(d_q, d_p, d_hq, d_hp, d_ham, f, k, blockSize); // Calculate hamiltonian float H = 0.5 * Reducer::reduce_sum_wrapper(k, d_ham); // Clean up if (dataInDevice){ // Do nothing. Duty to manage memory relies on outside code } else { checkCudaErrors(cudaMemcpy(h_hq, d_hq, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_hp, d_hp, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_p)); checkCudaErrors(cudaFree(d_q)); checkCudaErrors(cudaFree(d_hq)); checkCudaErrors(cudaFree(d_hp)); } checkCudaErrors(cudaFree(d_ham)); checkCudaErrors(cudaDeviceSynchronize()); return H; } void alphaBetaRestrictedKernel_wrapper(float3 * d_q, float3 * d_p, float3 * d_alpha, float3 * d_beta, float3 * d_dalpha, float3 * d_dbeta, float f, int k, int blockSize){ dim3 dimBlock( blockSize, 1, 1 ); dim3 dimGrid( (k+2*blockSize-1) / (2*blockSize), k, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = 6 * blockSize * sizeof(float); // Debug: check the cover- cover is actually normal. 
Then the reduce process must have bug switch (blockSize){ case 1024: alphaBetaRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 512: alphaBetaRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 256: alphaBetaRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 128: alphaBetaRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 64: alphaBetaRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; default: printf("From function hqhpRestrictedKernel_wrapper: The number of thread is not a power of 2 or it is" " smaller than 64 or larger than 1024. Check your code!\n"); } } void alphaBetaRestrictedKernel_wrapper(float2 * d_q, float2 * d_p, float2 * d_alpha, float2 * d_beta, float2 * d_dalpha, float2 * d_dbeta, float f, int k, int blockSize){ dim3 dimBlock( blockSize, 1, 1 ); dim3 dimGrid( (k+2*blockSize-1) / (2*blockSize), k, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = 4 * blockSize * sizeof(float); // Debug: check the cover- cover is actually normal. Then the reduce process must have bug switch (blockSize){ case 1024: alphaBetaRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 512: alphaBetaRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 256: alphaBetaRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 128: alphaBetaRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; case 64: alphaBetaRestrictedKernel <<< dimGrid, dimBlock, smemSize >>> ( d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); break; default: printf("From function hqhpRestrictedKernel_wrapper: The number of thread is not a power of 2 or it is" " smaller than 64 or larger than 1024. 
Check your code!\n"); } } void PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA3D_Restricted(float3 * h_q, float3 * h_p, float3 * h_alpha, float3 * h_beta, float3 * h_dalpha, float3 * h_dbeta, float sigma, int k, int blockSize, bool dataInDevice ){ // Some variable float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; // variables float3 * d_q; float3 * d_p; float3 * d_alpha; float3 * d_beta; float3 * d_dalpha; float3 * d_dbeta; // Some memory control stuff if (dataInDevice){ d_q = h_q; d_p = h_p; d_alpha = h_alpha; d_beta = h_beta; d_dalpha = h_dalpha; d_dbeta = h_dbeta; } else { checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_alpha, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_beta, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_dalpha, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_dbeta, k*sizeof(float3))); checkCudaErrors(cudaMemcpy(d_q, h_q, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, h_p, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_alpha, h_alpha, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_beta, h_beta, k * sizeof(float3), cudaMemcpyHostToDevice)); } // Start timer // StopWatchInterface *hTimer = NULL; // sdkCreateTimer(&hTimer); // sdkResetTimer(&hTimer); // sdkStartTimer(&hTimer); // sdkStopTimer(&hTimer); // double alpha_beta_time = sdkGetTimerValue(&hTimer); //printf("Restricted alpha_beta takes %f ms.\n", alpha_beta_time); // // stop timer // checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemset(d_dalpha, 0, k*sizeof(float3))); checkCudaErrors(cudaMemset(d_dbeta, 0, k*sizeof(float3))); ///////////////////////////////////////////////////////////////////////////////////////// // run the wrapper alphaBetaRestrictedKernel_wrapper(d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); // clean up if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { // copy the result back checkCudaErrors(cudaMemcpy(h_dalpha, d_dalpha, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_dbeta, d_dbeta, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaFree(d_p) ); checkCudaErrors( cudaFree(d_q) ); checkCudaErrors( cudaFree(d_alpha) ); checkCudaErrors( cudaFree(d_beta) ); checkCudaErrors( cudaFree(d_dalpha) ); checkCudaErrors( cudaFree(d_dbeta) ); } checkCudaErrors(cudaDeviceSynchronize()); } void PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA2D_Restricted(float2 * h_q, float2 * h_p, float2 * h_alpha, float2 * h_beta, float2 * h_dalpha, float2 * h_dbeta, float sigma, int k, int blockSize, bool dataInDevice ){ // Some variable float f = -0.5 / (sigma * sigma); long k2 = k*k; dim3 threads; dim3 blocks; // variables float2 * d_q; float2 * d_p; float2 * d_alpha; float2 * d_beta; float2 * d_dalpha; float2 * d_dbeta; // Some memory control stuff if (dataInDevice){ d_q = h_q; d_p = h_p; d_alpha = h_alpha; d_beta = h_beta; d_dalpha = h_dalpha; d_dbeta = h_dbeta; } else { checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_alpha, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_beta, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_dalpha, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_dbeta, k*sizeof(float2))); checkCudaErrors(cudaMemcpy(d_q, h_q, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, h_p, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_alpha, h_alpha, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_beta, h_beta, k * sizeof(float2), cudaMemcpyHostToDevice)); } // Start timer // StopWatchInterface *hTimer = NULL; // sdkCreateTimer(&hTimer); // sdkResetTimer(&hTimer); // sdkStartTimer(&hTimer); // sdkStopTimer(&hTimer); // double alpha_beta_time = sdkGetTimerValue(&hTimer); //printf("Restricted alpha_beta takes %f ms.\n", alpha_beta_time); // // stop timer // checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemset(d_dalpha, 0, k*sizeof(float2))); checkCudaErrors(cudaMemset(d_dbeta, 0, k*sizeof(float2))); ///////////////////////////////////////////////////////////////////////////////////////// // run the wrapper alphaBetaRestrictedKernel_wrapper(d_q, d_p, d_alpha, d_beta, d_dalpha, d_dbeta, f, k, blockSize); // clean up if (dataInDevice){ // Do nothing. 
Duty to manage memory relies on outside code } else { // copy the result back checkCudaErrors(cudaMemcpy(h_dalpha, d_dalpha, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_dbeta, d_dbeta, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaFree(d_p) ); checkCudaErrors( cudaFree(d_q) ); checkCudaErrors( cudaFree(d_alpha) ); checkCudaErrors( cudaFree(d_beta) ); checkCudaErrors( cudaFree(d_dalpha) ); checkCudaErrors( cudaFree(d_dbeta) ); } checkCudaErrors(cudaDeviceSynchronize()); } float PointSetHamiltonianSystem_CUDA::FlowHamiltonian_CUDA3D_Restricted(float3 * h_q0, float3 * h_p0, float3 * h_q1, float3 * h_p1, float3 * h_hq, float3 * h_hp, std::vector<float3*> &Qt, std::vector<float3*> &Pt, float sigma, int k, int N, int blockSize, bool saveIntermediate, bool dataInDevice){ float dt = 1.0 / (float)(N-1); // Initialize q and p // The return value float H, H0; float3 * d_q_t; float3 * d_p_t; float3 * d_hq; float3 * d_hp; dim3 threads; dim3 blocks; checkCudaErrors(cudaMalloc((void **)&d_q_t, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_p_t, k*sizeof(float3))); // Some memory control stuff if (dataInDevice){ d_hq = h_hq; d_hp = h_hp; checkCudaErrors(cudaMemcpy(d_q_t, h_q0, k * sizeof(float3), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(d_p_t, h_p0, k * sizeof(float3), cudaMemcpyDeviceToDevice)); if (saveIntermediate){ checkCudaErrors(cudaMemcpy(Qt[0], h_q0, k * sizeof(float3), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(Pt[0], h_p0, k * sizeof(float3), cudaMemcpyDeviceToDevice)); } } else { checkCudaErrors(cudaMalloc((void **)&d_hq, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_hp, k*sizeof(float3))); checkCudaErrors(cudaMemcpy(d_q_t, h_q0, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p_t, h_p0, k * sizeof(float3), cudaMemcpyHostToDevice)); if (saveIntermediate){ checkCudaErrors(cudaMemcpy(Qt[0], h_q0, k * sizeof(float3), cudaMemcpyHostToHost)); checkCudaErrors(cudaMemcpy(Pt[0], h_p0, k * sizeof(float3), cudaMemcpyHostToHost)); } } // Flow over time for(int t = 1; t < N; t++){ // Compute the hamiltonian H = PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA3D_Restricted(d_q_t, d_p_t, d_hq, d_hp, sigma, k, blockSize, true); // Euler update threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); updateKernel1D <<< blocks, threads >>> (d_q_t, d_hp, dt, d_q_t, k); updateKernel1D <<< blocks, threads >>> (d_p_t, d_hq, -dt, d_p_t, k); // Save intermediate result if necessary if (saveIntermediate){ if (dataInDevice){ checkCudaErrors(cudaMemcpy(Qt[t], d_q_t, k * sizeof(float3), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(Pt[t], d_p_t, k * sizeof(float3), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(Qt[t], d_q_t, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(Pt[t], d_p_t, k * sizeof(float3), cudaMemcpyDeviceToHost)); } } // store the first hamiltonian value if(t == 1) H0 = H; } // copy the final result out if (dataInDevice){ checkCudaErrors(cudaMemcpy(h_q1, d_q_t, k * sizeof(float3), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(h_p1, d_p_t, k * sizeof(float3), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(h_q1, d_q_t, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_p1, d_p_t, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_hq, d_hq, k * sizeof(float3), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_hp, d_hp, k * sizeof(float3), 
cudaMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. Duty to manage mem replies on outside code }else{ checkCudaErrors(cudaFree(d_hq)); checkCudaErrors(cudaFree(d_hp)); } checkCudaErrors(cudaFree(d_q_t)); checkCudaErrors(cudaFree(d_p_t)); checkCudaErrors(cudaDeviceSynchronize()); return H0; } float PointSetHamiltonianSystem_CUDA::FlowHamiltonian_CUDA2D_Restricted(float2 * h_q0, float2 * h_p0, float2 * h_q1, float2 * h_p1, float2 * h_hq, float2 * h_hp, std::vector<float2*> &Qt, std::vector<float2*> &Pt, float sigma, int k, int N, int blockSize, bool saveIntermediate, bool dataInDevice){ float dt = 1.0 / (float)(N-1); // Initialize q and p // The return value float H, H0; float2 * d_q_t; float2 * d_p_t; float2 * d_hq; float2 * d_hp; dim3 threads; dim3 blocks; checkCudaErrors(cudaMalloc((void **)&d_q_t, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_p_t, k*sizeof(float2))); // Some memory control stuff if (dataInDevice){ d_hq = h_hq; d_hp = h_hp; checkCudaErrors(cudaMemcpy(d_q_t, h_q0, k * sizeof(float2), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(d_p_t, h_p0, k * sizeof(float2), cudaMemcpyDeviceToDevice)); if (saveIntermediate){ checkCudaErrors(cudaMemcpy(Qt[0], h_q0, k * sizeof(float2), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(Pt[0], h_p0, k * sizeof(float2), cudaMemcpyDeviceToDevice)); } } else { checkCudaErrors(cudaMalloc((void **)&d_hq, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_hp, k*sizeof(float2))); checkCudaErrors(cudaMemcpy(d_q_t, h_q0, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p_t, h_p0, k * sizeof(float2), cudaMemcpyHostToDevice)); if (saveIntermediate){ checkCudaErrors(cudaMemcpy(Qt[0], h_q0, k * sizeof(float2), cudaMemcpyHostToHost)); checkCudaErrors(cudaMemcpy(Pt[0], h_p0, k * sizeof(float2), cudaMemcpyHostToHost)); } } // Flow over time for(int t = 1; t < N; t++){ // Compute the hamiltonian H = PointSetHamiltonianSystem_CUDA::ComputeHamiltonianJet_CUDA2D_Restricted(d_q_t, d_p_t, d_hq, d_hp, sigma, k, blockSize, true); // Euler update threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); updateKernel1D <<< blocks, threads >>> (d_q_t, d_hp, dt, d_q_t, k); updateKernel1D <<< blocks, threads >>> (d_p_t, d_hq, -dt, d_p_t, k); // Save intermediate result if necessary if (saveIntermediate){ if (dataInDevice){ checkCudaErrors(cudaMemcpy(Qt[t], d_q_t, k * sizeof(float2), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(Pt[t], d_p_t, k * sizeof(float2), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(Qt[t], d_q_t, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(Pt[t], d_p_t, k * sizeof(float2), cudaMemcpyDeviceToHost)); } } // store the first hamiltonian value if(t == 1) H0 = H; } // copy the final result out if (dataInDevice){ checkCudaErrors(cudaMemcpy(h_q1, d_q_t, k * sizeof(float2), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(h_p1, d_p_t, k * sizeof(float2), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(h_q1, d_q_t, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_p1, d_p_t, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_hq, d_hq, k * sizeof(float2), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_hp, d_hp, k * sizeof(float2), cudaMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. 
Duty to manage mem replies on outside code }else{ checkCudaErrors(cudaFree(d_hq)); checkCudaErrors(cudaFree(d_hp)); } checkCudaErrors(cudaFree(d_q_t)); checkCudaErrors(cudaFree(d_p_t)); checkCudaErrors(cudaDeviceSynchronize()); return H0; } void PointSetHamiltonianSystem_CUDA::FlowGradientBackward_CUDA3D_Restricted( std::vector<float3*> &Qt, std::vector<float3*> &Pt, const float3 * d_alpha, const float3 * d_beta, float3 * d_result, float sigma, int k, int N, int blockSize, bool dataInDevice){ // Variables float3 * d_a; float3 * d_b; float3 * d_Da; float3 * d_Db; float3 * d_q; float3 * d_p; float dt = 1.0 / (float)(N-1); dim3 threads; dim3 blocks; // Some memory control stuff checkCudaErrors(cudaMalloc((void **)&d_a, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_b, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_Da, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_Db, k*sizeof(float3))); if (dataInDevice){ checkCudaErrors(cudaMemcpy(d_a, d_alpha, k * sizeof(float3), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(d_b, d_beta, k * sizeof(float3), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(d_a, d_alpha, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_b, d_beta, k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float3))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float3))); } // Work our way backwards for(int t = N-1; t > 0; t--){ // Load intermediate q and p if (dataInDevice){ d_q = Qt[t - 1]; d_p = Pt[t - 1]; }else{ checkCudaErrors(cudaMemcpy(d_q, Qt[t - 1], k * sizeof(float3), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, Pt[t - 1], k * sizeof(float3), cudaMemcpyHostToDevice)); } // Apply Hamiltonian Hessian to get an update in alpha/beta PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA3D_Restricted(d_q, d_p, d_a, d_b, d_Da, d_Db, sigma, k, blockSize, true ); // Update the vectors threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); updateKernel1D <<< blocks, threads >>> (d_a, d_Da, dt, d_a, k); updateKernel1D <<< blocks, threads >>> (d_b, d_Db, dt, d_b, k); } // Finally, what we are really after are the betas if (dataInDevice){ checkCudaErrors(cudaMemcpy(d_result, d_b, k * sizeof(float3), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(d_result, d_b, k * sizeof(float3), cudaMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. 
}else{ checkCudaErrors(cudaFree(d_q)); checkCudaErrors(cudaFree(d_p)); } checkCudaErrors(cudaFree(d_a)); checkCudaErrors(cudaFree(d_b)); checkCudaErrors(cudaFree(d_Da)); checkCudaErrors(cudaFree(d_Db)); checkCudaErrors(cudaDeviceSynchronize()); } void PointSetHamiltonianSystem_CUDA::FlowGradientBackward_CUDA2D_Restricted( std::vector<float2*> &Qt, std::vector<float2*> &Pt, const float2 * d_alpha, const float2 * d_beta, float2 * d_result, float sigma, int k, int N, int blockSize, bool dataInDevice){ // Variables float2 * d_a; float2 * d_b; float2 * d_Da; float2 * d_Db; float2 * d_q; float2 * d_p; float dt = 1.0 / (float)(N-1); dim3 threads; dim3 blocks; // Some memory control stuff checkCudaErrors(cudaMalloc((void **)&d_a, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_b, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_Da, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_Db, k*sizeof(float2))); if (dataInDevice){ checkCudaErrors(cudaMemcpy(d_a, d_alpha, k * sizeof(float2), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(d_b, d_beta, k * sizeof(float2), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(d_a, d_alpha, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_b, d_beta, k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void **)&d_q, k*sizeof(float2))); checkCudaErrors(cudaMalloc((void **)&d_p, k*sizeof(float2))); } // Work our way backwards for(int t = N-1; t > 0; t--){ // Load intermediate q and p if (dataInDevice){ d_q = Qt[t - 1]; d_p = Pt[t - 1]; }else{ checkCudaErrors(cudaMemcpy(d_q, Qt[t - 1], k * sizeof(float2), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, Pt[t - 1], k * sizeof(float2), cudaMemcpyHostToDevice)); } // Apply Hamiltonian Hessian to get an update in alpha/beta PointSetHamiltonianSystem_CUDA::ApplyHamiltonianHessianToAlphaBeta_CUDA2D_Restricted(d_q, d_p, d_a, d_b, d_Da, d_Db, sigma, k, blockSize, true ); // Update the vectors threads = dim3(256, 1, 1); blocks = dim3( (k+255)/256, 1, 1); updateKernel1D <<< blocks, threads >>> (d_a, d_Da, dt, d_a, k); updateKernel1D <<< blocks, threads >>> (d_b, d_Db, dt, d_b, k); } // Finally, what we are really after are the betas if (dataInDevice){ checkCudaErrors(cudaMemcpy(d_result, d_b, k * sizeof(float2), cudaMemcpyDeviceToDevice)); }else{ checkCudaErrors(cudaMemcpy(d_result, d_b, k * sizeof(float2), cudaMemcpyDeviceToHost)); } // Clean up if (dataInDevice){ // Do nothing. }else{ checkCudaErrors(cudaFree(d_q)); checkCudaErrors(cudaFree(d_p)); } checkCudaErrors(cudaFree(d_a)); checkCudaErrors(cudaFree(d_b)); checkCudaErrors(cudaFree(d_Da)); checkCudaErrors(cudaFree(d_Db)); checkCudaErrors(cudaDeviceSynchronize()); }
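A brief aside on the flow routines above: each Euler step advances q and p by launching updateKernel1D with a positive step dt on (q, hp) and a negative step -dt on (p, hq). That kernel is presumably defined earlier in this file; the sketch below is only an assumption about its elementwise behavior (out = a + scale * b per point), under an invented name so it is not confused with the real one.

// Hedged sketch: an elementwise Euler-update kernel consistent with how
// updateKernel1D is invoked above. eulerUpdateSketch and its body are
// illustrative assumptions, not the actual implementation.
__global__ void eulerUpdateSketch(const float3 *a, const float3 *b, float scale,
                                  float3 *out, int k) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < k) {
        out[i].x = a[i].x + scale * b[i].x;
        out[i].y = a[i].y + scale * b[i].y;
        out[i].z = a[i].z + scale * b[i].z;
    }
}
// Hypothetical launch mirroring the dim3(256,1,1), (k+255)/256 configuration used above:
// eulerUpdateSketch<<<(k + 255) / 256, 256>>>(d_q_t, d_hp, dt, d_q_t, k);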
832a544c14709566d4f32d4bdb0492e96fcd9e12.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/generate_proposals_kernel.h" #include <algorithm> #include <vector> #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/detection/bbox_util.h" #include "paddle/phi/kernels/funcs/for_range.h" #include "paddle/phi/kernels/funcs/gather.cu.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) int const kThreadsPerBlock = sizeof(uint64_t) * 8; static const double kBBoxClipDefault = ::log(1000.0 / 16.0); template <typename T> static void SortDescending(const phi::GPUContext &ctx, const DenseTensor &value, DenseTensor *value_out, DenseTensor *index_out) { int num = static_cast<int>(value.numel()); DenseTensor index_in_t; index_in_t.Resize(phi::make_ddim({num})); int *idx_in = ctx.template Alloc<int>(&index_in_t); phi::funcs::ForRange<phi::GPUContext> for_range(ctx, num); for_range(funcs::RangeInitFunctor{0, 1, idx_in}); index_out->Resize(phi::make_ddim({num})); int *idx_out = ctx.template Alloc<int>(index_out); const T *keys_in = value.data<T>(); value_out->Resize(phi::make_ddim({num})); T *keys_out = ctx.template Alloc<T>(value_out); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairsDescending<T, int>(nullptr, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num, 0, sizeof(T) * 8, ctx.stream()); // Allocate temporary storage auto place = ctx.GetPlace(); auto d_temp_storage = phi::memory_utils::Alloc(place, temp_storage_bytes); // Run sorting operation hipcub::DeviceRadixSort::SortPairsDescending<T, int>(d_temp_storage->ptr(), temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num, 0, sizeof(T) * 8, ctx.stream()); } template <typename T> struct BoxDecodeAndClipFunctor { const T *anchor; const T *deltas; const T *var; const int *index; const T *im_info; const bool pixel_offset; T *proposals; BoxDecodeAndClipFunctor(const T *anchor, const T *deltas, const T *var, const int *index, const T *im_info, T *proposals, bool pixel_offset = true) : anchor(anchor), deltas(deltas), var(var), index(index), im_info(im_info), proposals(proposals), pixel_offset(pixel_offset) {} T bbox_clip_default{static_cast<T>(kBBoxClipDefault)}; __device__ void operator()(size_t i) { int k = index[i] * 4; T axmin = anchor[k]; T aymin = anchor[k + 1]; T axmax = anchor[k + 2]; T aymax = anchor[k + 3]; T offset = pixel_offset ? 
static_cast<T>(1.0) : 0; T w = axmax - axmin + offset; T h = aymax - aymin + offset; T cx = axmin + 0.5 * w; T cy = aymin + 0.5 * h; T dxmin = deltas[k]; T dymin = deltas[k + 1]; T dxmax = deltas[k + 2]; T dymax = deltas[k + 3]; T d_cx, d_cy, d_w, d_h; if (var) { d_cx = cx + dxmin * w * var[k]; d_cy = cy + dymin * h * var[k + 1]; d_w = exp(Min(dxmax * var[k + 2], bbox_clip_default)) * w; d_h = exp(Min(dymax * var[k + 3], bbox_clip_default)) * h; } else { d_cx = cx + dxmin * w; d_cy = cy + dymin * h; d_w = exp(Min(dxmax, bbox_clip_default)) * w; d_h = exp(Min(dymax, bbox_clip_default)) * h; } T oxmin = d_cx - d_w * 0.5; T oymin = d_cy - d_h * 0.5; T oxmax = d_cx + d_w * 0.5 - offset; T oymax = d_cy + d_h * 0.5 - offset; proposals[i * 4] = Max(Min(oxmin, im_info[1] - offset), 0.); proposals[i * 4 + 1] = Max(Min(oymin, im_info[0] - offset), 0.); proposals[i * 4 + 2] = Max(Min(oxmax, im_info[1] - offset), 0.); proposals[i * 4 + 3] = Max(Min(oymax, im_info[0] - offset), 0.); } __device__ __forceinline__ T Min(T a, T b) const { return a > b ? b : a; } __device__ __forceinline__ T Max(T a, T b) const { return a > b ? a : b; } }; template <typename T, int BlockSize> static __global__ void FilterBBoxes(const T *bboxes, const T *im_info, const T min_size, const int num, int *keep_num, int *keep, bool is_scale = true, bool pixel_offset = true) { T im_h = im_info[0]; T im_w = im_info[1]; int cnt = 0; __shared__ int keep_index[BlockSize]; CUDA_KERNEL_LOOP(i, num) { keep_index[threadIdx.x] = -1; __syncthreads(); int k = i * 4; T xmin = bboxes[k]; T ymin = bboxes[k + 1]; T xmax = bboxes[k + 2]; T ymax = bboxes[k + 3]; T offset = pixel_offset ? static_cast<T>(1.0) : 0; T w = xmax - xmin + offset; T h = ymax - ymin + offset; if (pixel_offset) { T cx = xmin + w / 2.; T cy = ymin + h / 2.; if (is_scale) { w = (xmax - xmin) / im_info[2] + 1.; h = (ymax - ymin) / im_info[2] + 1.; } if (w >= min_size && h >= min_size && cx <= im_w && cy <= im_h) { keep_index[threadIdx.x] = i; } } else { if (w >= min_size && h >= min_size) { keep_index[threadIdx.x] = i; } } __syncthreads(); if (threadIdx.x == 0) { int size = (num - i) < BlockSize ? num - i : BlockSize; for (int j = 0; j < size; ++j) { if (keep_index[j] > -1) { keep[cnt++] = keep_index[j]; } } } __syncthreads(); } if (threadIdx.x == 0) { keep_num[0] = cnt; } } static __device__ float IoU(const float *a, const float *b, const bool pixel_offset = true) { float offset = pixel_offset ? 
static_cast<float>(1.0) : 0; float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + offset, 0.f), height = max(bottom - top + offset, 0.f); float inter_s = width * height; float s_a = (a[2] - a[0] + offset) * (a[3] - a[1] + offset); float s_b = (b[2] - b[0] + offset) * (b[3] - b[1] + offset); return inter_s / (s_a + s_b - inter_s); } static __global__ void NMSKernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, uint64_t *dev_mask, bool pixel_offset = true) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; const int row_size = min(n_boxes - row_start * kThreadsPerBlock, kThreadsPerBlock); const int col_size = min(n_boxes - col_start * kThreadsPerBlock, kThreadsPerBlock); __shared__ float block_boxes[kThreadsPerBlock * 4]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 4 + 0] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 0]; block_boxes[threadIdx.x * 4 + 1] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 1]; block_boxes[threadIdx.x * 4 + 2] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 2]; block_boxes[threadIdx.x * 4 + 3] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 3]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = kThreadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 4; int i = 0; uint64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (IoU(cur_box, block_boxes + i * 4, pixel_offset) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, kThreadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } template <typename T> static void NMS(const phi::GPUContext &ctx, const DenseTensor &proposals, const DenseTensor &sorted_indices, const T nms_threshold, DenseTensor *keep_out, bool pixel_offset = true) { int boxes_num = proposals.dims()[0]; const int col_blocks = DIVUP(boxes_num, kThreadsPerBlock); dim3 blocks(DIVUP(boxes_num, kThreadsPerBlock), DIVUP(boxes_num, kThreadsPerBlock)); dim3 threads(kThreadsPerBlock); const T *boxes = proposals.data<T>(); auto place = ctx.GetPlace(); auto mask_ptr = phi::memory_utils::Alloc( place, boxes_num * col_blocks * sizeof(uint64_t), phi::Stream(reinterpret_cast<phi::StreamId>(ctx.stream()))); uint64_t *mask_dev = reinterpret_cast<uint64_t *>(mask_ptr->ptr()); hipLaunchKernelGGL(( NMSKernel), dim3(blocks), dim3(threads), 0, ctx.stream(), boxes_num, nms_threshold, boxes, mask_dev, pixel_offset); std::vector<uint64_t> remv(col_blocks); memset(&remv[0], 0, sizeof(uint64_t) * col_blocks); std::vector<uint64_t> mask_host(boxes_num * col_blocks); memory_utils::Copy(CPUPlace(), mask_host.data(), place, mask_dev, boxes_num * col_blocks * sizeof(uint64_t), ctx.stream()); std::vector<int> keep_vec; int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / kThreadsPerBlock; int inblock = i % kThreadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { ++num_to_keep; keep_vec.push_back(i); uint64_t *p = mask_host.data() + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } keep_out->Resize(phi::make_ddim({num_to_keep})); int *keep = ctx.template Alloc<int>(keep_out); memory_utils::Copy(place, keep, CPUPlace(), keep_vec.data(), sizeof(int) * num_to_keep, ctx.stream()); ctx.Wait(); } template <typename T> static std::pair<DenseTensor, 
DenseTensor> ProposalForOneImage( const phi::GPUContext &ctx, const DenseTensor &im_shape, const DenseTensor &anchors, const DenseTensor &variances, const DenseTensor &bbox_deltas, // [M, 4] const DenseTensor &scores, // [N, 1] int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta, bool pixel_offset) { // 1. pre nms DenseTensor scores_sort, index_sort; SortDescending<T>(ctx, scores, &scores_sort, &index_sort); int num = scores.numel(); int pre_nms_num = (pre_nms_top_n <= 0 || pre_nms_top_n > num) ? scores.numel() : pre_nms_top_n; scores_sort.Resize(phi::make_ddim({pre_nms_num, 1})); index_sort.Resize(phi::make_ddim({pre_nms_num, 1})); // 2. box decode and clipping DenseTensor proposals; proposals.Resize(phi::make_ddim({pre_nms_num, 4})); ctx.template Alloc<T>(&proposals); { phi::funcs::ForRange<phi::GPUContext> for_range(ctx, pre_nms_num); for_range(BoxDecodeAndClipFunctor<T>{anchors.data<T>(), bbox_deltas.data<T>(), variances.data<T>(), index_sort.data<int>(), im_shape.data<T>(), proposals.data<T>(), pixel_offset}); } // 3. filter DenseTensor keep_index, keep_num_t; keep_index.Resize(phi::make_ddim({pre_nms_num})); ctx.template Alloc<int>(&keep_index); keep_num_t.Resize(phi::make_ddim({1})); ctx.template Alloc<int>(&keep_num_t); min_size = ::max(min_size, 1.0f); auto stream = ctx.stream(); hipLaunchKernelGGL(( FilterBBoxes<T, 512>), dim3(1), dim3(512), 0, stream, proposals.data<T>(), im_shape.data<T>(), min_size, pre_nms_num, keep_num_t.data<int>(), keep_index.data<int>(), false, pixel_offset); int keep_num; const auto gpu_place = ctx.GetPlace(); memory_utils::Copy(CPUPlace(), &keep_num, gpu_place, keep_num_t.data<int>(), sizeof(int), ctx.stream()); ctx.Wait(); keep_index.Resize(phi::make_ddim({keep_num})); DenseTensor scores_filter, proposals_filter; // Handle the case when there is no keep index left if (keep_num == 0) { phi::funcs::SetConstant<phi::GPUContext, T> set_zero; proposals_filter.Resize(phi::make_ddim({1, 4})); ctx.template Alloc<T>(&proposals_filter); scores_filter.Resize(phi::make_ddim({1, 1})); ctx.template Alloc<T>(&scores_filter); set_zero(ctx, &proposals_filter, static_cast<T>(0)); set_zero(ctx, &scores_filter, static_cast<T>(0)); return std::make_pair(proposals_filter, scores_filter); } proposals_filter.Resize(phi::make_ddim({keep_num, 4})); ctx.template Alloc<T>(&proposals_filter); scores_filter.Resize(phi::make_ddim({keep_num, 1})); ctx.template Alloc<T>(&scores_filter); phi::funcs::GPUGather<T>(ctx, proposals, keep_index, &proposals_filter); phi::funcs::GPUGather<T>(ctx, scores_sort, keep_index, &scores_filter); if (nms_thresh <= 0) { return std::make_pair(proposals_filter, scores_filter); } // 4. 
nms DenseTensor keep_nms; NMS<T>( ctx, proposals_filter, keep_index, nms_thresh, &keep_nms, pixel_offset); if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) { keep_nms.Resize(phi::make_ddim({post_nms_top_n})); } DenseTensor scores_nms, proposals_nms; proposals_nms.Resize(phi::make_ddim({keep_nms.numel(), 4})); ctx.template Alloc<T>(&proposals_nms); scores_nms.Resize(phi::make_ddim({keep_nms.numel(), 1})); ctx.template Alloc<T>(&scores_nms); phi::funcs::GPUGather<T>(ctx, proposals_filter, keep_nms, &proposals_nms); phi::funcs::GPUGather<T>(ctx, scores_filter, keep_nms, &scores_nms); return std::make_pair(proposals_nms, scores_nms); } template <typename T, typename Context> void GenerateProposalsKernel(const Context &ctx, const DenseTensor &scores, const DenseTensor &bbox_deltas, const DenseTensor &im_shape, const DenseTensor &anchors, const DenseTensor &variances, int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta, bool pixel_offset, DenseTensor *rpn_rois, DenseTensor *rpn_roi_probs, DenseTensor *rpn_rois_num) { PADDLE_ENFORCE_GE( eta, 1., errors::InvalidArgument("Not support adaptive NMS. The attribute 'eta' " "should not less than 1. But received eta=[%d]", eta)); auto scores_dim = scores.dims(); int64_t num = scores_dim[0]; int64_t c_score = scores_dim[1]; int64_t h_score = scores_dim[2]; int64_t w_score = scores_dim[3]; auto bbox_dim = bbox_deltas.dims(); int64_t c_bbox = bbox_dim[1]; int64_t h_bbox = bbox_dim[2]; int64_t w_bbox = bbox_dim[3]; DenseTensor bbox_deltas_swap, scores_swap; bbox_deltas_swap.Resize(phi::make_ddim({num, h_bbox, w_bbox, c_bbox})); ctx.template Alloc<T>(&bbox_deltas_swap); scores_swap.Resize(phi::make_ddim({num, h_score, w_score, c_score})); ctx.template Alloc<T>(&scores_swap); phi::funcs::Transpose<phi::GPUContext, T, 4> trans; std::vector<int> axis = {0, 2, 3, 1}; trans(ctx, bbox_deltas, &bbox_deltas_swap, axis); trans(ctx, scores, &scores_swap, axis); DenseTensor tmp_anchors = anchors; DenseTensor tmp_variances = variances; tmp_anchors.Resize(phi::make_ddim({tmp_anchors.numel() / 4, 4})); tmp_variances.Resize(phi::make_ddim({tmp_variances.numel() / 4, 4})); rpn_rois->Resize(phi::make_ddim({bbox_deltas.numel() / 4, 4})); ctx.template Alloc<T>(rpn_rois); rpn_roi_probs->Resize(phi::make_ddim({scores.numel(), 1})); ctx.template Alloc<T>(rpn_roi_probs); T *rpn_rois_data = rpn_rois->data<T>(); T *rpn_roi_probs_data = rpn_roi_probs->data<T>(); auto place = ctx.GetPlace(); auto cpu_place = phi::CPUPlace(); int64_t num_proposals = 0; std::vector<size_t> offset(1, 0); std::vector<int> tmp_num; for (int64_t i = 0; i < num; ++i) { DenseTensor im_shape_slice = im_shape.Slice(i, i + 1); DenseTensor bbox_deltas_slice = bbox_deltas_swap.Slice(i, i + 1); DenseTensor scores_slice = scores_swap.Slice(i, i + 1); bbox_deltas_slice.Resize(phi::make_ddim({h_bbox * w_bbox * c_bbox / 4, 4})); scores_slice.Resize(phi::make_ddim({h_score * w_score * c_score, 1})); std::pair<DenseTensor, DenseTensor> box_score_pair = ProposalForOneImage<T>(ctx, im_shape_slice, tmp_anchors, tmp_variances, bbox_deltas_slice, scores_slice, pre_nms_top_n, post_nms_top_n, nms_thresh, min_size, eta, pixel_offset); DenseTensor &proposals = box_score_pair.first; DenseTensor &nscores = box_score_pair.second; memory_utils::Copy(place, rpn_rois_data + num_proposals * 4, place, proposals.data<T>(), sizeof(T) * proposals.numel(), ctx.stream()); memory_utils::Copy(place, rpn_roi_probs_data + num_proposals, place, nscores.data<T>(), sizeof(T) * nscores.numel(), ctx.stream()); 
ctx.Wait(); num_proposals += proposals.dims()[0]; offset.emplace_back(num_proposals); tmp_num.push_back(proposals.dims()[0]); } if (rpn_rois_num != nullptr) { rpn_rois_num->Resize(phi::make_ddim({num})); ctx.template Alloc<int>(rpn_rois_num); int *num_data = rpn_rois_num->data<int>(); memory_utils::Copy(place, num_data, cpu_place, &tmp_num[0], sizeof(int) * num, ctx.stream()); rpn_rois_num->Resize(phi::make_ddim({num})); } phi::LoD lod; lod.emplace_back(offset); rpn_rois->Resize(phi::make_ddim({num_proposals, 4})); rpn_roi_probs->Resize(phi::make_ddim({num_proposals, 1})); } } // namespace phi PD_REGISTER_KERNEL( generate_proposals, GPU, ALL_LAYOUT, phi::GenerateProposalsKernel, float) { kernel->OutputAt(2).SetDataType(phi::DataType::INT32); }
832a544c14709566d4f32d4bdb0492e96fcd9e12.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/generate_proposals_kernel.h" #include <algorithm> #include <vector> #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/detection/bbox_util.h" #include "paddle/phi/kernels/funcs/for_range.h" #include "paddle/phi/kernels/funcs/gather.cu.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) int const kThreadsPerBlock = sizeof(uint64_t) * 8; static const double kBBoxClipDefault = std::log(1000.0 / 16.0); template <typename T> static void SortDescending(const phi::GPUContext &ctx, const DenseTensor &value, DenseTensor *value_out, DenseTensor *index_out) { int num = static_cast<int>(value.numel()); DenseTensor index_in_t; index_in_t.Resize(phi::make_ddim({num})); int *idx_in = ctx.template Alloc<int>(&index_in_t); phi::funcs::ForRange<phi::GPUContext> for_range(ctx, num); for_range(funcs::RangeInitFunctor{0, 1, idx_in}); index_out->Resize(phi::make_ddim({num})); int *idx_out = ctx.template Alloc<int>(index_out); const T *keys_in = value.data<T>(); value_out->Resize(phi::make_ddim({num})); T *keys_out = ctx.template Alloc<T>(value_out); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairsDescending<T, int>(nullptr, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num, 0, sizeof(T) * 8, ctx.stream()); // Allocate temporary storage auto place = ctx.GetPlace(); auto d_temp_storage = phi::memory_utils::Alloc(place, temp_storage_bytes); // Run sorting operation cub::DeviceRadixSort::SortPairsDescending<T, int>(d_temp_storage->ptr(), temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num, 0, sizeof(T) * 8, ctx.stream()); } template <typename T> struct BoxDecodeAndClipFunctor { const T *anchor; const T *deltas; const T *var; const int *index; const T *im_info; const bool pixel_offset; T *proposals; BoxDecodeAndClipFunctor(const T *anchor, const T *deltas, const T *var, const int *index, const T *im_info, T *proposals, bool pixel_offset = true) : anchor(anchor), deltas(deltas), var(var), index(index), im_info(im_info), proposals(proposals), pixel_offset(pixel_offset) {} T bbox_clip_default{static_cast<T>(kBBoxClipDefault)}; __device__ void operator()(size_t i) { int k = index[i] * 4; T axmin = anchor[k]; T aymin = anchor[k + 1]; T axmax = anchor[k + 2]; T aymax = anchor[k + 3]; T offset = pixel_offset ? 
static_cast<T>(1.0) : 0; T w = axmax - axmin + offset; T h = aymax - aymin + offset; T cx = axmin + 0.5 * w; T cy = aymin + 0.5 * h; T dxmin = deltas[k]; T dymin = deltas[k + 1]; T dxmax = deltas[k + 2]; T dymax = deltas[k + 3]; T d_cx, d_cy, d_w, d_h; if (var) { d_cx = cx + dxmin * w * var[k]; d_cy = cy + dymin * h * var[k + 1]; d_w = exp(Min(dxmax * var[k + 2], bbox_clip_default)) * w; d_h = exp(Min(dymax * var[k + 3], bbox_clip_default)) * h; } else { d_cx = cx + dxmin * w; d_cy = cy + dymin * h; d_w = exp(Min(dxmax, bbox_clip_default)) * w; d_h = exp(Min(dymax, bbox_clip_default)) * h; } T oxmin = d_cx - d_w * 0.5; T oymin = d_cy - d_h * 0.5; T oxmax = d_cx + d_w * 0.5 - offset; T oymax = d_cy + d_h * 0.5 - offset; proposals[i * 4] = Max(Min(oxmin, im_info[1] - offset), 0.); proposals[i * 4 + 1] = Max(Min(oymin, im_info[0] - offset), 0.); proposals[i * 4 + 2] = Max(Min(oxmax, im_info[1] - offset), 0.); proposals[i * 4 + 3] = Max(Min(oymax, im_info[0] - offset), 0.); } __device__ __forceinline__ T Min(T a, T b) const { return a > b ? b : a; } __device__ __forceinline__ T Max(T a, T b) const { return a > b ? a : b; } }; template <typename T, int BlockSize> static __global__ void FilterBBoxes(const T *bboxes, const T *im_info, const T min_size, const int num, int *keep_num, int *keep, bool is_scale = true, bool pixel_offset = true) { T im_h = im_info[0]; T im_w = im_info[1]; int cnt = 0; __shared__ int keep_index[BlockSize]; CUDA_KERNEL_LOOP(i, num) { keep_index[threadIdx.x] = -1; __syncthreads(); int k = i * 4; T xmin = bboxes[k]; T ymin = bboxes[k + 1]; T xmax = bboxes[k + 2]; T ymax = bboxes[k + 3]; T offset = pixel_offset ? static_cast<T>(1.0) : 0; T w = xmax - xmin + offset; T h = ymax - ymin + offset; if (pixel_offset) { T cx = xmin + w / 2.; T cy = ymin + h / 2.; if (is_scale) { w = (xmax - xmin) / im_info[2] + 1.; h = (ymax - ymin) / im_info[2] + 1.; } if (w >= min_size && h >= min_size && cx <= im_w && cy <= im_h) { keep_index[threadIdx.x] = i; } } else { if (w >= min_size && h >= min_size) { keep_index[threadIdx.x] = i; } } __syncthreads(); if (threadIdx.x == 0) { int size = (num - i) < BlockSize ? num - i : BlockSize; for (int j = 0; j < size; ++j) { if (keep_index[j] > -1) { keep[cnt++] = keep_index[j]; } } } __syncthreads(); } if (threadIdx.x == 0) { keep_num[0] = cnt; } } static __device__ float IoU(const float *a, const float *b, const bool pixel_offset = true) { float offset = pixel_offset ? 
static_cast<float>(1.0) : 0; float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + offset, 0.f), height = max(bottom - top + offset, 0.f); float inter_s = width * height; float s_a = (a[2] - a[0] + offset) * (a[3] - a[1] + offset); float s_b = (b[2] - b[0] + offset) * (b[3] - b[1] + offset); return inter_s / (s_a + s_b - inter_s); } static __global__ void NMSKernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, uint64_t *dev_mask, bool pixel_offset = true) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; const int row_size = min(n_boxes - row_start * kThreadsPerBlock, kThreadsPerBlock); const int col_size = min(n_boxes - col_start * kThreadsPerBlock, kThreadsPerBlock); __shared__ float block_boxes[kThreadsPerBlock * 4]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 4 + 0] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 0]; block_boxes[threadIdx.x * 4 + 1] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 1]; block_boxes[threadIdx.x * 4 + 2] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 2]; block_boxes[threadIdx.x * 4 + 3] = dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 3]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = kThreadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 4; int i = 0; uint64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (IoU(cur_box, block_boxes + i * 4, pixel_offset) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, kThreadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } template <typename T> static void NMS(const phi::GPUContext &ctx, const DenseTensor &proposals, const DenseTensor &sorted_indices, const T nms_threshold, DenseTensor *keep_out, bool pixel_offset = true) { int boxes_num = proposals.dims()[0]; const int col_blocks = DIVUP(boxes_num, kThreadsPerBlock); dim3 blocks(DIVUP(boxes_num, kThreadsPerBlock), DIVUP(boxes_num, kThreadsPerBlock)); dim3 threads(kThreadsPerBlock); const T *boxes = proposals.data<T>(); auto place = ctx.GetPlace(); auto mask_ptr = phi::memory_utils::Alloc( place, boxes_num * col_blocks * sizeof(uint64_t), phi::Stream(reinterpret_cast<phi::StreamId>(ctx.stream()))); uint64_t *mask_dev = reinterpret_cast<uint64_t *>(mask_ptr->ptr()); NMSKernel<<<blocks, threads, 0, ctx.stream()>>>( boxes_num, nms_threshold, boxes, mask_dev, pixel_offset); std::vector<uint64_t> remv(col_blocks); memset(&remv[0], 0, sizeof(uint64_t) * col_blocks); std::vector<uint64_t> mask_host(boxes_num * col_blocks); memory_utils::Copy(CPUPlace(), mask_host.data(), place, mask_dev, boxes_num * col_blocks * sizeof(uint64_t), ctx.stream()); std::vector<int> keep_vec; int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / kThreadsPerBlock; int inblock = i % kThreadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { ++num_to_keep; keep_vec.push_back(i); uint64_t *p = mask_host.data() + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } keep_out->Resize(phi::make_ddim({num_to_keep})); int *keep = ctx.template Alloc<int>(keep_out); memory_utils::Copy(place, keep, CPUPlace(), keep_vec.data(), sizeof(int) * num_to_keep, ctx.stream()); ctx.Wait(); } template <typename T> static std::pair<DenseTensor, DenseTensor> ProposalForOneImage( 
const phi::GPUContext &ctx, const DenseTensor &im_shape, const DenseTensor &anchors, const DenseTensor &variances, const DenseTensor &bbox_deltas, // [M, 4] const DenseTensor &scores, // [N, 1] int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta, bool pixel_offset) { // 1. pre nms DenseTensor scores_sort, index_sort; SortDescending<T>(ctx, scores, &scores_sort, &index_sort); int num = scores.numel(); int pre_nms_num = (pre_nms_top_n <= 0 || pre_nms_top_n > num) ? scores.numel() : pre_nms_top_n; scores_sort.Resize(phi::make_ddim({pre_nms_num, 1})); index_sort.Resize(phi::make_ddim({pre_nms_num, 1})); // 2. box decode and clipping DenseTensor proposals; proposals.Resize(phi::make_ddim({pre_nms_num, 4})); ctx.template Alloc<T>(&proposals); { phi::funcs::ForRange<phi::GPUContext> for_range(ctx, pre_nms_num); for_range(BoxDecodeAndClipFunctor<T>{anchors.data<T>(), bbox_deltas.data<T>(), variances.data<T>(), index_sort.data<int>(), im_shape.data<T>(), proposals.data<T>(), pixel_offset}); } // 3. filter DenseTensor keep_index, keep_num_t; keep_index.Resize(phi::make_ddim({pre_nms_num})); ctx.template Alloc<int>(&keep_index); keep_num_t.Resize(phi::make_ddim({1})); ctx.template Alloc<int>(&keep_num_t); min_size = std::max(min_size, 1.0f); auto stream = ctx.stream(); FilterBBoxes<T, 512><<<1, 512, 0, stream>>>(proposals.data<T>(), im_shape.data<T>(), min_size, pre_nms_num, keep_num_t.data<int>(), keep_index.data<int>(), false, pixel_offset); int keep_num; const auto gpu_place = ctx.GetPlace(); memory_utils::Copy(CPUPlace(), &keep_num, gpu_place, keep_num_t.data<int>(), sizeof(int), ctx.stream()); ctx.Wait(); keep_index.Resize(phi::make_ddim({keep_num})); DenseTensor scores_filter, proposals_filter; // Handle the case when there is no keep index left if (keep_num == 0) { phi::funcs::SetConstant<phi::GPUContext, T> set_zero; proposals_filter.Resize(phi::make_ddim({1, 4})); ctx.template Alloc<T>(&proposals_filter); scores_filter.Resize(phi::make_ddim({1, 1})); ctx.template Alloc<T>(&scores_filter); set_zero(ctx, &proposals_filter, static_cast<T>(0)); set_zero(ctx, &scores_filter, static_cast<T>(0)); return std::make_pair(proposals_filter, scores_filter); } proposals_filter.Resize(phi::make_ddim({keep_num, 4})); ctx.template Alloc<T>(&proposals_filter); scores_filter.Resize(phi::make_ddim({keep_num, 1})); ctx.template Alloc<T>(&scores_filter); phi::funcs::GPUGather<T>(ctx, proposals, keep_index, &proposals_filter); phi::funcs::GPUGather<T>(ctx, scores_sort, keep_index, &scores_filter); if (nms_thresh <= 0) { return std::make_pair(proposals_filter, scores_filter); } // 4. 
nms DenseTensor keep_nms; NMS<T>( ctx, proposals_filter, keep_index, nms_thresh, &keep_nms, pixel_offset); if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) { keep_nms.Resize(phi::make_ddim({post_nms_top_n})); } DenseTensor scores_nms, proposals_nms; proposals_nms.Resize(phi::make_ddim({keep_nms.numel(), 4})); ctx.template Alloc<T>(&proposals_nms); scores_nms.Resize(phi::make_ddim({keep_nms.numel(), 1})); ctx.template Alloc<T>(&scores_nms); phi::funcs::GPUGather<T>(ctx, proposals_filter, keep_nms, &proposals_nms); phi::funcs::GPUGather<T>(ctx, scores_filter, keep_nms, &scores_nms); return std::make_pair(proposals_nms, scores_nms); } template <typename T, typename Context> void GenerateProposalsKernel(const Context &ctx, const DenseTensor &scores, const DenseTensor &bbox_deltas, const DenseTensor &im_shape, const DenseTensor &anchors, const DenseTensor &variances, int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta, bool pixel_offset, DenseTensor *rpn_rois, DenseTensor *rpn_roi_probs, DenseTensor *rpn_rois_num) { PADDLE_ENFORCE_GE( eta, 1., errors::InvalidArgument("Not support adaptive NMS. The attribute 'eta' " "should not less than 1. But received eta=[%d]", eta)); auto scores_dim = scores.dims(); int64_t num = scores_dim[0]; int64_t c_score = scores_dim[1]; int64_t h_score = scores_dim[2]; int64_t w_score = scores_dim[3]; auto bbox_dim = bbox_deltas.dims(); int64_t c_bbox = bbox_dim[1]; int64_t h_bbox = bbox_dim[2]; int64_t w_bbox = bbox_dim[3]; DenseTensor bbox_deltas_swap, scores_swap; bbox_deltas_swap.Resize(phi::make_ddim({num, h_bbox, w_bbox, c_bbox})); ctx.template Alloc<T>(&bbox_deltas_swap); scores_swap.Resize(phi::make_ddim({num, h_score, w_score, c_score})); ctx.template Alloc<T>(&scores_swap); phi::funcs::Transpose<phi::GPUContext, T, 4> trans; std::vector<int> axis = {0, 2, 3, 1}; trans(ctx, bbox_deltas, &bbox_deltas_swap, axis); trans(ctx, scores, &scores_swap, axis); DenseTensor tmp_anchors = anchors; DenseTensor tmp_variances = variances; tmp_anchors.Resize(phi::make_ddim({tmp_anchors.numel() / 4, 4})); tmp_variances.Resize(phi::make_ddim({tmp_variances.numel() / 4, 4})); rpn_rois->Resize(phi::make_ddim({bbox_deltas.numel() / 4, 4})); ctx.template Alloc<T>(rpn_rois); rpn_roi_probs->Resize(phi::make_ddim({scores.numel(), 1})); ctx.template Alloc<T>(rpn_roi_probs); T *rpn_rois_data = rpn_rois->data<T>(); T *rpn_roi_probs_data = rpn_roi_probs->data<T>(); auto place = ctx.GetPlace(); auto cpu_place = phi::CPUPlace(); int64_t num_proposals = 0; std::vector<size_t> offset(1, 0); std::vector<int> tmp_num; for (int64_t i = 0; i < num; ++i) { DenseTensor im_shape_slice = im_shape.Slice(i, i + 1); DenseTensor bbox_deltas_slice = bbox_deltas_swap.Slice(i, i + 1); DenseTensor scores_slice = scores_swap.Slice(i, i + 1); bbox_deltas_slice.Resize(phi::make_ddim({h_bbox * w_bbox * c_bbox / 4, 4})); scores_slice.Resize(phi::make_ddim({h_score * w_score * c_score, 1})); std::pair<DenseTensor, DenseTensor> box_score_pair = ProposalForOneImage<T>(ctx, im_shape_slice, tmp_anchors, tmp_variances, bbox_deltas_slice, scores_slice, pre_nms_top_n, post_nms_top_n, nms_thresh, min_size, eta, pixel_offset); DenseTensor &proposals = box_score_pair.first; DenseTensor &nscores = box_score_pair.second; memory_utils::Copy(place, rpn_rois_data + num_proposals * 4, place, proposals.data<T>(), sizeof(T) * proposals.numel(), ctx.stream()); memory_utils::Copy(place, rpn_roi_probs_data + num_proposals, place, nscores.data<T>(), sizeof(T) * nscores.numel(), ctx.stream()); 
ctx.Wait(); num_proposals += proposals.dims()[0]; offset.emplace_back(num_proposals); tmp_num.push_back(proposals.dims()[0]); } if (rpn_rois_num != nullptr) { rpn_rois_num->Resize(phi::make_ddim({num})); ctx.template Alloc<int>(rpn_rois_num); int *num_data = rpn_rois_num->data<int>(); memory_utils::Copy(place, num_data, cpu_place, &tmp_num[0], sizeof(int) * num, ctx.stream()); rpn_rois_num->Resize(phi::make_ddim({num})); } phi::LoD lod; lod.emplace_back(offset); rpn_rois->Resize(phi::make_ddim({num_proposals, 4})); rpn_roi_probs->Resize(phi::make_ddim({num_proposals, 1})); } } // namespace phi PD_REGISTER_KERNEL( generate_proposals, GPU, ALL_LAYOUT, phi::GenerateProposalsKernel, float) { kernel->OutputAt(2).SetDataType(phi::DataType::INT32); }
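The two files above are the hipified and original versions of the same generate_proposals kernel; the mechanical differences are the hipcub include in place of cub, the cub:: to hipcub:: namespace change, and the kernel-launch rewrite. Below is a minimal, self-contained sketch of that launch rewrite, with an invented trivial kernel (scaleSketch) standing in for NMSKernel; it illustrates the pattern and is not code from either file.

#include <cuda_runtime.h>

// Trivial stand-in kernel; the name and body are hypothetical.
__global__ void scaleSketch(float *x, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

void launchSketch(float *d_x, float s, int n, cudaStream_t stream) {
    dim3 blocks((n + 255) / 256), threads(256);
    // CUDA form, as written in the .cu file above:
    scaleSketch<<<blocks, threads, 0, stream>>>(d_x, s, n);
    // Equivalent HIP form produced by hipify, as written in the .hip file above:
    // hipLaunchKernelGGL((scaleSketch), blocks, threads, 0, stream, d_x, s, n);
}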
071a2e8e42a4d548b0e3a19699ae3b3d5e6d38ed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if GOOGLE_CUDA #define EIGEN_USE_GPU #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #define EPSILON 0.01 //NUM_THREADS is equal to N must be 2^k where k<=10 #define NUM_THREADS 1024 //#define DEBUG 0 #include <hipcub/hipcub.hpp> __global__ void constructadjmat(const int n,const float *src, const float *tgt, float* adj, float* prices, int* src2tgt, int*tgt2src, float* average, int* d_offsets, int* d_offsets_end) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int idx = index; idx < n*n; idx += stride) { int i = idx/n; int j = idx-n*i; float x1=src[i*3]; float y1=src[i*3+1]; float z1=src[i*3+2]; float x2=tgt[j*3]; float y2=tgt[j*3+1]; float z2=tgt[j*3+2]; double d1=x1-x2; double d2=y1-y2; double d3=z1-z2; double d =d1*d1+d2*d2+d3*d3; adj[i*n+j]=d; atomicAdd(average, d*1.0/(n*n)); } for (int idx = index; idx < n; idx += stride) { prices[idx]=0; src2tgt[idx]=-1; tgt2src[idx]=-1; d_offsets[idx]=idx*n; d_offsets_end[idx]=idx*n+n; } } __global__ void calcbidvalues(int n, int* src2tgt, float* adj, float* prices, bool* complete, float* values, float* bids) { int INDEX = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int idx = INDEX; idx < n*n; idx += stride) { int i=idx/n; int j= idx-i*n; bids[i*n+j]=-1; if(src2tgt[i]!=-1) { continue; } complete[0]=false; values[i*n+j]= -adj[i*n+j]-prices[j]; } } __global__ void submitbids3(const int n, int* src2tgt, float* prices, float* bids, float best[], int index[], hipcub::KeyValuePair<int,float>* d_max, double eps) { int idx=threadIdx.x+blockIdx.x*blockDim.x; int stride = blockDim.x * gridDim.x; for(int i= idx; i < n; i+=stride) { if(src2tgt[i]==-1) { float best2 =d_max[i].value; bids[index[i]*n+i] = prices[index[i]]+best[i]-best2+eps; } } } __global__ void submitbids2(const int n,int* src2tgt, float best[], int index[], float* values, hipcub::KeyValuePair<int,float>* d_max) { int stride = blockDim.x * gridDim.x; int idx=threadIdx.x+blockIdx.x*blockDim.x; for(int i= idx; i < n; i+=stride) { if(src2tgt[i]==-1) { best[i]=d_max[i].value; index[i]=d_max[i].key; values[i*n+index[i]]=INT_MIN; } } } void submitbids(const int n, int* src2tgt, float* prices, float* bids, float* values, double eps, void* d_temp_storage, size_t temp_storage_bytes, hipcub::KeyValuePair<int,float>* d_max, int* d_offsets, int* d_offsets_end, float* best, int* index) { int count=0; int check[n]; hipcub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, values, d_max, n, d_offsets, d_offsets_end); hipDeviceSynchronize(); int num_blocks=::ceil((n)/NUM_THREADS); hipLaunchKernelGGL(( submitbids2), dim3(num_blocks), dim3(NUM_THREADS), 0, 0, n, src2tgt, best, index, values, d_max); hipcub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, values, d_max, n, d_offsets, d_offsets_end); hipDeviceSynchronize(); // for(int i=0; i < n; i++) // std::cout<<prices[index[i]]<<" "<<best[i]<<" "<<d_max[i].value<<" "<<eps<<" "<<d_offsets[i]<<" "<<d_offsets_end[i]<<std::endl; hipLaunchKernelGGL(( submitbids3), dim3(num_blocks), dim3(NUM_THREADS), 0, 0, n, src2tgt, prices, bids, best, index, d_max,eps); } __global__ void processbids2(const int n, float* bids, int* src2tgt, int* tgt2src, float* prices, float*adj, hipcub::KeyValuePair<int,float> *d_max, int* d_offsets, int* d_offsets_end, float* result, float* result2) { int idx=threadIdx.x+blockIdx.x*blockDim.x; int stride = blockDim.x * gridDim.x; 
for(int j= idx; j < n; j+=stride) { float best=d_max[j].value; int index=d_max[j].key; if( best != -1) { int prev=tgt2src[j]; if(prev!=-1) { src2tgt[prev]=-1; d_offsets[prev]=prev*n; d_offsets_end[prev]=prev*n+n; } src2tgt[index]=j; result[index]=adj[index*n+j]; d_offsets[index]=index*n; d_offsets_end[index]=index*n; tgt2src[j]=index; result2[j]=adj[index*n+j]; prices[j]=bids[j*n+index]; } } } void processbids(const int n, float* bids, int* src2tgt, int* tgt2src, float* prices, float*adj, void* d_temp_storage, size_t temp_storage_bytes, hipcub::KeyValuePair<int,float>* d_max, int* d_offsets, int* d_offsets_end, float* result, float* result2) { hipcub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, bids, d_max, n, d_offsets, d_offsets+1); hipDeviceSynchronize(); int num_blocks=::ceil((n)/NUM_THREADS); hipLaunchKernelGGL(( processbids2), dim3(num_blocks),dim3(NUM_THREADS), 0, 0, n,bids,src2tgt,tgt2src,prices,adj, d_max, d_offsets, d_offsets_end, result, result2); } void NnEMDDistanceKernelLauncher(int b,const int N,const float * src,const float * tgt,float * result,int * src2tgt,float * result2,int * tgt2src){ //N=NUM_THREADS; //float* src, *tgt; float *adj, *prices,*bids, *values; //int *src2tgt, *tgt2src; bool* complete; float* average; float* best; int *index; int* d_offsets, *d_offsets_end; // Allocate Unified Memory accessible from CPU or GPU //hipMallocManaged(&src, 3*N*sizeof(float)); //hipMallocManaged(&tgt, 3*N*sizeof(float)); hipMallocManaged(&adj, N*N*sizeof(float)); hipMallocManaged(&prices, N*sizeof(float)); //init to 0 hipMallocManaged(&bids, N*N*sizeof(float)); hipMallocManaged(&values, N*N*sizeof(float)); hipMallocManaged(&complete, sizeof(bool)); hipMallocManaged(&average, sizeof(float)); hipMallocManaged(&d_offsets, (N+1)*sizeof(int)); hipMallocManaged(&d_offsets_end, N*sizeof(int)); hipMallocManaged(&best, N*sizeof(float)); hipMallocManaged(&index, N*sizeof(int)); d_offsets[N]=N*N; void* d_temp_storage=NULL; size_t temp_storage_bytes = 0; hipcub::KeyValuePair<int,float>* d_max;//(float**) malloc(N*sizeof(float*)) hipMallocManaged(&d_max, N*sizeof(hipcub::KeyValuePair<int,float>)); hipcub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, values, d_max, N, d_offsets, d_offsets_end); hipMallocManaged(&d_temp_storage, temp_storage_bytes*N); #if DEBUG std::cout<<"SOURCE TGT: "<<std::endl; for(int i=0; i < N; i++) { int idx=i*3; printf("%f %f %f %f %f %f\n",src[idx], src[idx+1], src[idx+2], tgt[idx], tgt[idx+1], tgt[idx+2]); } #endif int num_blocks=::min(::ceil((N*N)/NUM_THREADS), 65535.0); hipLaunchKernelGGL(( constructadjmat), dim3(num_blocks), dim3(NUM_THREADS), 0, 0, N, src, tgt, adj, prices, src2tgt, tgt2src, average, d_offsets, d_offsets_end); #ifdef DEBUG hipDeviceSynchronize(); printf("\n "); for(int i=0; i <8;i++) printf("%3d ",i); printf("\n"); for(int i= 0; i < 8; i++) { printf("%3d ",i); for(int j=0; j < 8; j++) { printf("%03d ", (int)adj[i*N+j]); } std::cout<<std::endl; } #endif complete[0]=false; int iter=0; while(1) { iter++; num_blocks=::min(::ceil((N*N)/NUM_THREADS), 65535.0); hipLaunchKernelGGL(( calcbidvalues), dim3(num_blocks), dim3(NUM_THREADS), 0, 0, N, src2tgt, adj, prices, complete, values, bids); #ifdef DEBUG hipDeviceSynchronize(); printf("VALS: \n "); for(int i=0; i <N;i++) printf("%4d ",i); printf("\n"); for(int i= 0; i < N; i++) { printf("%4d ",i); for(int j=0; j < N; j++) { printf("%04d ", (int)values[i*N+j]); } std::cout<<std::endl; } #endif hipDeviceSynchronize(); if(complete[0]) break; complete[0]=true; 
    submitbids(N, src2tgt, prices, bids, values, EPSILON*average[0]*pow(2,iter/200),
               d_temp_storage, temp_storage_bytes, d_max, d_offsets, d_offsets_end, best, index);
#if DEBUG
    hipDeviceSynchronize();
    printf("BIDS: \n ");
    for(int i=0; i <N;i++)
      printf("%3d ",i);
    printf("\n");
    for(int i= 0; i < N; i++) {
      printf("%3d ",i);
      for(int j=0; j < N; j++) {
        printf("%03d ", (int)bids[i*N+j]);
      }
      std::cout<<std::endl;
    }
#endif
    //find max bid for each project
    processbids(N, bids, src2tgt, tgt2src, prices, adj, d_temp_storage, temp_storage_bytes,
                d_max, d_offsets, d_offsets_end,result,result2);
#if DEBUG
    hipDeviceSynchronize();
    printf("Src2tgt: \n ");
    for(int i=0; i <N;i++)
      printf("%3d ",src2tgt[i]);
    printf("\n");
    printf("Prices: \n ");
    for(int i=0; i <N;i++)
      printf("%3d ", (int)prices[i]);
    printf("\n");
    char c;
    std::cin.get(c);
#endif
  }
  //std::cout<<"ITERS: "<<N<<" "<<iter<<std::endl;
  hipFree(values);
  hipFree(adj);
  hipFree(prices);
  hipFree(bids);
  hipFree(complete);
  hipFree(average);
  hipFree(d_offsets);
  hipFree(d_offsets_end);
  hipFree(best);
  hipFree(index);
  hipFree(d_temp_storage);
  hipFree(d_max);
}
#endif
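The file above implements an auction (bidding) assignment between two point sets: constructadjmat builds the squared-distance cost matrix, calcbidvalues and submitbids compute each unassigned source's bid (best value minus second-best value plus epsilon), and processbids awards each target to its highest bidder and raises its price. The host-side sketch below restates that loop sequentially for reference; it is an illustration only. The function and variable names (auction_assign, cost, prices, src2tgt, tgt2src) mirror the kernels but are hypothetical, and the epsilon schedule is simplified to a fixed value rather than the EPSILON*average*pow(2, iter/200) scaling used in the launcher.

// Minimal host-side sketch of the auction algorithm that the kernels above
// parallelise. Illustration only; names mirror the kernels, the epsilon
// schedule and cost matrix are simplified assumptions.
#include <vector>
#include <limits>
#include <cstdio>

void auction_assign(const std::vector<float>& cost, int n,
                    std::vector<int>& src2tgt, std::vector<int>& tgt2src,
                    float eps) {
    std::vector<float> prices(n, 0.0f);
    src2tgt.assign(n, -1);
    tgt2src.assign(n, -1);
    bool done = false;
    while (!done) {
        done = true;
        for (int i = 0; i < n; ++i) {
            if (src2tgt[i] != -1) continue;   // already assigned
            done = false;
            // Find the best and second-best "value" (-cost - price) for source i.
            int best_j = -1;
            float best = -std::numeric_limits<float>::infinity();
            float second = -std::numeric_limits<float>::infinity();
            for (int j = 0; j < n; ++j) {
                float v = -cost[i * n + j] - prices[j];
                if (v > best) { second = best; best = v; best_j = j; }
                else if (v > second) { second = v; }
            }
            // Bid: raise the price of the best target and take it, displacing
            // any previous owner (which re-enters the unassigned pool).
            float bid = prices[best_j] + (best - second) + eps;
            int prev = tgt2src[best_j];
            if (prev != -1) src2tgt[prev] = -1;
            src2tgt[i] = best_j;
            tgt2src[best_j] = i;
            prices[best_j] = bid;
        }
    }
}

int main() {
    // Tiny 3x3 cost matrix (row = source, column = target).
    std::vector<float> cost = {4, 1, 3,
                               2, 0, 5,
                               3, 2, 2};
    std::vector<int> src2tgt, tgt2src;
    auction_assign(cost, 3, src2tgt, tgt2src, 0.01f);
    for (int i = 0; i < 3; ++i)
        std::printf("source %d -> target %d\n", i, src2tgt[i]);
    return 0;
}

With a strictly positive epsilon the loop terminates, and the resulting assignment cost is within roughly n*epsilon of optimal, which is why the launcher scales epsilon with the average squared distance of the current point sets.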
071a2e8e42a4d548b0e3a19699ae3b3d5e6d38ed.cu
#if GOOGLE_CUDA #define EIGEN_USE_GPU #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #define EPSILON 0.01 //NUM_THREADS is equal to N must be 2^k where k<=10 #define NUM_THREADS 1024 //#define DEBUG 0 #include <cub/cub.cuh> __global__ void constructadjmat(const int n,const float *src, const float *tgt, float* adj, float* prices, int* src2tgt, int*tgt2src, float* average, int* d_offsets, int* d_offsets_end) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int idx = index; idx < n*n; idx += stride) { int i = idx/n; int j = idx-n*i; float x1=src[i*3]; float y1=src[i*3+1]; float z1=src[i*3+2]; float x2=tgt[j*3]; float y2=tgt[j*3+1]; float z2=tgt[j*3+2]; double d1=x1-x2; double d2=y1-y2; double d3=z1-z2; double d =d1*d1+d2*d2+d3*d3; adj[i*n+j]=d; atomicAdd(average, d*1.0/(n*n)); } for (int idx = index; idx < n; idx += stride) { prices[idx]=0; src2tgt[idx]=-1; tgt2src[idx]=-1; d_offsets[idx]=idx*n; d_offsets_end[idx]=idx*n+n; } } __global__ void calcbidvalues(int n, int* src2tgt, float* adj, float* prices, bool* complete, float* values, float* bids) { int INDEX = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int idx = INDEX; idx < n*n; idx += stride) { int i=idx/n; int j= idx-i*n; bids[i*n+j]=-1; if(src2tgt[i]!=-1) { continue; } complete[0]=false; values[i*n+j]= -adj[i*n+j]-prices[j]; } } __global__ void submitbids3(const int n, int* src2tgt, float* prices, float* bids, float best[], int index[], cub::KeyValuePair<int,float>* d_max, double eps) { int idx=threadIdx.x+blockIdx.x*blockDim.x; int stride = blockDim.x * gridDim.x; for(int i= idx; i < n; i+=stride) { if(src2tgt[i]==-1) { float best2 =d_max[i].value; bids[index[i]*n+i] = prices[index[i]]+best[i]-best2+eps; } } } __global__ void submitbids2(const int n,int* src2tgt, float best[], int index[], float* values, cub::KeyValuePair<int,float>* d_max) { int stride = blockDim.x * gridDim.x; int idx=threadIdx.x+blockIdx.x*blockDim.x; for(int i= idx; i < n; i+=stride) { if(src2tgt[i]==-1) { best[i]=d_max[i].value; index[i]=d_max[i].key; values[i*n+index[i]]=INT_MIN; } } } void submitbids(const int n, int* src2tgt, float* prices, float* bids, float* values, double eps, void* d_temp_storage, size_t temp_storage_bytes, cub::KeyValuePair<int,float>* d_max, int* d_offsets, int* d_offsets_end, float* best, int* index) { int count=0; int check[n]; cub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, values, d_max, n, d_offsets, d_offsets_end); cudaDeviceSynchronize(); int num_blocks=std::ceil((n)/NUM_THREADS); submitbids2<<<num_blocks, NUM_THREADS>>>(n, src2tgt, best, index, values, d_max); cub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, values, d_max, n, d_offsets, d_offsets_end); cudaDeviceSynchronize(); // for(int i=0; i < n; i++) // std::cout<<prices[index[i]]<<" "<<best[i]<<" "<<d_max[i].value<<" "<<eps<<" "<<d_offsets[i]<<" "<<d_offsets_end[i]<<std::endl; submitbids3<<<num_blocks, NUM_THREADS>>>(n, src2tgt, prices, bids, best, index, d_max,eps); } __global__ void processbids2(const int n, float* bids, int* src2tgt, int* tgt2src, float* prices, float*adj, cub::KeyValuePair<int,float> *d_max, int* d_offsets, int* d_offsets_end, float* result, float* result2) { int idx=threadIdx.x+blockIdx.x*blockDim.x; int stride = blockDim.x * gridDim.x; for(int j= idx; j < n; j+=stride) { float best=d_max[j].value; int index=d_max[j].key; if( best != -1) { int prev=tgt2src[j]; if(prev!=-1) { src2tgt[prev]=-1; d_offsets[prev]=prev*n; 
d_offsets_end[prev]=prev*n+n; } src2tgt[index]=j; result[index]=adj[index*n+j]; d_offsets[index]=index*n; d_offsets_end[index]=index*n; tgt2src[j]=index; result2[j]=adj[index*n+j]; prices[j]=bids[j*n+index]; } } } void processbids(const int n, float* bids, int* src2tgt, int* tgt2src, float* prices, float*adj, void* d_temp_storage, size_t temp_storage_bytes, cub::KeyValuePair<int,float>* d_max, int* d_offsets, int* d_offsets_end, float* result, float* result2) { cub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, bids, d_max, n, d_offsets, d_offsets+1); cudaDeviceSynchronize(); int num_blocks=std::ceil((n)/NUM_THREADS); processbids2<<<num_blocks,NUM_THREADS>>>(n,bids,src2tgt,tgt2src,prices,adj, d_max, d_offsets, d_offsets_end, result, result2); } void NnEMDDistanceKernelLauncher(int b,const int N,const float * src,const float * tgt,float * result,int * src2tgt,float * result2,int * tgt2src){ //N=NUM_THREADS; //float* src, *tgt; float *adj, *prices,*bids, *values; //int *src2tgt, *tgt2src; bool* complete; float* average; float* best; int *index; int* d_offsets, *d_offsets_end; // Allocate Unified Memory – accessible from CPU or GPU //cudaMallocManaged(&src, 3*N*sizeof(float)); //cudaMallocManaged(&tgt, 3*N*sizeof(float)); cudaMallocManaged(&adj, N*N*sizeof(float)); cudaMallocManaged(&prices, N*sizeof(float)); //init to 0 cudaMallocManaged(&bids, N*N*sizeof(float)); cudaMallocManaged(&values, N*N*sizeof(float)); cudaMallocManaged(&complete, sizeof(bool)); cudaMallocManaged(&average, sizeof(float)); cudaMallocManaged(&d_offsets, (N+1)*sizeof(int)); cudaMallocManaged(&d_offsets_end, N*sizeof(int)); cudaMallocManaged(&best, N*sizeof(float)); cudaMallocManaged(&index, N*sizeof(int)); d_offsets[N]=N*N; void* d_temp_storage=NULL; size_t temp_storage_bytes = 0; cub::KeyValuePair<int,float>* d_max;//(float**) malloc(N*sizeof(float*)) cudaMallocManaged(&d_max, N*sizeof(cub::KeyValuePair<int,float>)); cub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, values, d_max, N, d_offsets, d_offsets_end); cudaMallocManaged(&d_temp_storage, temp_storage_bytes*N); #if DEBUG std::cout<<"SOURCE TGT: "<<std::endl; for(int i=0; i < N; i++) { int idx=i*3; printf("%f %f %f %f %f %f\n",src[idx], src[idx+1], src[idx+2], tgt[idx], tgt[idx+1], tgt[idx+2]); } #endif int num_blocks=std::min(std::ceil((N*N)/NUM_THREADS), 65535.0); constructadjmat<<<num_blocks, NUM_THREADS>>>(N, src, tgt, adj, prices, src2tgt, tgt2src, average, d_offsets, d_offsets_end); #ifdef DEBUG cudaDeviceSynchronize(); printf("\n "); for(int i=0; i <8;i++) printf("%3d ",i); printf("\n"); for(int i= 0; i < 8; i++) { printf("%3d ",i); for(int j=0; j < 8; j++) { printf("%03d ", (int)adj[i*N+j]); } std::cout<<std::endl; } #endif complete[0]=false; int iter=0; while(1) { iter++; num_blocks=std::min(std::ceil((N*N)/NUM_THREADS), 65535.0); calcbidvalues<<<num_blocks, NUM_THREADS>>>(N, src2tgt, adj, prices, complete, values, bids); #ifdef DEBUG cudaDeviceSynchronize(); printf("VALS: \n "); for(int i=0; i <N;i++) printf("%4d ",i); printf("\n"); for(int i= 0; i < N; i++) { printf("%4d ",i); for(int j=0; j < N; j++) { printf("%04d ", (int)values[i*N+j]); } std::cout<<std::endl; } #endif cudaDeviceSynchronize(); if(complete[0]) break; complete[0]=true; submitbids(N, src2tgt, prices, bids, values, EPSILON*average[0]*pow(2,iter/200), d_temp_storage, temp_storage_bytes, d_max, d_offsets, d_offsets_end, best, index); #if DEBUG cudaDeviceSynchronize(); printf("BIDS: \n "); for(int i=0; i <N;i++) printf("%3d ",i); printf("\n"); 
    for(int i= 0; i < N; i++) {
      printf("%3d ",i);
      for(int j=0; j < N; j++) {
        printf("%03d ", (int)bids[i*N+j]);
      }
      std::cout<<std::endl;
    }
#endif
    //find max bid for each project
    processbids(N, bids, src2tgt, tgt2src, prices, adj, d_temp_storage, temp_storage_bytes,
                d_max, d_offsets, d_offsets_end,result,result2);
#if DEBUG
    cudaDeviceSynchronize();
    printf("Src2tgt: \n ");
    for(int i=0; i <N;i++)
      printf("%3d ",src2tgt[i]);
    printf("\n");
    printf("Prices: \n ");
    for(int i=0; i <N;i++)
      printf("%3d ", (int)prices[i]);
    printf("\n");
    char c;
    std::cin.get(c);
#endif
  }
  //std::cout<<"ITERS: "<<N<<" "<<iter<<std::endl;
  cudaFree(values);
  cudaFree(adj);
  cudaFree(prices);
  cudaFree(bids);
  cudaFree(complete);
  cudaFree(average);
  cudaFree(d_offsets);
  cudaFree(d_offsets_end);
  cudaFree(best);
  cudaFree(index);
  cudaFree(d_temp_storage);
  cudaFree(d_max);
}
#endif
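Both versions of this launcher rely on the two-phase cub/hipcub DeviceSegmentedReduce::ArgMax pattern: a first call with a null temp-storage pointer only reports how many scratch bytes are needed, and a second call performs the per-segment reduction. The standalone sketch below shows that pattern on a tiny array; the names (d_vals, d_out, d_offsets, d_temp) are illustrative and not taken from the file.

// Two-phase CUB segmented ArgMax: size query, then the actual reduction.
#include <cub/cub.cuh>
#include <cstdio>

int main() {
    const int n = 4;                       // two segments of two values each
    float h_vals[n] = {0.f, 3.f, 5.f, 1.f};
    int h_offsets[3] = {0, 2, 4};          // segment i spans [offsets[i], offsets[i+1])

    float* d_vals;  int* d_offsets;
    cub::KeyValuePair<int, float>* d_out;
    cudaMalloc(&d_vals, n * sizeof(float));
    cudaMalloc(&d_offsets, 3 * sizeof(int));
    cudaMalloc(&d_out, 2 * sizeof(cub::KeyValuePair<int, float>));
    cudaMemcpy(d_vals, h_vals, sizeof(h_vals), cudaMemcpyHostToDevice);
    cudaMemcpy(d_offsets, h_offsets, sizeof(h_offsets), cudaMemcpyHostToDevice);

    // Phase 1: query the required scratch size.
    void* d_temp = nullptr;  size_t temp_bytes = 0;
    cub::DeviceSegmentedReduce::ArgMax(d_temp, temp_bytes, d_vals, d_out,
                                       2, d_offsets, d_offsets + 1);
    cudaMalloc(&d_temp, temp_bytes);

    // Phase 2: run the segmented arg-max.
    cub::DeviceSegmentedReduce::ArgMax(d_temp, temp_bytes, d_vals, d_out,
                                       2, d_offsets, d_offsets + 1);

    cub::KeyValuePair<int, float> h_out[2];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    for (int s = 0; s < 2; ++s)            // key is the index within the segment
        std::printf("segment %d: argmax %d, max %f\n", s, h_out[s].key, h_out[s].value);

    cudaFree(d_vals); cudaFree(d_offsets); cudaFree(d_out); cudaFree(d_temp);
    return 0;
}

Querying the scratch size first avoids guessing CUB's internal workspace requirements, and the returned key is relative to its segment, which is why the kernels above can use d_max[i].key directly as a column index in [0, N).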
255f23fa8dbe782de071e006abdd446ae0c133ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/ // Written by Angelos Katharopoulos <[email protected]>, // Apoorv Vyas <[email protected]> // // // For modifications made inside namespace nvidia (authored by jdemouth): // // Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software is furnished to do so, // subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // #include <torch/extension.h> #include <assert.h> #include <stdio.h> #define ENABLE_NVIDIA_OPTIMIZATIONS #ifdef ENABLE_NVIDIA_OPTIMIZATIONS namespace nvidia { //////////////////////////////////////////////////////////////////////////////////////////////////// constexpr int THREADS_PER_WARP = 32; //////////////////////////////////////////////////////////////////////////////////////////////////// constexpr int LOW_OCCUPANCY_THRESHOLD = 40; // TODO: Make it HW specific (like 1/2 SMs). //////////////////////////////////////////////////////////////////////////////////////////////////// static inline __device__ __host__ int div_up(int m, int n) { return (m + n-1) / n; } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline __device__ __host__ int round_up(int m, int n) { return div_up(m, n) * n; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T > struct Lmha_params { // The output buffer. Dimensions [B, H, L, M]. T *out; // The input Qs. Dimensions [B, H, L, E]. const T *q; // The input Ks. Dimensions [B, H, L, E]. const T *k; // The input Vs. Dimensions [B, H, L, M]. const T *v; // The different dimensions. int B, L, H, E, M; // The strides for the different tensors. int q_stride_B, q_stride_H, q_stride_L; int k_stride_B, k_stride_H, k_stride_L; int v_stride_B, v_stride_H, v_stride_L; int o_stride_B, o_stride_H, o_stride_L; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, bool GO_BACKWARD, int WARPS, int COLS_PER_THREAD = 4 > __global__ __launch_bounds__(WARPS * THREADS_PER_WARP) void lmha_low_occupancy_kernel(Lmha_params<float> params) { // The number of threads per block. constexpr int THREADS_PER_BLOCK = WARPS * THREADS_PER_WARP; // The number of rows per thread. constexpr int ROWS_PER_THREAD = E / THREADS_PER_WARP; // The number of steps per iteration. 
constexpr int COLS_PER_ITER = WARPS * COLS_PER_THREAD; // Make sure E is a multiple of the warp size. static_assert(E % THREADS_PER_WARP == 0, ""); // Shared memory to store V/O. __shared__ float smem_v[COLS_PER_ITER], smem_o[COLS_PER_ITER]; // Shared memory buffer to performance the reductions. __shared__ float smem_reds[E * WARPS]; // The sequence processed by that block. const int bi = blockIdx.z; // The head processed by that block. const int hi = blockIdx.y; // The hidden cell in the V/output buffers. const int vi = blockIdx.x; // The linear index of the thread. const int tidx = threadIdx.x; // Decompose the block in warp/lane. const int warp = tidx / THREADS_PER_WARP; const int lane = tidx % THREADS_PER_WARP; // The base offset loaded by the thread in Q and K. int offset_q = bi*params.q_stride_B + hi*params.q_stride_H + lane; int offset_k = bi*params.k_stride_B + hi*params.k_stride_H + lane; // If we walk backward, account for the extra offset. if( GO_BACKWARD ) { offset_q += (params.L-1)*params.q_stride_L; offset_k += (params.L-1)*params.k_stride_L; } // Position the warp at the beginning of the proper timestep. if( GO_BACKWARD ) { offset_q -= warp*COLS_PER_THREAD*params.q_stride_L; offset_k -= warp*COLS_PER_THREAD*params.k_stride_L; } else { offset_q += warp*COLS_PER_THREAD*params.q_stride_L; offset_k += warp*COLS_PER_THREAD*params.k_stride_L; } // Determine the base pointers for Q and K. const float *ptr_q = &params.q[offset_q]; const float *ptr_k = &params.k[offset_k]; // Is a given row valid? int valid_qk[ROWS_PER_THREAD]; #pragma unroll for( int ii = 0; ii < ROWS_PER_THREAD; ++ii ) { valid_qk[ii] = lane + ii*THREADS_PER_WARP < params.E; } // The offset to the position loaded by the thread in V. int offset_v = bi*params.v_stride_B + hi*params.v_stride_H + vi; int offset_o = bi*params.o_stride_B + hi*params.o_stride_H + vi; // If we walk backward, account for the extra offset. if( GO_BACKWARD ) { offset_v += (params.L-1)*params.v_stride_L; offset_o += (params.L-1)*params.o_stride_L; } // We load/store a strided matrix of COLS_PER_ITER x OUTPUTS_PER_BLOCK. if( GO_BACKWARD ) { offset_v -= tidx*params.v_stride_L; offset_o -= tidx*params.o_stride_L; } else { offset_v += tidx*params.v_stride_L; offset_o += tidx*params.o_stride_L; } // Determine the base pointer for V. const float *ptr_v = &params.v[offset_v]; // The output pointer. float *ptr_o = &params.out[offset_o]; // The running KVs. float running_kv[ROWS_PER_THREAD]; #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { running_kv[ri] = 0.f; } // Iterate over the timesteps. TODO: Use params.loop_count!!! for( int iter = 0; iter < params.L; iter += COLS_PER_ITER ) { // Each thread loads a matrix of elements. float q[ROWS_PER_THREAD][COLS_PER_THREAD], k[ROWS_PER_THREAD][COLS_PER_THREAD]; // Trigger the memory loads for Q and K. #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { // For Q/K, each warp loads from various timesteps. int ti = iter + warp*COLS_PER_THREAD; if( GO_BACKWARD ) { ti = params.L - 1 - ti; } // Is it a valid access? int valid; if( GO_BACKWARD ) { valid = valid_qk[ri] && ti - ci >= 0; } else { valid = valid_qk[ri] && ti + ci < params.L; } // The extra offset to add. 
if( GO_BACKWARD ) { offset_q = ri*THREADS_PER_WARP - ci*params.q_stride_L; offset_k = ri*THREADS_PER_WARP - ci*params.k_stride_L; } else { offset_q = ri*THREADS_PER_WARP + ci*params.q_stride_L; offset_k = ri*THREADS_PER_WARP + ci*params.k_stride_L; } // Load Q/K if they are valid. q[ri][ci] = valid ? ptr_q[offset_q] : 0.f; k[ri][ci] = valid ? ptr_k[offset_k] : 0.f; } } // For the V tensor, we assign contiguous thread to different loads. So, ti is different. int ti = iter + tidx; if( GO_BACKWARD ) { ti = params.L - 1 - ti; } // Is it a valid access? int valid_vo = tidx < COLS_PER_ITER; if( GO_BACKWARD ) { valid_vo &= ti >= 0; } else { valid_vo &= ti < params.L; } // Trigger the loads for V. float ldg_v = valid_vo ? *ptr_v : 0.f; // Move the load pointers. if( GO_BACKWARD ) { ptr_q -= COLS_PER_ITER*params.q_stride_L; ptr_k -= COLS_PER_ITER*params.k_stride_L; ptr_v -= COLS_PER_ITER*params.v_stride_L; } else { ptr_q += COLS_PER_ITER*params.q_stride_L; ptr_k += COLS_PER_ITER*params.k_stride_L; ptr_v += COLS_PER_ITER*params.v_stride_L; } // Store to shared memory. if( tidx < COLS_PER_ITER ) { smem_v[tidx] = ldg_v; } // Make sure V is in shared memory. __syncthreads(); // Read V from shared memory. float v[COLS_PER_THREAD]; #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { v[ci] = smem_v[warp*COLS_PER_THREAD + ci]; } // Each thread computes local K*V products. float kv[ROWS_PER_THREAD][COLS_PER_THREAD]; #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { kv[ri][ci] = 0.f; } } // Update the K*V^T product. #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { kv[ri][ci] += k[ri][ci] * v[ci]; } } // We must perform the prefix sums within the thread-block. Start with the thread. #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { #pragma unroll for( int ci = 1; ci < COLS_PER_THREAD; ++ci ) { kv[ri][ci] += kv[ri][ci-1]; } } // Store the partial sums to shared memory. Unless we have no inter-warp reduction to perform. #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { smem_reds[warp*E + ri*THREADS_PER_WARP + lane] = kv[ri][COLS_PER_THREAD-1]; } // Make sure the data is in shared memory. __syncthreads(); // Each thread deals with one or more column(s) of the matrix. constexpr int SUMS_PER_THREAD = (E + THREADS_PER_BLOCK-1) / THREADS_PER_BLOCK; #pragma unroll for( int ii = 0, idx = tidx; ii < SUMS_PER_THREAD; ++ii, idx += THREADS_PER_BLOCK ) { if( idx < E ) { float sum = smem_reds[idx]; #pragma unroll for( int jj = 1; jj < WARPS; ++jj ) { smem_reds[idx + jj*E] = sum += smem_reds[idx + jj*E]; } } } // Make sure the reductions are stored in shared memory. __syncthreads(); // Each thread updates his partial products. #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { float sum = running_kv[ri]; if( warp > 0 ) { sum += smem_reds[(warp-1)*E + lane + ri*THREADS_PER_WARP]; } #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { kv[ri][ci] += sum; } } // Compute the partial output values for that thread. float sum[COLS_PER_THREAD]; #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { sum[ci] = q[0][ci] * kv[0][ci]; #pragma unroll for( int ri = 1; ri < ROWS_PER_THREAD; ++ri ) { sum[ci] += q[ri][ci] * kv[ri][ci]; } } // Run the parallel reductions inside the warp. 
#pragma unroll for( int mask = THREADS_PER_WARP / 2; mask >= 1; mask /= 2 ) { #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { sum[ci] += __shfl_xor_sync(uint32_t(-1), sum[ci], mask); } } // Store the final output to shared memory. if( lane == 0 ) { #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { smem_o[warp*COLS_PER_THREAD + ci] = sum[ci]; } } // Make sure the data is in shared memory. __syncthreads(); // Store the output. if( valid_vo ) { *ptr_o = smem_o[tidx]; } // Each thread updates his running kv. #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { running_kv[ri] += smem_reds[(WARPS-1)*E + lane + ri*THREADS_PER_WARP]; } // Move to next location. if( GO_BACKWARD ) { ptr_o -= COLS_PER_ITER*params.o_stride_L; } else { ptr_o += COLS_PER_ITER*params.o_stride_L; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, bool GO_BACKWARD, int WARPS > int lmha_low_occupancy_(const Lmha_params<float> &params) { // Make sure we are not going to launch an invalid grid. if( params.H > 65535 || params.B > 65535 ) { return 1; } // Prepare the grid and trigger the CUDA kernel. dim3 grid; grid.x = params.M; grid.y = params.H; grid.z = params.B; hipLaunchKernelGGL(( lmha_low_occupancy_kernel<E, GO_BACKWARD, WARPS>), dim3(grid), dim3(WARPS*THREADS_PER_WARP), 0, 0, params); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, bool GO_BACKWARD > int lmha_low_occupancy_(const Lmha_params<float> &params, int blocks) { if( params.M * blocks >= 8*LOW_OCCUPANCY_THRESHOLD ) { return lmha_low_occupancy_<E, GO_BACKWARD, 4>(params); } else if( params.M * blocks >= 4*LOW_OCCUPANCY_THRESHOLD ) { return lmha_low_occupancy_<E, GO_BACKWARD, 8>(params); } else { return lmha_low_occupancy_<E, GO_BACKWARD, 16>(params); } return 1; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, typename Params > static inline __device__ __host__ int smem_buffer_elts_(const Params &params) { int M = round_up(params.M, 4); return 2*E + 2*M; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, int THREADS_PER_HEAD, bool GO_BACKWARD > __global__ void lmha_kernel(Lmha_params<float> params) { // Make sure E is a multiple of 4. static_assert(E % 4 == 0, ""); // The amount of shared memory per buffer (2 buffers for double-buffering). const int smem_buffer_elts = smem_buffer_elts_<E>(params); // The M dimension for shared memory. const int M = round_up(params.M, 4); // Shared memory to store Q, K and V. Size is 2*smem_buffer_elts. extern __shared__ float smem_[]; // The various shared memory buffers. float *smem_q = &smem_[0*E]; float *smem_k = &smem_[1*E]; float *smem_v = &smem_[2*E]; float *smem_o = &smem_[2*E + M]; // The index of the shared memory buffer (for double-buffering). int smem_curr = 0; // The sequence processed by that block. const int bi = blockIdx.y; // The head processed by that block. const int hi = blockIdx.x; // The linear index of the thread. const int tidx = threadIdx.x; // The offset to the position loaded by the thread in Q. int offset_q = bi*params.q_stride_B + hi*params.q_stride_H + tidx; // The offset to the position loaded by the thread in K. int offset_k = bi*params.k_stride_B + hi*params.k_stride_H + tidx; // If we walk backward, account for the extra offset. 
if( GO_BACKWARD ) { offset_q += (params.L-1)*params.q_stride_L; offset_k += (params.L-1)*params.k_stride_L; } // Determine the base pointers for Q and K. const float *ptr_q = &params.q[offset_q]; const float *ptr_k = &params.k[offset_k]; // The offset to the position loaded by the thread in V and O. int offset_v = bi*params.v_stride_B + hi*params.v_stride_H + tidx; int offset_o = bi*params.o_stride_B + hi*params.o_stride_H + tidx; // If we walk backward, account for the extra offset. if( GO_BACKWARD ) { offset_v += (params.L-1)*params.v_stride_L; offset_o += (params.L-1)*params.o_stride_L; } // Determine the base pointers for V. const float *ptr_v = &params.v[offset_v]; // Is it an active Q/K thread? const int active_qk = tidx < params.E; // Trigger the memory loads for Q and K. float ldg_q = 0.f, ldg_k = 0.f; if( active_qk ) { ldg_q = *ptr_q; ldg_k = *ptr_k; } // Is it an active V thread? const int active_v = tidx < params.M; // Trigger the memory loads for V. float ldg_v = 0.f; if( active_v ) { ldg_v = *ptr_v; } // Move the load pointers. if( GO_BACKWARD ) { ptr_q -= params.q_stride_L; ptr_k -= params.k_stride_L; ptr_v -= params.v_stride_L; } else { ptr_q += params.q_stride_L; ptr_k += params.k_stride_L; ptr_v += params.v_stride_L; } // The number of FLOAT4s per head. constexpr int FLOAT4s_PER_HEAD = E / 4; // The number of FLOAT4s per thread. constexpr int FLOAT4s_PER_THREAD = FLOAT4s_PER_HEAD / THREADS_PER_HEAD; // The storage for the K*V^T values. float4 kv[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { kv[ii] = make_float4(0.f, 0.f, 0.f, 0.f); } // The output pointer. float *out_ptr = &params.out[offset_o]; // Store to shared memory Q and K. if( tidx < E ) { smem_q[smem_curr*smem_buffer_elts + tidx] = ldg_q; smem_k[smem_curr*smem_buffer_elts + tidx] = ldg_k; } // Store to shared memory V. All threads store valid values. if( tidx < M ) { smem_v[smem_curr*smem_buffer_elts + tidx] = ldg_v; } // The position of the thread in the V dimension. int vo = tidx / THREADS_PER_HEAD; int vi = tidx % THREADS_PER_HEAD; // Iterate over the timesteps. for( int ti = 0; ti < params.L; ++ti ) { // Is it the last iteration? int is_last = ti == params.L - 1; // Trigger the next loads for Q and K. if( !is_last && active_qk ) { ldg_q = *ptr_q; ldg_k = *ptr_k; } // Trigger the next loads for V. if( !is_last && active_v ) { ldg_v = *ptr_v; } // Move the load pointers. if( GO_BACKWARD ) { ptr_q -= params.q_stride_L; ptr_k -= params.k_stride_L; ptr_v -= params.v_stride_L; } else { ptr_q += params.q_stride_L; ptr_k += params.k_stride_L; ptr_v += params.v_stride_L; } // Make sure the data is in shared memory. __syncthreads(); // Each thread loads 4 values from K. float4 k[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { int ki = tidx % THREADS_PER_HEAD * 4 + ii * THREADS_PER_HEAD * 4; k[ii] = *reinterpret_cast<const float4*>(&smem_k[smem_curr*smem_buffer_elts + ki]); } // Each thread loads a single V value. float v = 0.f; if( vo < params.M ) { v = *reinterpret_cast<const float *>(&smem_v[smem_curr*smem_buffer_elts + vo]); } // Update the K*V^T product. #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { kv[ii].x += k[ii].x * v; kv[ii].y += k[ii].y * v; kv[ii].z += k[ii].z * v; kv[ii].w += k[ii].w * v; } // Load the Q values from shared memory. 
float4 q[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { int qi = tidx % THREADS_PER_HEAD * 4 + ii * THREADS_PER_HEAD * 4; q[ii] = *reinterpret_cast<const float4*>(&smem_q[smem_curr*smem_buffer_elts + qi]); } // Compute the partial output value for that thread. float sum = 0.f; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { sum += q[ii].x * kv[ii].x; sum += q[ii].y * kv[ii].y; sum += q[ii].z * kv[ii].z; sum += q[ii].w * kv[ii].w; } // Finalize the computation of the sum (if we have more than 1 thread per head). if( THREADS_PER_HEAD > 1 ) { // Finalize the sum for each head. #pragma unroll for( int mask = THREADS_PER_HEAD / 2; mask >= 1; mask /= 2 ) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } // Store to shared memory. if( vo < M && vi == 0 ) { smem_o[smem_curr*smem_buffer_elts + vo] = sum; } // Make sure the data is in shared memory. __syncthreads(); // Active threads read the data to store. if( active_v ) { sum = smem_o[smem_curr*smem_buffer_elts + tidx]; } } // THREADS_PER_HEAD > 1. // Store the output. All the threads are active. if( active_v ) { *out_ptr = sum; } // Move to next location. if( GO_BACKWARD ) { out_ptr -= params.o_stride_L; } else { out_ptr += params.o_stride_L; } // Move the shared memory buffer. smem_curr = (smem_curr + 1) % 2; // Store to shared memory for Q and K. if( !is_last && tidx < E ) { smem_q[smem_curr*smem_buffer_elts + tidx] = ldg_q; smem_k[smem_curr*smem_buffer_elts + tidx] = ldg_k; } // Store to shared memory for V. if( !is_last && tidx < M ) { smem_v[smem_curr*smem_buffer_elts + tidx] = ldg_v; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, int THREADS_PER_HEAD, bool GO_BACKWARD > int lmha_(const Lmha_params<float> &params) { // The M dimension rounded up to 4. int M = round_up(params.M, 4); // The number of threads in the block. int block = round_up(max(E, M*THREADS_PER_HEAD), 32); if( block > 512 || params.B > 65535 ) { return 1; } // Prepare the kernel. 
dim3 grid(params.H, params.B); size_t smem = smem_buffer_elts_<E>(params)*2*sizeof(float); hipLaunchKernelGGL(( lmha_kernel<E, THREADS_PER_HEAD, GO_BACKWARD>), dim3(grid), dim3(block), smem, 0, params); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< bool GO_BACKWARD > int lmha(const Lmha_params<float> &params) { int blocks = params.B * params.H; int res = 1; if( blocks < LOW_OCCUPANCY_THRESHOLD ) { if( params.E <= 32 ) { res = lmha_low_occupancy_< 32, GO_BACKWARD>(params, blocks); } else if( params.E <= 64 ) { res = lmha_low_occupancy_< 64, GO_BACKWARD>(params, blocks); } else if( params.E <= 128 ) { res = lmha_low_occupancy_<128, GO_BACKWARD>(params, blocks); } else if( params.E <= 256 ) { res = lmha_low_occupancy_<256, GO_BACKWARD>(params, blocks); } } else { if( params.E <= 32 ) { res = lmha_< 32, 1, GO_BACKWARD>(params); } else if( params.E <= 48 ) { res = lmha_< 48, 1, GO_BACKWARD>(params); } else if( params.E <= 64 ) { res = lmha_< 64, 1, GO_BACKWARD>(params); } else if( params.E <= 128 ) { res = lmha_<128, 2, GO_BACKWARD>(params); } else if( params.E <= 256 ) { res = lmha_<256, 4, GO_BACKWARD>(params); } } return res; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T > inline void set_params(Lmha_params<T> &params, const torch::Tensor q, const torch::Tensor k, const torch::Tensor v, torch::Tensor o) { // Define the pointers. params.out = o.data_ptr<T>(); params.q = q.data_ptr<T>(); params.k = k.data_ptr<T>(); params.v = v.data_ptr<T>(); // Define the strides. params.q_stride_B = (int) q.stride(0); params.q_stride_H = (int) q.stride(1); params.q_stride_L = (int) q.stride(2); params.k_stride_B = (int) k.stride(0); params.k_stride_H = (int) k.stride(1); params.k_stride_L = (int) k.stride(2); params.v_stride_B = (int) v.stride(0); params.v_stride_H = (int) v.stride(1); params.v_stride_L = (int) v.stride(2); params.o_stride_B = (int) o.stride(0); params.o_stride_H = (int) o.stride(1); params.o_stride_L = (int) o.stride(2); // Extract the dimensions. int N = q.size(0); int H = q.size(1); int L = q.size(2); int E = q.size(3); int M = v.size(3); params.B = N; params.L = L; params.H = H; params.E = E; params.M = M; } //////////////////////////////////////////////////////////////////////////////////////////////////// int lmha_fwd(const torch::Tensor queries, const torch::Tensor keys, const torch::Tensor values, torch::Tensor product) { // Make sure that we are using the correct GPU device torch::DeviceGuard _guard(queries.device()); // Make sure the inner-most dimension of the tensors is packed. assert(queries.stride(3) == 1); assert(keys .stride(3) == 1); assert(values .stride(3) == 1); assert(product.stride(3) == 1); // Extract the dimensions. int N = queries.size(0); int H = queries.size(1); int L = queries.size(2); int E = queries.size(3); int M = values.size (3); // The structure of params. Lmha_params<float> params; set_params(params, queries, keys, values, product); // Launch the kernel. return lmha<false>(params); } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T > struct Lmha_bwd_params { // The output buffer for K. Dimensions [B, H, L, D]. T *out_k; // The output buffer for V. Dimensions [B, H, L, D]. T *out_v; // The input Qs. Dimensions [B, H, L, D]. const T *q; // The input Ks. Dimensions [B, H, L, D]. const T *k; // The input Vs. Dimensions [B, H, L, D]. 
const T *v; // The input Gs. Dimensions [B, H, L, D]. const T *g; // The dimensions. int B, L, H, M, E; // The strides for the input tensors. int q_stride_B, q_stride_L, q_stride_H; int k_stride_B, k_stride_L, k_stride_H; int v_stride_B, v_stride_L, v_stride_H; int g_stride_B, g_stride_L, g_stride_H; // The strides for the outputs. int out_k_stride_B, out_k_stride_L, out_k_stride_H; int out_v_stride_B, out_v_stride_L, out_v_stride_H; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< int D, int THREADS_PER_HEAD > __global__ __launch_bounds__(D*THREADS_PER_HEAD*2) void lmha_bwd_kernel(Lmha_bwd_params<float> params) { // Make sure D is a multiple of 4. static_assert(D % 4 == 0, ""); // The shared memory buffers. __shared__ struct Smem { float qg[2*D], kv[2*D], out_kv[2*D]; } smem_[2]; // The index of the shared memory buffer (for double-buffering). int smem_curr = 0; // The sequence processed by that block. const int bi = blockIdx.y; // The head processed by that block. const int hi = blockIdx.x; // The linear index of the thread. const int tidx = threadIdx.x; // Split the threads into two slices. int so = tidx / (D*THREADS_PER_HEAD); int si = tidx % (D*THREADS_PER_HEAD); // The strides for B/L/H for the Q/G tensors. int qg_stride_B, qg_stride_L, qg_stride_H; if( so == 0 ) { qg_stride_B = params.q_stride_B; qg_stride_L = params.q_stride_L; qg_stride_H = params.q_stride_H; } else { qg_stride_B = params.g_stride_B; qg_stride_L = params.g_stride_L; qg_stride_H = params.g_stride_H; } // The strides for B/L/H for the K/V tensors. int kv_stride_B, kv_stride_L, kv_stride_H; if( so == 0 ) { kv_stride_B = params.k_stride_B; kv_stride_L = params.k_stride_L; kv_stride_H = params.k_stride_H; } else { kv_stride_B = params.v_stride_B; kv_stride_L = params.v_stride_L; kv_stride_H = params.v_stride_H; } // The hidden size. int hidden_size_per_head = 0; if( so == 0 ) { hidden_size_per_head = params.E; } else { hidden_size_per_head = params.M; } // Where to start reading from. int offset_qg = bi*qg_stride_B + hi*qg_stride_H + si; int offset_kv = bi*kv_stride_B + hi*kv_stride_H + si; // We walk backward, account for the extra offset. offset_qg += (params.L-1)*qg_stride_L; offset_kv += (params.L-1)*kv_stride_L; // Determine the base pointers for Q, K, V and G. const float *ptr_qg = &(so == 0 ? params.q : params.g)[offset_qg]; const float *ptr_kv = &(so == 0 ? params.k : params.v)[offset_kv]; // Is it an active thread? const int active = si < hidden_size_per_head; // Trigger the memory loads for Q, K, V and G. float ldg_qg = 0.f, ldg_kv = 0.f; if( active ) { ldg_qg = *ptr_qg; ldg_kv = *ptr_kv; } // Move the load pointers (backward). ptr_qg -= qg_stride_L; ptr_kv -= kv_stride_L; // The number of FLOAT4s per head. constexpr int FLOAT4s_PER_HEAD = D / 4; // The number of FLOAT4s per thread. constexpr int FLOAT4s_PER_THREAD = FLOAT4s_PER_HEAD / THREADS_PER_HEAD; // The storage for the G*Q^T or Q^T*G values. float4 gq[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { gq[ii] = make_float4(0.f, 0.f, 0.f, 0.f); } // The strides for B/L/H for the K/V tensors. int out_kv_stride_B, out_kv_stride_L, out_kv_stride_H; if( so == 0 ) { out_kv_stride_B = params.out_k_stride_B; out_kv_stride_L = params.out_k_stride_L; out_kv_stride_H = params.out_k_stride_H; } else { out_kv_stride_B = params.out_v_stride_B; out_kv_stride_L = params.out_v_stride_L; out_kv_stride_H = params.out_v_stride_H; } // Where to start reading from. 
int offset_out_kv = bi*out_kv_stride_B + hi*out_kv_stride_H + si; // We walk backward, account for the extra offset. offset_out_kv += (params.L-1)*out_kv_stride_L; // The output pointer. float *ptr_out_kv = &(so == 0 ? params.out_k : params.out_v)[offset_out_kv]; // Store to shared memory. if( si < D ) { smem_[smem_curr].qg[so*D + si] = ldg_qg; smem_[smem_curr].kv[so*D + si] = ldg_kv; } // The position of the thread in the output dimension. int oo = si / THREADS_PER_HEAD % D; int oi = si % THREADS_PER_HEAD * 4; // Iterate over the timesteps. for( int ti = 0; ti < params.L; ++ti ) { // Is it the last iteration? int is_last = ti == params.L - 1; // Trigger the next loads. if( !is_last && active ) { ldg_qg = *ptr_qg; ldg_kv = *ptr_kv; } // Move the load pointers. ptr_qg -= qg_stride_L; ptr_kv -= kv_stride_L; // Make sure the data is in shared memory. __syncthreads(); // Each thread loads 4 values from G or Q. float4 g[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { float *smem_ptr = &smem_[smem_curr].qg[(so^1)*D + oi]; g[ii] = *reinterpret_cast<const float4*>(&smem_ptr[ii*THREADS_PER_HEAD*4]); } // Each thread loads a single from Q or G value. float q = smem_[smem_curr].qg[so*D + oo]; // Update the G*Q^T or Q*G^T product. #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { gq[ii].x += g[ii].x * q; gq[ii].y += g[ii].y * q; gq[ii].z += g[ii].z * q; gq[ii].w += g[ii].w * q; } // Load the V or K values from shared memory. float4 v[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { float *smem_ptr = &smem_[smem_curr].kv[(so^1)*D + oi]; v[ii] = *reinterpret_cast<const float4*>(&smem_ptr[ii*THREADS_PER_HEAD*4]); } // Compute the partial output value for that thread. float sum = 0.f; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { sum += v[ii].x * gq[ii].x; sum += v[ii].y * gq[ii].y; sum += v[ii].z * gq[ii].z; sum += v[ii].w * gq[ii].w; } // Finalize the computation of the sum (if we have more than 1 thread per head). if( THREADS_PER_HEAD > 1 ) { // Finalize the sum for each head. #pragma unroll for( int mask = THREADS_PER_HEAD / 2; mask >= 1; mask /= 2 ) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } // Store to shared memory. if( oi == 0 ) { smem_[smem_curr].out_kv[so*D + oo] = sum; } // Make sure the data is in shared memory. __syncthreads(); // Active threads read the data to store. if( si < hidden_size_per_head ) { sum = smem_[smem_curr].out_kv[so*D + si]; } } // THREADS_PER_HEAD > 1. // Store the output. All the threads are active. if( si < hidden_size_per_head ) { *ptr_out_kv = sum; } // Move to next location. ptr_out_kv -= out_kv_stride_L; // Move the shared memory buffer. smem_curr = (smem_curr + 1) % 2; // Store to shared memory for Q and K. 
if( !is_last && si < D ) { smem_[smem_curr].qg[so*D + si] = ldg_qg; smem_[smem_curr].kv[so*D + si] = ldg_kv; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int D, int THREADS_PER_HEAD > int lmha_bwd_(const Lmha_bwd_params<float> &params) { int block = D*THREADS_PER_HEAD*2; if( block >= 1024 || params.B > 65535 ) { return 1; } dim3 grid(params.H, params.B); hipLaunchKernelGGL(( lmha_bwd_kernel<D, THREADS_PER_HEAD>), dim3(grid), dim3(block), 0, 0, params); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// int lmha_bwd(const Lmha_bwd_params<float> &params) { int blocks = params.B * params.H; if( blocks < LOW_OCCUPANCY_THRESHOLD ) { return 1; } int hidden_size_per_head = max(params.E, params.M); int res = 1; if( hidden_size_per_head <= 32 ) { res = lmha_bwd_< 32, 1>(params); } else if( hidden_size_per_head <= 64 ) { res = lmha_bwd_< 64, 1>(params); } else if( hidden_size_per_head <= 128 ) { res = lmha_bwd_<128, 2>(params); } else if( hidden_size_per_head <= 256 ) { res = lmha_bwd_<256, 4>(params); } return res; } //////////////////////////////////////////////////////////////////////////////////////////////////// int lmha_bwd(const torch::Tensor queries, const torch::Tensor keys, const torch::Tensor values, const torch::Tensor grad_out, torch::Tensor grad_queries, torch::Tensor grad_keys, torch::Tensor grad_values) { // Make sure that we are using the correct GPU device torch::DeviceGuard _guard(queries.device()); // Make sure the inner-most dimension of the tensors is packed. assert(queries .stride(3) == 1); assert(keys .stride(3) == 1); assert(values .stride(3) == 1); assert(grad_out .stride(3) == 1); assert(grad_queries.stride(3) == 1); assert(grad_keys .stride(3) == 1); assert(grad_values .stride(3) == 1); // Extract the dimensions. int N = queries.size(0); int H = queries.size(1); int L = queries.size(2); int E = queries.size(3); int M = values.size (3); // Gradient on Q. // The structure of params. Lmha_params<float> params; set_params(params, grad_out, values, keys, grad_queries); // Launch the kernel. int res = lmha<false>(params); if( res ) { return res; } // Gradient on K and V together. Lmha_bwd_params<float> bwd_params; bwd_params.out_k = grad_keys.data_ptr<float>(); bwd_params.out_v = grad_values.data_ptr<float>(); bwd_params.q = queries.data_ptr<float>(); bwd_params.k = keys.data_ptr<float>(); bwd_params.v = values.data_ptr<float>(); bwd_params.g = grad_out.data_ptr<float>(); bwd_params.B = N; bwd_params.L = L; bwd_params.H = H; bwd_params.E = E; bwd_params.M = M; bwd_params.q_stride_B = queries.stride(0); bwd_params.q_stride_H = queries.stride(1); bwd_params.q_stride_L = queries.stride(2); bwd_params.k_stride_B = keys.stride(0); bwd_params.k_stride_H = keys.stride(1); bwd_params.k_stride_L = keys.stride(2); bwd_params.v_stride_B = values.stride(0); bwd_params.v_stride_H = values.stride(1); bwd_params.v_stride_L = values.stride(2); bwd_params.g_stride_B = grad_out.stride(0); bwd_params.g_stride_H = grad_out.stride(1); bwd_params.g_stride_L = grad_out.stride(2); bwd_params.out_k_stride_B = grad_keys.stride(0); bwd_params.out_k_stride_H = grad_keys.stride(1); bwd_params.out_k_stride_L = grad_keys.stride(2); bwd_params.out_v_stride_B = grad_values.stride(0); bwd_params.out_v_stride_H = grad_values.stride(1); bwd_params.out_v_stride_L = grad_values.stride(2); // Try to run the fused kernel. 
int fallback = lmha_bwd(bwd_params); // If it failed, fallback on separate kernels for K and V. if( fallback ) { // Gradient on K. // Launch the kernel. set_params(params, values, grad_out, queries, grad_keys); res = lmha<true>(params); if( res ) { return res; } // Gradient on V. // Launch the kernel. set_params(params, keys, queries, grad_out, grad_values); return lmha<true>(params); } // It worked... return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace nvidia #endif // #ifdef ENABLE_NVIDIA_OPTIMIZATIONS //////////////////////////////////////////////////////////////////////////////////////////////////// typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor; #define E_BLOCK_SIZE 8 __global__ void causal_dot_product_kernel( const float_accessor queries, const float_accessor keys, const float_accessor values, float_accessor result, const int N, const int H, const int L, const int E, const int M ) { int n = blockIdx.y; int h = blockIdx.z; int e_start = blockIdx.x * E_BLOCK_SIZE; int m = threadIdx.x % M; extern __shared__ float shared_mem[]; float* shared_kv = shared_mem; for (int e_local = 0; e_local < E_BLOCK_SIZE && e_local + e_start < E; e_local++) { shared_kv[m + e_local * M] = 0; } for (int t=0; t<L; t++) { float res = 0; for (int e_local = 0; e_local < E_BLOCK_SIZE && e_local + e_start < E; e_local++) { shared_kv[e_local*M + m] += keys[n][h][t][e_local + e_start] * values[n][h][t][m]; res += queries[n][h][t][e_local + e_start] * shared_kv[e_local*M + m]; } atomicAdd( &result[n][h][t][m], res ); } } //////////////////////////////////////////////////////////////////////////////////////////////////// void causal_dot_product_(const torch::Tensor queries, const torch::Tensor keys, const torch::Tensor values, torch::Tensor product) { // Make sure that we are using the correct GPU device torch::DeviceGuard _guard(queries.device()); int N = queries.size(0); int H = queries.size(1); int L = queries.size(2); int E = queries.size(3); int M = values.size(3); const int blocks_per_sequence = (E + E_BLOCK_SIZE - 1) / E_BLOCK_SIZE; dim3 blockDim(M, 1, 1); dim3 gridDim(blocks_per_sequence, N, H); const int shared_mem_forward = E_BLOCK_SIZE * M * sizeof(float); hipLaunchKernelGGL(( causal_dot_product_kernel), dim3(gridDim), dim3(blockDim), shared_mem_forward, 0, queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), product.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), N, H, L, E, M ); } //////////////////////////////////////////////////////////////////////////////////////////////////// void causal_dot_product(const torch::Tensor queries, const torch::Tensor keys, const torch::Tensor values, torch::Tensor product) { #ifdef ENABLE_NVIDIA_OPTIMIZATIONS int fallback = nvidia::lmha_fwd(queries, keys, values, product); #else int fallback = 1; #endif if( fallback ) { causal_dot_product_(queries, keys, values, product); } } //////////////////////////////////////////////////////////////////////////////////////////////////// #define M_BLOCK_SIZE 4 // we need shared memory to store // kv // Backward direction // kv_backwards // Shared memory usage __global__ void causal_dot_backward_query_key_kernel( const float_accessor queries, const float_accessor keys, const float_accessor values, const float_accessor grad_out, float_accessor grad_queries, float_accessor 
grad_keys, int N, int H, int L, int E, int M ) { int n = blockIdx.y; int h = blockIdx.z; int m_start = blockIdx.x * M_BLOCK_SIZE; int e = threadIdx.x % E; extern __shared__ float shared_mem[]; const int shared_kv_size = M_BLOCK_SIZE * E; float* shared_kv = shared_mem; float* shared_kv_bw = shared_mem + shared_kv_size; for (int m_local = 0; m_local < M_BLOCK_SIZE && m_local + m_start < M; m_local++) { shared_kv[m_local * E + e] = 0; shared_kv_bw[m_local * E + e] = 0; } for (int l=0; l<L; l++) { float res = 0, res_bw = 0; int l_b = L - l - 1; for (int m_local = 0; m_local < M_BLOCK_SIZE && m_local + m_start < M; m_local++) { shared_kv[m_local*E + e] += keys[n][h][l][e] * values[n][h][l][m_start + m_local]; shared_kv_bw[m_local*E + e] += queries[n][h][l_b][e] * grad_out[n][h][l_b][m_start + m_local]; res += grad_out[n][h][l][m_start + m_local] * shared_kv[m_local*E + e]; res_bw += values[n][h][l_b][m_start + m_local] * shared_kv_bw[m_local*E + e]; } atomicAdd( &grad_queries[n][h][l][e], res ); atomicAdd( &grad_keys[n][h][l_b][e], res_bw ); } } __global__ void causal_dot_backward_value_kernel( const float_accessor queries, const float_accessor keys, const float_accessor values, const float_accessor grad_out, float_accessor grad_keys, float_accessor grad_values, int N, int H, int L, int E, int M ) { int n = blockIdx.y; int h = blockIdx.z; int e_start = blockIdx.x * E_BLOCK_SIZE; int m = threadIdx.x % M; extern __shared__ float shared_mem[]; float* shared_kv = shared_mem; for (int e_local = 0; e_local < E_BLOCK_SIZE && e_local + e_start < E; e_local++) { shared_kv[m + e_local * M] = 0; } for (int l = 0; l < L; l++) { int l_b = L - l -1; float res = 0; for (int e_local = 0; e_local < E_BLOCK_SIZE && e_local + e_start < E; e_local++) { shared_kv[e_local*M + m] += queries[n][h][l_b][e_start + e_local] * grad_out[n][h][l_b][m]; res += keys[n][h][l_b][e_start + e_local] * shared_kv[e_local*M + m]; } atomicAdd( &grad_values[n][h][l_b][m], res ); } } //////////////////////////////////////////////////////////////////////////////////////////////////// void causal_dot_backward_(const torch::Tensor queries, const torch::Tensor keys, const torch::Tensor values, const torch::Tensor grad_out, torch::Tensor grad_queries, torch::Tensor grad_keys, torch::Tensor grad_values) { // Make sure that we are using the correct GPU device torch::DeviceGuard _guard(queries.device()); int N = queries.size(0); int H = queries.size(1); int L = queries.size(2); int E = queries.size(3); int M = values.size(3); const int blocks_per_sequence = (M + M_BLOCK_SIZE - 1) / M_BLOCK_SIZE; dim3 blockDim(E, 1, 1); dim3 gridDim(blocks_per_sequence, N, H); const int shared_mem_qk_backward = 2 * M_BLOCK_SIZE * E * sizeof(float); hipLaunchKernelGGL(( causal_dot_backward_query_key_kernel), dim3(gridDim), dim3(blockDim), shared_mem_qk_backward, 0, queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), N, H, L, E, M ); const int blocks_per_sequence_value = (E + E_BLOCK_SIZE - 1) / E_BLOCK_SIZE; dim3 blockDimv(M, 1, 1); dim3 gridDimv(blocks_per_sequence_value, N, H); const int shared_mem_v_backward = E_BLOCK_SIZE * M * sizeof(float); hipLaunchKernelGGL(( causal_dot_backward_value_kernel), dim3(gridDimv), 
      dim3(blockDimv), shared_mem_v_backward, 0,
      queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
      keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
      values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
      grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
      grad_keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
      grad_values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
      N, H, L, E, M
  );
}

////////////////////////////////////////////////////////////////////////////////////////////////////

void causal_dot_backward(const torch::Tensor queries,
                         const torch::Tensor keys,
                         const torch::Tensor values,
                         const torch::Tensor grad_out,
                         torch::Tensor grad_queries,
                         torch::Tensor grad_keys,
                         torch::Tensor grad_values) {
#ifdef ENABLE_NVIDIA_OPTIMIZATIONS
  int fallback = nvidia::lmha_bwd(queries, keys, values, grad_out, grad_queries, grad_keys, grad_values);
#else
  int fallback = 1;
#endif
  if( fallback ) {
    // Make sure that the gradient tensors are 0. This is needed because the
    // bwd pass might have partially executed and filled in some values in
    // grad_queries or grad_keys.
    //
    // This adds a small overhead every time we have to fall back to the old
    // kernel for the backward pass.
    grad_queries.zero_();
    grad_keys.zero_();
    causal_dot_backward_(queries, keys, values, grad_out, grad_queries, grad_keys, grad_values);
  }
}

////////////////////////////////////////////////////////////////////////////////////////////////////

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def(
    "causal_dot_product",
    &causal_dot_product,
    "Compute the weighted sum of values but attending only to previous "
    "values."
  );
  m.def(
    "causal_dot_backward",
    &causal_dot_backward,
    "Compute the gradients for the causal dot product."
  );
}
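The extension above computes the causal (masked) linear-attention numerator: for each timestep t, out[t] = q[t] * S_t, where the running state S_t[e][m] is the sum over t' <= t of k[t'][e] * v[t'][m]. This is exactly what causal_dot_product_kernel accumulates in shared_kv. The plain C++ reference below spells out that recurrence for a single batch/head; it is a readability aid under assumed row-major layouts, not the extension's API, and the name causal_dot_product_ref is hypothetical.

// Host-side reference for the causal dot product computed by the kernels above:
// out[t, m] = sum_e q[t, e] * S_t[e, m],  S_t[e, m] = sum_{t' <= t} k[t', e] * v[t', m].
#include <vector>
#include <cstdio>

void causal_dot_product_ref(const std::vector<float>& q,   // [L, E]
                            const std::vector<float>& k,   // [L, E]
                            const std::vector<float>& v,   // [L, M]
                            std::vector<float>& out,       // [L, M]
                            int L, int E, int M) {
    std::vector<float> S(E * M, 0.0f);                     // running K^T V state
    out.assign(L * M, 0.0f);
    for (int t = 0; t < L; ++t) {
        // Fold the current timestep into the state before using it, so
        // position t attends to itself as well as to earlier positions.
        for (int e = 0; e < E; ++e)
            for (int m = 0; m < M; ++m)
                S[e * M + m] += k[t * E + e] * v[t * M + m];
        for (int m = 0; m < M; ++m) {
            float acc = 0.0f;
            for (int e = 0; e < E; ++e)
                acc += q[t * E + e] * S[e * M + m];
            out[t * M + m] = acc;
        }
    }
}

int main() {
    const int L = 3, E = 2, M = 2;
    std::vector<float> q = {1,0, 0,1, 1,1};
    std::vector<float> k = {1,1, 1,0, 0,1};
    std::vector<float> v = {1,2, 3,4, 5,6};
    std::vector<float> out;
    causal_dot_product_ref(q, k, v, out, L, E, M);
    for (int t = 0; t < L; ++t)
        std::printf("t=%d: %5.1f %5.1f\n", t, out[t*M], out[t*M+1]);
    return 0;
}

Updating the state with k[t] v[t]^T before the dot product with q[t] matches the update order inside causal_dot_product_kernel, and the backward kernels replay the same recurrence forward for grad_queries and backward in time for grad_keys and grad_values.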
255f23fa8dbe782de071e006abdd446ae0c133ee.cu
// // Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/ // Written by Angelos Katharopoulos <[email protected]>, // Apoorv Vyas <[email protected]> // // // For modifications made inside namespace nvidia (authored by jdemouth): // // Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software is furnished to do so, // subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. // #include <torch/extension.h> #include <assert.h> #include <stdio.h> #define ENABLE_NVIDIA_OPTIMIZATIONS #ifdef ENABLE_NVIDIA_OPTIMIZATIONS namespace nvidia { //////////////////////////////////////////////////////////////////////////////////////////////////// constexpr int THREADS_PER_WARP = 32; //////////////////////////////////////////////////////////////////////////////////////////////////// constexpr int LOW_OCCUPANCY_THRESHOLD = 40; // TODO: Make it HW specific (like 1/2 SMs). //////////////////////////////////////////////////////////////////////////////////////////////////// static inline __device__ __host__ int div_up(int m, int n) { return (m + n-1) / n; } //////////////////////////////////////////////////////////////////////////////////////////////////// static inline __device__ __host__ int round_up(int m, int n) { return div_up(m, n) * n; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T > struct Lmha_params { // The output buffer. Dimensions [B, H, L, M]. T *out; // The input Qs. Dimensions [B, H, L, E]. const T *q; // The input Ks. Dimensions [B, H, L, E]. const T *k; // The input Vs. Dimensions [B, H, L, M]. const T *v; // The different dimensions. int B, L, H, E, M; // The strides for the different tensors. int q_stride_B, q_stride_H, q_stride_L; int k_stride_B, k_stride_H, k_stride_L; int v_stride_B, v_stride_H, v_stride_L; int o_stride_B, o_stride_H, o_stride_L; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, bool GO_BACKWARD, int WARPS, int COLS_PER_THREAD = 4 > __global__ __launch_bounds__(WARPS * THREADS_PER_WARP) void lmha_low_occupancy_kernel(Lmha_params<float> params) { // The number of threads per block. constexpr int THREADS_PER_BLOCK = WARPS * THREADS_PER_WARP; // The number of rows per thread. constexpr int ROWS_PER_THREAD = E / THREADS_PER_WARP; // The number of steps per iteration. constexpr int COLS_PER_ITER = WARPS * COLS_PER_THREAD; // Make sure E is a multiple of the warp size. static_assert(E % THREADS_PER_WARP == 0, ""); // Shared memory to store V/O. 
__shared__ float smem_v[COLS_PER_ITER], smem_o[COLS_PER_ITER]; // Shared memory buffer to performance the reductions. __shared__ float smem_reds[E * WARPS]; // The sequence processed by that block. const int bi = blockIdx.z; // The head processed by that block. const int hi = blockIdx.y; // The hidden cell in the V/output buffers. const int vi = blockIdx.x; // The linear index of the thread. const int tidx = threadIdx.x; // Decompose the block in warp/lane. const int warp = tidx / THREADS_PER_WARP; const int lane = tidx % THREADS_PER_WARP; // The base offset loaded by the thread in Q and K. int offset_q = bi*params.q_stride_B + hi*params.q_stride_H + lane; int offset_k = bi*params.k_stride_B + hi*params.k_stride_H + lane; // If we walk backward, account for the extra offset. if( GO_BACKWARD ) { offset_q += (params.L-1)*params.q_stride_L; offset_k += (params.L-1)*params.k_stride_L; } // Position the warp at the beginning of the proper timestep. if( GO_BACKWARD ) { offset_q -= warp*COLS_PER_THREAD*params.q_stride_L; offset_k -= warp*COLS_PER_THREAD*params.k_stride_L; } else { offset_q += warp*COLS_PER_THREAD*params.q_stride_L; offset_k += warp*COLS_PER_THREAD*params.k_stride_L; } // Determine the base pointers for Q and K. const float *ptr_q = &params.q[offset_q]; const float *ptr_k = &params.k[offset_k]; // Is a given row valid? int valid_qk[ROWS_PER_THREAD]; #pragma unroll for( int ii = 0; ii < ROWS_PER_THREAD; ++ii ) { valid_qk[ii] = lane + ii*THREADS_PER_WARP < params.E; } // The offset to the position loaded by the thread in V. int offset_v = bi*params.v_stride_B + hi*params.v_stride_H + vi; int offset_o = bi*params.o_stride_B + hi*params.o_stride_H + vi; // If we walk backward, account for the extra offset. if( GO_BACKWARD ) { offset_v += (params.L-1)*params.v_stride_L; offset_o += (params.L-1)*params.o_stride_L; } // We load/store a strided matrix of COLS_PER_ITER x OUTPUTS_PER_BLOCK. if( GO_BACKWARD ) { offset_v -= tidx*params.v_stride_L; offset_o -= tidx*params.o_stride_L; } else { offset_v += tidx*params.v_stride_L; offset_o += tidx*params.o_stride_L; } // Determine the base pointer for V. const float *ptr_v = &params.v[offset_v]; // The output pointer. float *ptr_o = &params.out[offset_o]; // The running KVs. float running_kv[ROWS_PER_THREAD]; #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { running_kv[ri] = 0.f; } // Iterate over the timesteps. TODO: Use params.loop_count!!! for( int iter = 0; iter < params.L; iter += COLS_PER_ITER ) { // Each thread loads a matrix of elements. float q[ROWS_PER_THREAD][COLS_PER_THREAD], k[ROWS_PER_THREAD][COLS_PER_THREAD]; // Trigger the memory loads for Q and K. #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { // For Q/K, each warp loads from various timesteps. int ti = iter + warp*COLS_PER_THREAD; if( GO_BACKWARD ) { ti = params.L - 1 - ti; } // Is it a valid access? int valid; if( GO_BACKWARD ) { valid = valid_qk[ri] && ti - ci >= 0; } else { valid = valid_qk[ri] && ti + ci < params.L; } // The extra offset to add. if( GO_BACKWARD ) { offset_q = ri*THREADS_PER_WARP - ci*params.q_stride_L; offset_k = ri*THREADS_PER_WARP - ci*params.k_stride_L; } else { offset_q = ri*THREADS_PER_WARP + ci*params.q_stride_L; offset_k = ri*THREADS_PER_WARP + ci*params.k_stride_L; } // Load Q/K if they are valid. q[ri][ci] = valid ? ptr_q[offset_q] : 0.f; k[ri][ci] = valid ? ptr_k[offset_k] : 0.f; } } // For the V tensor, we assign contiguous thread to different loads. 
So, ti is different. int ti = iter + tidx; if( GO_BACKWARD ) { ti = params.L - 1 - ti; } // Is it a valid access? int valid_vo = tidx < COLS_PER_ITER; if( GO_BACKWARD ) { valid_vo &= ti >= 0; } else { valid_vo &= ti < params.L; } // Trigger the loads for V. float ldg_v = valid_vo ? *ptr_v : 0.f; // Move the load pointers. if( GO_BACKWARD ) { ptr_q -= COLS_PER_ITER*params.q_stride_L; ptr_k -= COLS_PER_ITER*params.k_stride_L; ptr_v -= COLS_PER_ITER*params.v_stride_L; } else { ptr_q += COLS_PER_ITER*params.q_stride_L; ptr_k += COLS_PER_ITER*params.k_stride_L; ptr_v += COLS_PER_ITER*params.v_stride_L; } // Store to shared memory. if( tidx < COLS_PER_ITER ) { smem_v[tidx] = ldg_v; } // Make sure V is in shared memory. __syncthreads(); // Read V from shared memory. float v[COLS_PER_THREAD]; #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { v[ci] = smem_v[warp*COLS_PER_THREAD + ci]; } // Each thread computes local K*V products. float kv[ROWS_PER_THREAD][COLS_PER_THREAD]; #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { kv[ri][ci] = 0.f; } } // Update the K*V^T product. #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { kv[ri][ci] += k[ri][ci] * v[ci]; } } // We must perform the prefix sums within the thread-block. Start with the thread. #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { #pragma unroll for( int ci = 1; ci < COLS_PER_THREAD; ++ci ) { kv[ri][ci] += kv[ri][ci-1]; } } // Store the partial sums to shared memory. Unless we have no inter-warp reduction to perform. #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { smem_reds[warp*E + ri*THREADS_PER_WARP + lane] = kv[ri][COLS_PER_THREAD-1]; } // Make sure the data is in shared memory. __syncthreads(); // Each thread deals with one or more column(s) of the matrix. constexpr int SUMS_PER_THREAD = (E + THREADS_PER_BLOCK-1) / THREADS_PER_BLOCK; #pragma unroll for( int ii = 0, idx = tidx; ii < SUMS_PER_THREAD; ++ii, idx += THREADS_PER_BLOCK ) { if( idx < E ) { float sum = smem_reds[idx]; #pragma unroll for( int jj = 1; jj < WARPS; ++jj ) { smem_reds[idx + jj*E] = sum += smem_reds[idx + jj*E]; } } } // Make sure the reductions are stored in shared memory. __syncthreads(); // Each thread updates his partial products. #pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { float sum = running_kv[ri]; if( warp > 0 ) { sum += smem_reds[(warp-1)*E + lane + ri*THREADS_PER_WARP]; } #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { kv[ri][ci] += sum; } } // Compute the partial output values for that thread. float sum[COLS_PER_THREAD]; #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { sum[ci] = q[0][ci] * kv[0][ci]; #pragma unroll for( int ri = 1; ri < ROWS_PER_THREAD; ++ri ) { sum[ci] += q[ri][ci] * kv[ri][ci]; } } // Run the parallel reductions inside the warp. #pragma unroll for( int mask = THREADS_PER_WARP / 2; mask >= 1; mask /= 2 ) { #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { sum[ci] += __shfl_xor_sync(uint32_t(-1), sum[ci], mask); } } // Store the final output to shared memory. if( lane == 0 ) { #pragma unroll for( int ci = 0; ci < COLS_PER_THREAD; ++ci ) { smem_o[warp*COLS_PER_THREAD + ci] = sum[ci]; } } // Make sure the data is in shared memory. __syncthreads(); // Store the output. if( valid_vo ) { *ptr_o = smem_o[tidx]; } // Each thread updates his running kv. 
#pragma unroll for( int ri = 0; ri < ROWS_PER_THREAD; ++ri ) { running_kv[ri] += smem_reds[(WARPS-1)*E + lane + ri*THREADS_PER_WARP]; } // Move to next location. if( GO_BACKWARD ) { ptr_o -= COLS_PER_ITER*params.o_stride_L; } else { ptr_o += COLS_PER_ITER*params.o_stride_L; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, bool GO_BACKWARD, int WARPS > int lmha_low_occupancy_(const Lmha_params<float> &params) { // Make sure we are not going to launch an invalid grid. if( params.H > 65535 || params.B > 65535 ) { return 1; } // Prepare the grid and trigger the CUDA kernel. dim3 grid; grid.x = params.M; grid.y = params.H; grid.z = params.B; lmha_low_occupancy_kernel<E, GO_BACKWARD, WARPS><<<grid, WARPS*THREADS_PER_WARP>>>(params); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, bool GO_BACKWARD > int lmha_low_occupancy_(const Lmha_params<float> &params, int blocks) { if( params.M * blocks >= 8*LOW_OCCUPANCY_THRESHOLD ) { return lmha_low_occupancy_<E, GO_BACKWARD, 4>(params); } else if( params.M * blocks >= 4*LOW_OCCUPANCY_THRESHOLD ) { return lmha_low_occupancy_<E, GO_BACKWARD, 8>(params); } else { return lmha_low_occupancy_<E, GO_BACKWARD, 16>(params); } return 1; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, typename Params > static inline __device__ __host__ int smem_buffer_elts_(const Params &params) { int M = round_up(params.M, 4); return 2*E + 2*M; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, int THREADS_PER_HEAD, bool GO_BACKWARD > __global__ void lmha_kernel(Lmha_params<float> params) { // Make sure E is a multiple of 4. static_assert(E % 4 == 0, ""); // The amount of shared memory per buffer (2 buffers for double-buffering). const int smem_buffer_elts = smem_buffer_elts_<E>(params); // The M dimension for shared memory. const int M = round_up(params.M, 4); // Shared memory to store Q, K and V. Size is 2*smem_buffer_elts. extern __shared__ float smem_[]; // The various shared memory buffers. float *smem_q = &smem_[0*E]; float *smem_k = &smem_[1*E]; float *smem_v = &smem_[2*E]; float *smem_o = &smem_[2*E + M]; // The index of the shared memory buffer (for double-buffering). int smem_curr = 0; // The sequence processed by that block. const int bi = blockIdx.y; // The head processed by that block. const int hi = blockIdx.x; // The linear index of the thread. const int tidx = threadIdx.x; // The offset to the position loaded by the thread in Q. int offset_q = bi*params.q_stride_B + hi*params.q_stride_H + tidx; // The offset to the position loaded by the thread in K. int offset_k = bi*params.k_stride_B + hi*params.k_stride_H + tidx; // If we walk backward, account for the extra offset. if( GO_BACKWARD ) { offset_q += (params.L-1)*params.q_stride_L; offset_k += (params.L-1)*params.k_stride_L; } // Determine the base pointers for Q and K. const float *ptr_q = &params.q[offset_q]; const float *ptr_k = &params.k[offset_k]; // The offset to the position loaded by the thread in V and O. int offset_v = bi*params.v_stride_B + hi*params.v_stride_H + tidx; int offset_o = bi*params.o_stride_B + hi*params.o_stride_H + tidx; // If we walk backward, account for the extra offset. 
if( GO_BACKWARD ) { offset_v += (params.L-1)*params.v_stride_L; offset_o += (params.L-1)*params.o_stride_L; } // Determine the base pointers for V. const float *ptr_v = &params.v[offset_v]; // Is it an active Q/K thread? const int active_qk = tidx < params.E; // Trigger the memory loads for Q and K. float ldg_q = 0.f, ldg_k = 0.f; if( active_qk ) { ldg_q = *ptr_q; ldg_k = *ptr_k; } // Is it an active V thread? const int active_v = tidx < params.M; // Trigger the memory loads for V. float ldg_v = 0.f; if( active_v ) { ldg_v = *ptr_v; } // Move the load pointers. if( GO_BACKWARD ) { ptr_q -= params.q_stride_L; ptr_k -= params.k_stride_L; ptr_v -= params.v_stride_L; } else { ptr_q += params.q_stride_L; ptr_k += params.k_stride_L; ptr_v += params.v_stride_L; } // The number of FLOAT4s per head. constexpr int FLOAT4s_PER_HEAD = E / 4; // The number of FLOAT4s per thread. constexpr int FLOAT4s_PER_THREAD = FLOAT4s_PER_HEAD / THREADS_PER_HEAD; // The storage for the K*V^T values. float4 kv[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { kv[ii] = make_float4(0.f, 0.f, 0.f, 0.f); } // The output pointer. float *out_ptr = &params.out[offset_o]; // Store to shared memory Q and K. if( tidx < E ) { smem_q[smem_curr*smem_buffer_elts + tidx] = ldg_q; smem_k[smem_curr*smem_buffer_elts + tidx] = ldg_k; } // Store to shared memory V. All threads store valid values. if( tidx < M ) { smem_v[smem_curr*smem_buffer_elts + tidx] = ldg_v; } // The position of the thread in the V dimension. int vo = tidx / THREADS_PER_HEAD; int vi = tidx % THREADS_PER_HEAD; // Iterate over the timesteps. for( int ti = 0; ti < params.L; ++ti ) { // Is it the last iteration? int is_last = ti == params.L - 1; // Trigger the next loads for Q and K. if( !is_last && active_qk ) { ldg_q = *ptr_q; ldg_k = *ptr_k; } // Trigger the next loads for V. if( !is_last && active_v ) { ldg_v = *ptr_v; } // Move the load pointers. if( GO_BACKWARD ) { ptr_q -= params.q_stride_L; ptr_k -= params.k_stride_L; ptr_v -= params.v_stride_L; } else { ptr_q += params.q_stride_L; ptr_k += params.k_stride_L; ptr_v += params.v_stride_L; } // Make sure the data is in shared memory. __syncthreads(); // Each thread loads 4 values from K. float4 k[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { int ki = tidx % THREADS_PER_HEAD * 4 + ii * THREADS_PER_HEAD * 4; k[ii] = *reinterpret_cast<const float4*>(&smem_k[smem_curr*smem_buffer_elts + ki]); } // Each thread loads a single V value. float v = 0.f; if( vo < params.M ) { v = *reinterpret_cast<const float *>(&smem_v[smem_curr*smem_buffer_elts + vo]); } // Update the K*V^T product. #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { kv[ii].x += k[ii].x * v; kv[ii].y += k[ii].y * v; kv[ii].z += k[ii].z * v; kv[ii].w += k[ii].w * v; } // Load the Q values from shared memory. float4 q[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { int qi = tidx % THREADS_PER_HEAD * 4 + ii * THREADS_PER_HEAD * 4; q[ii] = *reinterpret_cast<const float4*>(&smem_q[smem_curr*smem_buffer_elts + qi]); } // Compute the partial output value for that thread. float sum = 0.f; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { sum += q[ii].x * kv[ii].x; sum += q[ii].y * kv[ii].y; sum += q[ii].z * kv[ii].z; sum += q[ii].w * kv[ii].w; } // Finalize the computation of the sum (if we have more than 1 thread per head). if( THREADS_PER_HEAD > 1 ) { // Finalize the sum for each head. 
#pragma unroll for( int mask = THREADS_PER_HEAD / 2; mask >= 1; mask /= 2 ) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } // Store to shared memory. if( vo < M && vi == 0 ) { smem_o[smem_curr*smem_buffer_elts + vo] = sum; } // Make sure the data is in shared memory. __syncthreads(); // Active threads read the data to store. if( active_v ) { sum = smem_o[smem_curr*smem_buffer_elts + tidx]; } } // THREADS_PER_HEAD > 1. // Store the output. All the threads are active. if( active_v ) { *out_ptr = sum; } // Move to next location. if( GO_BACKWARD ) { out_ptr -= params.o_stride_L; } else { out_ptr += params.o_stride_L; } // Move the shared memory buffer. smem_curr = (smem_curr + 1) % 2; // Store to shared memory for Q and K. if( !is_last && tidx < E ) { smem_q[smem_curr*smem_buffer_elts + tidx] = ldg_q; smem_k[smem_curr*smem_buffer_elts + tidx] = ldg_k; } // Store to shared memory for V. if( !is_last && tidx < M ) { smem_v[smem_curr*smem_buffer_elts + tidx] = ldg_v; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int E, int THREADS_PER_HEAD, bool GO_BACKWARD > int lmha_(const Lmha_params<float> &params) { // The M dimension rounded up to 4. int M = round_up(params.M, 4); // The number of threads in the block. int block = round_up(max(E, M*THREADS_PER_HEAD), 32); if( block > 512 || params.B > 65535 ) { return 1; } // Prepare the kernel. dim3 grid(params.H, params.B); size_t smem = smem_buffer_elts_<E>(params)*2*sizeof(float); lmha_kernel<E, THREADS_PER_HEAD, GO_BACKWARD><<<grid, block, smem>>>(params); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< bool GO_BACKWARD > int lmha(const Lmha_params<float> &params) { int blocks = params.B * params.H; int res = 1; if( blocks < LOW_OCCUPANCY_THRESHOLD ) { if( params.E <= 32 ) { res = lmha_low_occupancy_< 32, GO_BACKWARD>(params, blocks); } else if( params.E <= 64 ) { res = lmha_low_occupancy_< 64, GO_BACKWARD>(params, blocks); } else if( params.E <= 128 ) { res = lmha_low_occupancy_<128, GO_BACKWARD>(params, blocks); } else if( params.E <= 256 ) { res = lmha_low_occupancy_<256, GO_BACKWARD>(params, blocks); } } else { if( params.E <= 32 ) { res = lmha_< 32, 1, GO_BACKWARD>(params); } else if( params.E <= 48 ) { res = lmha_< 48, 1, GO_BACKWARD>(params); } else if( params.E <= 64 ) { res = lmha_< 64, 1, GO_BACKWARD>(params); } else if( params.E <= 128 ) { res = lmha_<128, 2, GO_BACKWARD>(params); } else if( params.E <= 256 ) { res = lmha_<256, 4, GO_BACKWARD>(params); } } return res; } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T > inline void set_params(Lmha_params<T> &params, const torch::Tensor q, const torch::Tensor k, const torch::Tensor v, torch::Tensor o) { // Define the pointers. params.out = o.data_ptr<T>(); params.q = q.data_ptr<T>(); params.k = k.data_ptr<T>(); params.v = v.data_ptr<T>(); // Define the strides. params.q_stride_B = (int) q.stride(0); params.q_stride_H = (int) q.stride(1); params.q_stride_L = (int) q.stride(2); params.k_stride_B = (int) k.stride(0); params.k_stride_H = (int) k.stride(1); params.k_stride_L = (int) k.stride(2); params.v_stride_B = (int) v.stride(0); params.v_stride_H = (int) v.stride(1); params.v_stride_L = (int) v.stride(2); params.o_stride_B = (int) o.stride(0); params.o_stride_H = (int) o.stride(1); params.o_stride_L = (int) o.stride(2); // Extract the dimensions. 
int N = q.size(0); int H = q.size(1); int L = q.size(2); int E = q.size(3); int M = v.size(3); params.B = N; params.L = L; params.H = H; params.E = E; params.M = M; } //////////////////////////////////////////////////////////////////////////////////////////////////// int lmha_fwd(const torch::Tensor queries, const torch::Tensor keys, const torch::Tensor values, torch::Tensor product) { // Make sure that we are using the correct GPU device torch::DeviceGuard _guard(queries.device()); // Make sure the inner-most dimension of the tensors is packed. assert(queries.stride(3) == 1); assert(keys .stride(3) == 1); assert(values .stride(3) == 1); assert(product.stride(3) == 1); // Extract the dimensions. int N = queries.size(0); int H = queries.size(1); int L = queries.size(2); int E = queries.size(3); int M = values.size (3); // The structure of params. Lmha_params<float> params; set_params(params, queries, keys, values, product); // Launch the kernel. return lmha<false>(params); } //////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T > struct Lmha_bwd_params { // The output buffer for K. Dimensions [B, H, L, D]. T *out_k; // The output buffer for V. Dimensions [B, H, L, D]. T *out_v; // The input Qs. Dimensions [B, H, L, D]. const T *q; // The input Ks. Dimensions [B, H, L, D]. const T *k; // The input Vs. Dimensions [B, H, L, D]. const T *v; // The input Gs. Dimensions [B, H, L, D]. const T *g; // The dimensions. int B, L, H, M, E; // The strides for the input tensors. int q_stride_B, q_stride_L, q_stride_H; int k_stride_B, k_stride_L, k_stride_H; int v_stride_B, v_stride_L, v_stride_H; int g_stride_B, g_stride_L, g_stride_H; // The strides for the outputs. int out_k_stride_B, out_k_stride_L, out_k_stride_H; int out_v_stride_B, out_v_stride_L, out_v_stride_H; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template< int D, int THREADS_PER_HEAD > __global__ __launch_bounds__(D*THREADS_PER_HEAD*2) void lmha_bwd_kernel(Lmha_bwd_params<float> params) { // Make sure D is a multiple of 4. static_assert(D % 4 == 0, ""); // The shared memory buffers. __shared__ struct Smem { float qg[2*D], kv[2*D], out_kv[2*D]; } smem_[2]; // The index of the shared memory buffer (for double-buffering). int smem_curr = 0; // The sequence processed by that block. const int bi = blockIdx.y; // The head processed by that block. const int hi = blockIdx.x; // The linear index of the thread. const int tidx = threadIdx.x; // Split the threads into two slices. int so = tidx / (D*THREADS_PER_HEAD); int si = tidx % (D*THREADS_PER_HEAD); // The strides for B/L/H for the Q/G tensors. int qg_stride_B, qg_stride_L, qg_stride_H; if( so == 0 ) { qg_stride_B = params.q_stride_B; qg_stride_L = params.q_stride_L; qg_stride_H = params.q_stride_H; } else { qg_stride_B = params.g_stride_B; qg_stride_L = params.g_stride_L; qg_stride_H = params.g_stride_H; } // The strides for B/L/H for the K/V tensors. int kv_stride_B, kv_stride_L, kv_stride_H; if( so == 0 ) { kv_stride_B = params.k_stride_B; kv_stride_L = params.k_stride_L; kv_stride_H = params.k_stride_H; } else { kv_stride_B = params.v_stride_B; kv_stride_L = params.v_stride_L; kv_stride_H = params.v_stride_H; } // The hidden size. int hidden_size_per_head = 0; if( so == 0 ) { hidden_size_per_head = params.E; } else { hidden_size_per_head = params.M; } // Where to start reading from. 
int offset_qg = bi*qg_stride_B + hi*qg_stride_H + si; int offset_kv = bi*kv_stride_B + hi*kv_stride_H + si; // We walk backward, account for the extra offset. offset_qg += (params.L-1)*qg_stride_L; offset_kv += (params.L-1)*kv_stride_L; // Determine the base pointers for Q, K, V and G. const float *ptr_qg = &(so == 0 ? params.q : params.g)[offset_qg]; const float *ptr_kv = &(so == 0 ? params.k : params.v)[offset_kv]; // Is it an active thread? const int active = si < hidden_size_per_head; // Trigger the memory loads for Q, K, V and G. float ldg_qg = 0.f, ldg_kv = 0.f; if( active ) { ldg_qg = *ptr_qg; ldg_kv = *ptr_kv; } // Move the load pointers (backward). ptr_qg -= qg_stride_L; ptr_kv -= kv_stride_L; // The number of FLOAT4s per head. constexpr int FLOAT4s_PER_HEAD = D / 4; // The number of FLOAT4s per thread. constexpr int FLOAT4s_PER_THREAD = FLOAT4s_PER_HEAD / THREADS_PER_HEAD; // The storage for the G*Q^T or Q^T*G values. float4 gq[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { gq[ii] = make_float4(0.f, 0.f, 0.f, 0.f); } // The strides for B/L/H for the K/V tensors. int out_kv_stride_B, out_kv_stride_L, out_kv_stride_H; if( so == 0 ) { out_kv_stride_B = params.out_k_stride_B; out_kv_stride_L = params.out_k_stride_L; out_kv_stride_H = params.out_k_stride_H; } else { out_kv_stride_B = params.out_v_stride_B; out_kv_stride_L = params.out_v_stride_L; out_kv_stride_H = params.out_v_stride_H; } // Where to start reading from. int offset_out_kv = bi*out_kv_stride_B + hi*out_kv_stride_H + si; // We walk backward, account for the extra offset. offset_out_kv += (params.L-1)*out_kv_stride_L; // The output pointer. float *ptr_out_kv = &(so == 0 ? params.out_k : params.out_v)[offset_out_kv]; // Store to shared memory. if( si < D ) { smem_[smem_curr].qg[so*D + si] = ldg_qg; smem_[smem_curr].kv[so*D + si] = ldg_kv; } // The position of the thread in the output dimension. int oo = si / THREADS_PER_HEAD % D; int oi = si % THREADS_PER_HEAD * 4; // Iterate over the timesteps. for( int ti = 0; ti < params.L; ++ti ) { // Is it the last iteration? int is_last = ti == params.L - 1; // Trigger the next loads. if( !is_last && active ) { ldg_qg = *ptr_qg; ldg_kv = *ptr_kv; } // Move the load pointers. ptr_qg -= qg_stride_L; ptr_kv -= kv_stride_L; // Make sure the data is in shared memory. __syncthreads(); // Each thread loads 4 values from G or Q. float4 g[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { float *smem_ptr = &smem_[smem_curr].qg[(so^1)*D + oi]; g[ii] = *reinterpret_cast<const float4*>(&smem_ptr[ii*THREADS_PER_HEAD*4]); } // Each thread loads a single from Q or G value. float q = smem_[smem_curr].qg[so*D + oo]; // Update the G*Q^T or Q*G^T product. #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { gq[ii].x += g[ii].x * q; gq[ii].y += g[ii].y * q; gq[ii].z += g[ii].z * q; gq[ii].w += g[ii].w * q; } // Load the V or K values from shared memory. float4 v[FLOAT4s_PER_THREAD]; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { float *smem_ptr = &smem_[smem_curr].kv[(so^1)*D + oi]; v[ii] = *reinterpret_cast<const float4*>(&smem_ptr[ii*THREADS_PER_HEAD*4]); } // Compute the partial output value for that thread. float sum = 0.f; #pragma unroll for( int ii = 0; ii < FLOAT4s_PER_THREAD; ++ii ) { sum += v[ii].x * gq[ii].x; sum += v[ii].y * gq[ii].y; sum += v[ii].z * gq[ii].z; sum += v[ii].w * gq[ii].w; } // Finalize the computation of the sum (if we have more than 1 thread per head). 
if( THREADS_PER_HEAD > 1 ) { // Finalize the sum for each head. #pragma unroll for( int mask = THREADS_PER_HEAD / 2; mask >= 1; mask /= 2 ) { sum += __shfl_xor_sync(uint32_t(-1), sum, mask); } // Store to shared memory. if( oi == 0 ) { smem_[smem_curr].out_kv[so*D + oo] = sum; } // Make sure the data is in shared memory. __syncthreads(); // Active threads read the data to store. if( si < hidden_size_per_head ) { sum = smem_[smem_curr].out_kv[so*D + si]; } } // THREADS_PER_HEAD > 1. // Store the output. All the threads are active. if( si < hidden_size_per_head ) { *ptr_out_kv = sum; } // Move to next location. ptr_out_kv -= out_kv_stride_L; // Move the shared memory buffer. smem_curr = (smem_curr + 1) % 2; // Store to shared memory for Q and K. if( !is_last && si < D ) { smem_[smem_curr].qg[so*D + si] = ldg_qg; smem_[smem_curr].kv[so*D + si] = ldg_kv; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// template< int D, int THREADS_PER_HEAD > int lmha_bwd_(const Lmha_bwd_params<float> &params) { int block = D*THREADS_PER_HEAD*2; if( block >= 1024 || params.B > 65535 ) { return 1; } dim3 grid(params.H, params.B); lmha_bwd_kernel<D, THREADS_PER_HEAD><<<grid, block>>>(params); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// int lmha_bwd(const Lmha_bwd_params<float> &params) { int blocks = params.B * params.H; if( blocks < LOW_OCCUPANCY_THRESHOLD ) { return 1; } int hidden_size_per_head = max(params.E, params.M); int res = 1; if( hidden_size_per_head <= 32 ) { res = lmha_bwd_< 32, 1>(params); } else if( hidden_size_per_head <= 64 ) { res = lmha_bwd_< 64, 1>(params); } else if( hidden_size_per_head <= 128 ) { res = lmha_bwd_<128, 2>(params); } else if( hidden_size_per_head <= 256 ) { res = lmha_bwd_<256, 4>(params); } return res; } //////////////////////////////////////////////////////////////////////////////////////////////////// int lmha_bwd(const torch::Tensor queries, const torch::Tensor keys, const torch::Tensor values, const torch::Tensor grad_out, torch::Tensor grad_queries, torch::Tensor grad_keys, torch::Tensor grad_values) { // Make sure that we are using the correct GPU device torch::DeviceGuard _guard(queries.device()); // Make sure the inner-most dimension of the tensors is packed. assert(queries .stride(3) == 1); assert(keys .stride(3) == 1); assert(values .stride(3) == 1); assert(grad_out .stride(3) == 1); assert(grad_queries.stride(3) == 1); assert(grad_keys .stride(3) == 1); assert(grad_values .stride(3) == 1); // Extract the dimensions. int N = queries.size(0); int H = queries.size(1); int L = queries.size(2); int E = queries.size(3); int M = values.size (3); // Gradient on Q. // The structure of params. Lmha_params<float> params; set_params(params, grad_out, values, keys, grad_queries); // Launch the kernel. int res = lmha<false>(params); if( res ) { return res; } // Gradient on K and V together. 
Lmha_bwd_params<float> bwd_params; bwd_params.out_k = grad_keys.data_ptr<float>(); bwd_params.out_v = grad_values.data_ptr<float>(); bwd_params.q = queries.data_ptr<float>(); bwd_params.k = keys.data_ptr<float>(); bwd_params.v = values.data_ptr<float>(); bwd_params.g = grad_out.data_ptr<float>(); bwd_params.B = N; bwd_params.L = L; bwd_params.H = H; bwd_params.E = E; bwd_params.M = M; bwd_params.q_stride_B = queries.stride(0); bwd_params.q_stride_H = queries.stride(1); bwd_params.q_stride_L = queries.stride(2); bwd_params.k_stride_B = keys.stride(0); bwd_params.k_stride_H = keys.stride(1); bwd_params.k_stride_L = keys.stride(2); bwd_params.v_stride_B = values.stride(0); bwd_params.v_stride_H = values.stride(1); bwd_params.v_stride_L = values.stride(2); bwd_params.g_stride_B = grad_out.stride(0); bwd_params.g_stride_H = grad_out.stride(1); bwd_params.g_stride_L = grad_out.stride(2); bwd_params.out_k_stride_B = grad_keys.stride(0); bwd_params.out_k_stride_H = grad_keys.stride(1); bwd_params.out_k_stride_L = grad_keys.stride(2); bwd_params.out_v_stride_B = grad_values.stride(0); bwd_params.out_v_stride_H = grad_values.stride(1); bwd_params.out_v_stride_L = grad_values.stride(2); // Try to run the fused kernel. int fallback = lmha_bwd(bwd_params); // If it failed, fallback on separate kernels for K and V. if( fallback ) { // Gradient on K. // Launch the kernel. set_params(params, values, grad_out, queries, grad_keys); res = lmha<true>(params); if( res ) { return res; } // Gradient on V. // Launch the kernel. set_params(params, keys, queries, grad_out, grad_values); return lmha<true>(params); } // It worked... return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace nvidia #endif // #ifdef ENABLE_NVIDIA_OPTIMIZATIONS //////////////////////////////////////////////////////////////////////////////////////////////////// typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor; #define E_BLOCK_SIZE 8 __global__ void causal_dot_product_kernel( const float_accessor queries, const float_accessor keys, const float_accessor values, float_accessor result, const int N, const int H, const int L, const int E, const int M ) { int n = blockIdx.y; int h = blockIdx.z; int e_start = blockIdx.x * E_BLOCK_SIZE; int m = threadIdx.x % M; extern __shared__ float shared_mem[]; float* shared_kv = shared_mem; for (int e_local = 0; e_local < E_BLOCK_SIZE && e_local + e_start < E; e_local++) { shared_kv[m + e_local * M] = 0; } for (int t=0; t<L; t++) { float res = 0; for (int e_local = 0; e_local < E_BLOCK_SIZE && e_local + e_start < E; e_local++) { shared_kv[e_local*M + m] += keys[n][h][t][e_local + e_start] * values[n][h][t][m]; res += queries[n][h][t][e_local + e_start] * shared_kv[e_local*M + m]; } atomicAdd( &result[n][h][t][m], res ); } } //////////////////////////////////////////////////////////////////////////////////////////////////// void causal_dot_product_(const torch::Tensor queries, const torch::Tensor keys, const torch::Tensor values, torch::Tensor product) { // Make sure that we are using the correct GPU device torch::DeviceGuard _guard(queries.device()); int N = queries.size(0); int H = queries.size(1); int L = queries.size(2); int E = queries.size(3); int M = values.size(3); const int blocks_per_sequence = (E + E_BLOCK_SIZE - 1) / E_BLOCK_SIZE; dim3 blockDim(M, 1, 1); dim3 gridDim(blocks_per_sequence, N, H); const int shared_mem_forward = E_BLOCK_SIZE * M * sizeof(float); 
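  // Illustrative sizing note (the concrete numbers below are hypothetical,
  // not taken from the original source): with N=12, H=8, L=1024, E=64, M=64
  // and E_BLOCK_SIZE=8 as defined above, this launch uses
  //   blocks_per_sequence = (64 + 8 - 1) / 8 = 8,
  //   gridDim  = (8, 12, 8)   -> 768 thread blocks,
  //   blockDim = (64, 1, 1)   -> one thread per output feature m,
  //   shared_mem_forward = 8 * 64 * sizeof(float) = 2048 bytes per block,
  // i.e. each block keeps an 8 x 64 slice of the running K^T V state in
  // shared memory while it scans the L timesteps for its (E-block, sequence,
  // head) assignment.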
causal_dot_product_kernel<<<gridDim, blockDim, shared_mem_forward>>>( queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), product.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), N, H, L, E, M ); } //////////////////////////////////////////////////////////////////////////////////////////////////// void causal_dot_product(const torch::Tensor queries, const torch::Tensor keys, const torch::Tensor values, torch::Tensor product) { #ifdef ENABLE_NVIDIA_OPTIMIZATIONS int fallback = nvidia::lmha_fwd(queries, keys, values, product); #else int fallback = 1; #endif if( fallback ) { causal_dot_product_(queries, keys, values, product); } } //////////////////////////////////////////////////////////////////////////////////////////////////// #define M_BLOCK_SIZE 4 // we need shared memory to store // kv // Backward direction // kv_backwards // Shared memory usage __global__ void causal_dot_backward_query_key_kernel( const float_accessor queries, const float_accessor keys, const float_accessor values, const float_accessor grad_out, float_accessor grad_queries, float_accessor grad_keys, int N, int H, int L, int E, int M ) { int n = blockIdx.y; int h = blockIdx.z; int m_start = blockIdx.x * M_BLOCK_SIZE; int e = threadIdx.x % E; extern __shared__ float shared_mem[]; const int shared_kv_size = M_BLOCK_SIZE * E; float* shared_kv = shared_mem; float* shared_kv_bw = shared_mem + shared_kv_size; for (int m_local = 0; m_local < M_BLOCK_SIZE && m_local + m_start < M; m_local++) { shared_kv[m_local * E + e] = 0; shared_kv_bw[m_local * E + e] = 0; } for (int l=0; l<L; l++) { float res = 0, res_bw = 0; int l_b = L - l - 1; for (int m_local = 0; m_local < M_BLOCK_SIZE && m_local + m_start < M; m_local++) { shared_kv[m_local*E + e] += keys[n][h][l][e] * values[n][h][l][m_start + m_local]; shared_kv_bw[m_local*E + e] += queries[n][h][l_b][e] * grad_out[n][h][l_b][m_start + m_local]; res += grad_out[n][h][l][m_start + m_local] * shared_kv[m_local*E + e]; res_bw += values[n][h][l_b][m_start + m_local] * shared_kv_bw[m_local*E + e]; } atomicAdd( &grad_queries[n][h][l][e], res ); atomicAdd( &grad_keys[n][h][l_b][e], res_bw ); } } __global__ void causal_dot_backward_value_kernel( const float_accessor queries, const float_accessor keys, const float_accessor values, const float_accessor grad_out, float_accessor grad_keys, float_accessor grad_values, int N, int H, int L, int E, int M ) { int n = blockIdx.y; int h = blockIdx.z; int e_start = blockIdx.x * E_BLOCK_SIZE; int m = threadIdx.x % M; extern __shared__ float shared_mem[]; float* shared_kv = shared_mem; for (int e_local = 0; e_local < E_BLOCK_SIZE && e_local + e_start < E; e_local++) { shared_kv[m + e_local * M] = 0; } for (int l = 0; l < L; l++) { int l_b = L - l -1; float res = 0; for (int e_local = 0; e_local < E_BLOCK_SIZE && e_local + e_start < E; e_local++) { shared_kv[e_local*M + m] += queries[n][h][l_b][e_start + e_local] * grad_out[n][h][l_b][m]; res += keys[n][h][l_b][e_start + e_local] * shared_kv[e_local*M + m]; } atomicAdd( &grad_values[n][h][l_b][m], res ); } } //////////////////////////////////////////////////////////////////////////////////////////////////// void causal_dot_backward_(const torch::Tensor queries, const torch::Tensor keys, const torch::Tensor values, const torch::Tensor grad_out, torch::Tensor grad_queries, torch::Tensor grad_keys, torch::Tensor grad_values) { // Make sure that we are using the 
correct GPU device torch::DeviceGuard _guard(queries.device()); int N = queries.size(0); int H = queries.size(1); int L = queries.size(2); int E = queries.size(3); int M = values.size(3); const int blocks_per_sequence = (M + M_BLOCK_SIZE - 1) / M_BLOCK_SIZE; dim3 blockDim(E, 1, 1); dim3 gridDim(blocks_per_sequence, N, H); const int shared_mem_qk_backward = 2 * M_BLOCK_SIZE * E * sizeof(float); causal_dot_backward_query_key_kernel<<<gridDim, blockDim, shared_mem_qk_backward>>>( queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), N, H, L, E, M ); const int blocks_per_sequence_value = (E + E_BLOCK_SIZE - 1) / E_BLOCK_SIZE; dim3 blockDimv(M, 1, 1); dim3 gridDimv(blocks_per_sequence_value, N, H); const int shared_mem_v_backward = E_BLOCK_SIZE * M * sizeof(float); causal_dot_backward_value_kernel<<<gridDimv, blockDimv, shared_mem_v_backward>>>( queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), N, H, L, E, M ); } //////////////////////////////////////////////////////////////////////////////////////////////////// void causal_dot_backward(const torch::Tensor queries, const torch::Tensor keys, const torch::Tensor values, const torch::Tensor grad_out, torch::Tensor grad_queries, torch::Tensor grad_keys, torch::Tensor grad_values) { #ifdef ENABLE_NVIDIA_OPTIMIZATIONS int fallback = nvidia::lmha_bwd(queries, keys, values, grad_out, grad_queries, grad_keys, grad_values); #else int fallback = 1; #endif if( fallback ) { // Make sure that the gradient tensors are 0. This is needed because the // bwd pass might have partially executed and filled in some values in // grad_queries or grad_keys. // // This adds a small overhead every time we have to fall back to the old // kernel for the backward pass. grad_queries.zero_(); grad_keys.zero_(); causal_dot_backward_(queries, keys, values, grad_out, grad_queries, grad_keys, grad_values); } } //////////////////////////////////////////////////////////////////////////////////////////////////// PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def( "causal_dot_product", &causal_dot_product, "Compute the weighted sum of values but attending only to previous " "values." ); m.def( "causal_dot_backward", &causal_dot_backward, "Compute the gradients for the causal dot product." ); }
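
////////////////////////////////////////////////////////////////////////////////////////////////////

// Reference semantics, added here purely for clarity (this helper is not part
// of the original extension and is never called): for every batch n, head h,
// timestep t and output feature m, the kernels above compute
//
//   result[n][h][t][m] = sum_e q[n][h][t][e] * ( sum_{s <= t} k[n][h][s][e] * v[n][h][s][m] ),
//
// i.e. query t attends only to keys/values at positions s <= t. The plain
// host-side loops below are a minimal, unoptimized sketch of that recurrence;
// the function name and the contiguous [N][H][L][E] / [N][H][L][M] layout are
// assumptions made for illustration only.
static void causal_dot_product_reference_sketch(
    const float *q,    // [N, H, L, E], contiguous
    const float *k,    // [N, H, L, E], contiguous
    const float *v,    // [N, H, L, M], contiguous
    float *out,        // [N, H, L, M], contiguous
    int N, int H, int L, int E, int M) {
  float *kv = new float[(size_t)E * M];  // running sum of k_s v_s^T over s <= t
  for (int n = 0; n < N; ++n) {
    for (int h = 0; h < H; ++h) {
      for (size_t i = 0; i < (size_t)E * M; ++i) {
        kv[i] = 0.f;
      }
      const float *qb = q + ((size_t)n*H + h) * L * E;
      const float *kb = k + ((size_t)n*H + h) * L * E;
      const float *vb = v + ((size_t)n*H + h) * L * M;
      float *ob = out + ((size_t)n*H + h) * L * M;
      for (int t = 0; t < L; ++t) {
        // First fold timestep t into the running K^T V state (the same state
        // the CUDA kernels keep in shared memory) ...
        for (int e = 0; e < E; ++e) {
          for (int m = 0; m < M; ++m) {
            kv[(size_t)e*M + m] += kb[(size_t)t*E + e] * vb[(size_t)t*M + m];
          }
        }
        // ... then contract it with the query of timestep t.
        for (int m = 0; m < M; ++m) {
          float acc = 0.f;
          for (int e = 0; e < E; ++e) {
            acc += qb[(size_t)t*E + e] * kv[(size_t)e*M + m];
          }
          ob[(size_t)t*M + m] = acc;
        }
      }
    }
  }
  delete[] kv;
}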
de90b40b3bcc5972e66a329481d84e718904d62a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "regular_runtime.h" #include "../lib/cu_util.h" #include "args.h" #include "cpu_kernel.h" #include "gpu_kernel.hip" #include "data_type.h" #include <mpi.h> #include "../lib/macro.h" #include <stdio.h> #include "../lib/time_util.h" RegularRuntime::RegularRuntime( int num_procs, void *input, size_t input_size, Offset *offsets, size_t num_offsets, void *parameters, int parameter_size ):num_procs_(num_procs), input_(input), input_size_(input_size), offsets_(offsets), num_offsets_(num_offsets), parameter_(parameters), parameter_size_(parameter_size) { } void RegularRuntime::RegularInit() { //MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank_); this->num_gpus_ = GetGPUNumber(); this->num_devices_ = num_gpus_ + 1; splitter_ = new splitter(num_procs_, my_rank_, input_, input_size_, offsets_, num_offsets_, parameter_, parameter_size_); dp_mpi_ = splitter_->gen_partition(); input_pin_ = dp_mpi_->input_pin(); offsets_pin_ = dp_mpi_->offset_pin(); //create GPU reduction objects rog_ = (GO **)malloc(sizeof(GO *)*num_gpus_); //create CPU reduction objects roc_ = (CO *)malloc(sizeof(CO) * CPU_THREADS); memset(roc_, 0, sizeof(CO)*CPU_THREADS); for(int i = 0; i < CPU_THREADS; i++) roc_[i].num_buckets = R_NUM_BUCKETS_G; //init offsets task_offset_ = (size_t *)malloc(sizeof(size_t)); *task_offset_ = 0; mutex = (pthread_mutex_t *)malloc(sizeof(pthread_mutex_t)); pthread_mutex_init(mutex, NULL); //default map and reduce functions indices are 0 map_idx_ = 0; reduce_idx_ = 0; //create device related things parameters parameter_d_ = (void **)malloc(sizeof(void *) * num_gpus_); device_offsets_h = (int **)malloc(sizeof(int *)*num_gpus_); device_offsets_d = (int **)malloc(sizeof(int *)*num_gpus_); streams_ = (hipStream_t **)malloc(sizeof(hipStream_t *)*num_gpus_); //input_buffer_d_ = (void **)malloc(sizeof(void *)*num_gpus_); //offset_buffer_d_ = (Offset **)malloc(sizeof(Offset *)*num_gpus_); GO *rogh = (GO *)malloc(sizeof(GO)); memset(rogh, 0, sizeof(GO)); rogh->num_buckets = R_NUM_BUCKETS_G; //init offsets for(int i = 0; i < GPU_THREADS * GPU_BLOCKS/WARP_SIZE; i++) { rogh->offsets[i] = GLOBAL_POOL_SIZE * i / (GPU_THREADS * GPU_BLOCKS / WARP_SIZE); } for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(hipSetDevice(i)); CUDA_SAFE_CALL(hipMalloc(&parameter_d_[i], parameter_size_)); CUDA_SAFE_CALL(hipMemcpy(parameter_d_[i], parameter_, parameter_size_, hipMemcpyHostToDevice)); streams_[i] = (hipStream_t *)malloc(sizeof(hipStream_t) * 2); hipStreamCreate(&streams_[i][0]); hipStreamCreate(&streams_[i][1]); hipHostMalloc(&device_offsets_h[i], sizeof(int)*2, hipHostMallocPortable|hipHostMallocMapped); device_offsets_h[i][0] = 0; device_offsets_h[i][1] = 0; hipMalloc(&device_offsets_d[i], sizeof(int)*2); //CUDA_SAFE_CALL(hipMalloc(&input_buffer_d_[i], input_size_/16*2)); //CUDA_SAFE_CALL(hipMalloc(&offset_buffer_d_[i], sizeof(Offset)*num_offsets_/16*2)); //CUDA_SAFE_CALL(hipMalloc(&rog_[i], sizeof(GO)*2)); CUDA_SAFE_CALL(hipMalloc((void **)&rog_[i], sizeof(GO))); CUDA_SAFE_CALL(hipMemcpy(rog_[i], rogh, sizeof(GO), hipMemcpyHostToDevice)); //allocate peer device rog buffers if(i==0) { CUDA_SAFE_CALL(hipMalloc((void **)&rog_peer_, sizeof(GO))); CUDA_SAFE_CALL(hipMemcpy(rog_peer_, rogh, sizeof(GO), hipMemcpyHostToDevice)); } //CUDA_SAFE_CALL(hipMemcpy(rog_[i] + 1, rogh, sizeof(GO), hipMemcpyHostToDevice)); } free(rogh); } void *RegularRuntime::start_cpu(void *arg) { RegularRuntime *runtime = (RegularRuntime *)arg; 
struct cpu_args_reg args[CPU_THREADS]; pthread_t tid[CPU_THREADS]; double before_cpu = rtclock(); for(int i = 0; i < CPU_THREADS; i++) { args[i].tid = i; args[i].runtime = runtime; pthread_create(&tid[i], NULL, compute_cpu_reg, &args[i]); } for(int j = 0; j < CPU_THREADS; j++) { pthread_join(tid[j], NULL); } double after_cpu = rtclock(); printf("%d CPU time: %f\n", runtime->my_rank_, after_cpu - before_cpu); return (void *)0; } void *RegularRuntime::start_gpu(void *arg) { return (void *)0; } void RegularRuntime::RegularStart() { pthread_t tid_cpu; //create CPU thread pthread_create(&tid_cpu, NULL, start_cpu, this); size_t total_offsets = dp_mpi_->num_offsets(); dim3 grid(GPU_BLOCKS, 1, 1); dim3 block(GPU_THREADS, 1, 1); int count = 0; printf("O+O+O+O+O+total offsets: %d %d\n", my_rank_, total_offsets); double before_gpu = rtclock(); while(*task_offset_ < total_offsets) { for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(hipSetDevice(i)); int size; size_t start; //int starts[2]; //starting number for each stream //int sizes[2]; //size for each stream //int input_start[2]; //int input_end[2]; //int input_size[2]; pthread_mutex_lock(mutex); start = *task_offset_; if(start >= total_offsets) { pthread_mutex_unlock(mutex); break; } int remain = total_offsets - start; *task_offset_ += R_GPU_PREALLOC_SIZE; size = *task_offset_ < total_offsets ? R_GPU_PREALLOC_SIZE : remain; pthread_mutex_unlock(mutex); int tmp = 0; CUDA_SAFE_CALL(hipMemcpy(device_offsets_d[i], &tmp, sizeof(int), hipMemcpyHostToDevice)); //TODO: launch kernel for stream 0 hipLaunchKernelGGL(( compute_gpu), dim3(grid), dim3(block), 0, 0, input_pin_, offsets_pin_, device_offsets_d[i], size, start, rog_[i], parameter_d_[i], map_idx_, reduce_idx_ ); } count++; if(count%4==0) { for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(hipSetDevice(i)); CUDA_SAFE_CALL(hipDeviceSynchronize()); } } } if(count%4!=0) { for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(hipSetDevice(i)); CUDA_SAFE_CALL(hipDeviceSynchronize()); } } double after_gpu = rtclock(); printf("%d gpu time: %f\n", my_rank_, after_gpu - before_gpu); //start GPUs //join GPU threads //for(int i = 0; i < num_gpus_; i++) //{ // pthread_join(tid_gpu[i], NULL); //} //join CPU thread pthread_join(tid_cpu, NULL); //merge_device(); } struct output RegularRuntime::RegularGetOutput() { } void RegularRuntime::merge_device() { //first, merge gpu objects. Send objects from gpus to gpu0 for(int i = 1; i < num_gpus_; i++) { CUDA_SAFE_CALL(hipSetDevice(0)); CUDA_SAFE_CALL(hipMemcpyPeer(rog_peer_, 0, rog_[i], i, sizeof(GO))); merge_two_gpu_objects(rog_[0], rog_peer_); } //next, merge cpu object to here. CUDA_SAFE_CALL(hipMemcpy(rog_peer_, roc_, sizeof(GO), hipMemcpyHostToDevice)); merge_two_gpu_objects(rog_[0], rog_peer_); } void RegularRuntime::merge_two_gpu_objects(GO *object1, GO *object2) { dim3 grid(GPU_BLOCKS, 1, 1); dim3 block(GPU_THREADS, 1, 1); hipLaunchKernelGGL(( merged), dim3(grid), dim3(block), 0, 0, object1, object2, reduce_idx_ ); CUDA_SAFE_CALL(hipDeviceSynchronize()); } void RegularRuntime::merge_nodes() { for(int i = 2; i <= num_procs_; i*=2) { int step = i/2; //receiver if(my_rank_ % i == 0) { int sender = my_rank_ + step; } //sender if((my_rank_ - step) > 0 && ((my_rank_ - step)%i == 0)) { int receiver = my_rank_ - step; } } } void RegularRuntime::set_map_idx(int idx) { this->map_idx_ = idx; } void RegularRuntime::set_reduce_idx(int idx) { this->reduce_idx_ = idx; }
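
// Note added for clarity (not part of the original source): RegularStart()
// above drives a simple dynamic work-sharing scheme between the CPU worker
// threads and the GPU driver loop. A single counter (*task_offset_), guarded
// by a pthread mutex, hands out chunks of offsets on a first-come basis; a
// minimal sketch of that claiming idiom, using the member names from
// RegularStart(), is:
//
//   pthread_mutex_lock(mutex);
//   size_t start = *task_offset_;            // first offset of the claimed chunk
//   *task_offset_ += R_GPU_PREALLOC_SIZE;    // reserve the chunk
//   pthread_mutex_unlock(mutex);
//   size_t size = start + R_GPU_PREALLOC_SIZE <= total_offsets
//               ? R_GPU_PREALLOC_SIZE : total_offsets - start;
//   // ... process offsets [start, start + size) on this device ...
//
// (the real loop additionally re-checks start against total_offsets before
// committing, and batches hipDeviceSynchronize() calls every four launches).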
de90b40b3bcc5972e66a329481d84e718904d62a.cu
#include "regular_runtime.h" #include "../lib/cu_util.h" #include "args.h" #include "cpu_kernel.h" #include "gpu_kernel.cu" #include "data_type.h" #include <mpi.h> #include "../lib/macro.h" #include <stdio.h> #include "../lib/time_util.h" RegularRuntime::RegularRuntime( int num_procs, void *input, size_t input_size, Offset *offsets, size_t num_offsets, void *parameters, int parameter_size ):num_procs_(num_procs), input_(input), input_size_(input_size), offsets_(offsets), num_offsets_(num_offsets), parameter_(parameters), parameter_size_(parameter_size) { } void RegularRuntime::RegularInit() { //MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank_); this->num_gpus_ = GetGPUNumber(); this->num_devices_ = num_gpus_ + 1; splitter_ = new splitter(num_procs_, my_rank_, input_, input_size_, offsets_, num_offsets_, parameter_, parameter_size_); dp_mpi_ = splitter_->gen_partition(); input_pin_ = dp_mpi_->input_pin(); offsets_pin_ = dp_mpi_->offset_pin(); //create GPU reduction objects rog_ = (GO **)malloc(sizeof(GO *)*num_gpus_); //create CPU reduction objects roc_ = (CO *)malloc(sizeof(CO) * CPU_THREADS); memset(roc_, 0, sizeof(CO)*CPU_THREADS); for(int i = 0; i < CPU_THREADS; i++) roc_[i].num_buckets = R_NUM_BUCKETS_G; //init offsets task_offset_ = (size_t *)malloc(sizeof(size_t)); *task_offset_ = 0; mutex = (pthread_mutex_t *)malloc(sizeof(pthread_mutex_t)); pthread_mutex_init(mutex, NULL); //default map and reduce functions indices are 0 map_idx_ = 0; reduce_idx_ = 0; //create device related things parameters parameter_d_ = (void **)malloc(sizeof(void *) * num_gpus_); device_offsets_h = (int **)malloc(sizeof(int *)*num_gpus_); device_offsets_d = (int **)malloc(sizeof(int *)*num_gpus_); streams_ = (cudaStream_t **)malloc(sizeof(cudaStream_t *)*num_gpus_); //input_buffer_d_ = (void **)malloc(sizeof(void *)*num_gpus_); //offset_buffer_d_ = (Offset **)malloc(sizeof(Offset *)*num_gpus_); GO *rogh = (GO *)malloc(sizeof(GO)); memset(rogh, 0, sizeof(GO)); rogh->num_buckets = R_NUM_BUCKETS_G; //init offsets for(int i = 0; i < GPU_THREADS * GPU_BLOCKS/WARP_SIZE; i++) { rogh->offsets[i] = GLOBAL_POOL_SIZE * i / (GPU_THREADS * GPU_BLOCKS / WARP_SIZE); } for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(cudaSetDevice(i)); CUDA_SAFE_CALL(cudaMalloc(&parameter_d_[i], parameter_size_)); CUDA_SAFE_CALL(cudaMemcpy(parameter_d_[i], parameter_, parameter_size_, cudaMemcpyHostToDevice)); streams_[i] = (cudaStream_t *)malloc(sizeof(cudaStream_t) * 2); cudaStreamCreate(&streams_[i][0]); cudaStreamCreate(&streams_[i][1]); cudaHostAlloc(&device_offsets_h[i], sizeof(int)*2, cudaHostAllocPortable|cudaHostAllocMapped); device_offsets_h[i][0] = 0; device_offsets_h[i][1] = 0; cudaMalloc(&device_offsets_d[i], sizeof(int)*2); //CUDA_SAFE_CALL(cudaMalloc(&input_buffer_d_[i], input_size_/16*2)); //CUDA_SAFE_CALL(cudaMalloc(&offset_buffer_d_[i], sizeof(Offset)*num_offsets_/16*2)); //CUDA_SAFE_CALL(cudaMalloc(&rog_[i], sizeof(GO)*2)); CUDA_SAFE_CALL(cudaMalloc((void **)&rog_[i], sizeof(GO))); CUDA_SAFE_CALL(cudaMemcpy(rog_[i], rogh, sizeof(GO), cudaMemcpyHostToDevice)); //allocate peer device rog buffers if(i==0) { CUDA_SAFE_CALL(cudaMalloc((void **)&rog_peer_, sizeof(GO))); CUDA_SAFE_CALL(cudaMemcpy(rog_peer_, rogh, sizeof(GO), cudaMemcpyHostToDevice)); } //CUDA_SAFE_CALL(cudaMemcpy(rog_[i] + 1, rogh, sizeof(GO), cudaMemcpyHostToDevice)); } free(rogh); } void *RegularRuntime::start_cpu(void *arg) { RegularRuntime *runtime = (RegularRuntime *)arg; struct cpu_args_reg args[CPU_THREADS]; pthread_t tid[CPU_THREADS]; 
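    // Fan out CPU_THREADS pthread workers over compute_cpu_reg (one
    // cpu_args_reg entry per worker), join them all, and report the elapsed
    // wall-clock time of the CPU portion of the work via rtclock().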
double before_cpu = rtclock(); for(int i = 0; i < CPU_THREADS; i++) { args[i].tid = i; args[i].runtime = runtime; pthread_create(&tid[i], NULL, compute_cpu_reg, &args[i]); } for(int j = 0; j < CPU_THREADS; j++) { pthread_join(tid[j], NULL); } double after_cpu = rtclock(); printf("%d CPU time: %f\n", runtime->my_rank_, after_cpu - before_cpu); return (void *)0; } void *RegularRuntime::start_gpu(void *arg) { return (void *)0; } void RegularRuntime::RegularStart() { pthread_t tid_cpu; //create CPU thread pthread_create(&tid_cpu, NULL, start_cpu, this); size_t total_offsets = dp_mpi_->num_offsets(); dim3 grid(GPU_BLOCKS, 1, 1); dim3 block(GPU_THREADS, 1, 1); int count = 0; printf("O+O+O+O+O+total offsets: %d %d\n", my_rank_, total_offsets); double before_gpu = rtclock(); while(*task_offset_ < total_offsets) { for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(cudaSetDevice(i)); int size; size_t start; //int starts[2]; //starting number for each stream //int sizes[2]; //size for each stream //int input_start[2]; //int input_end[2]; //int input_size[2]; pthread_mutex_lock(mutex); start = *task_offset_; if(start >= total_offsets) { pthread_mutex_unlock(mutex); break; } int remain = total_offsets - start; *task_offset_ += R_GPU_PREALLOC_SIZE; size = *task_offset_ < total_offsets ? R_GPU_PREALLOC_SIZE : remain; pthread_mutex_unlock(mutex); int tmp = 0; CUDA_SAFE_CALL(cudaMemcpy(device_offsets_d[i], &tmp, sizeof(int), cudaMemcpyHostToDevice)); //TODO: launch kernel for stream 0 compute_gpu<<<grid, block>>> ( input_pin_, offsets_pin_, device_offsets_d[i], size, start, rog_[i], parameter_d_[i], map_idx_, reduce_idx_ ); } count++; if(count%4==0) { for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(cudaSetDevice(i)); CUDA_SAFE_CALL(cudaDeviceSynchronize()); } } } if(count%4!=0) { for(int i = 0; i < num_gpus_; i++) { CUDA_SAFE_CALL(cudaSetDevice(i)); CUDA_SAFE_CALL(cudaDeviceSynchronize()); } } double after_gpu = rtclock(); printf("%d gpu time: %f\n", my_rank_, after_gpu - before_gpu); //start GPUs //join GPU threads //for(int i = 0; i < num_gpus_; i++) //{ // pthread_join(tid_gpu[i], NULL); //} //join CPU thread pthread_join(tid_cpu, NULL); //merge_device(); } struct output RegularRuntime::RegularGetOutput() { } void RegularRuntime::merge_device() { //first, merge gpu objects. Send objects from gpus to gpu0 for(int i = 1; i < num_gpus_; i++) { CUDA_SAFE_CALL(cudaSetDevice(0)); CUDA_SAFE_CALL(cudaMemcpyPeer(rog_peer_, 0, rog_[i], i, sizeof(GO))); merge_two_gpu_objects(rog_[0], rog_peer_); } //next, merge cpu object to here. CUDA_SAFE_CALL(cudaMemcpy(rog_peer_, roc_, sizeof(GO), cudaMemcpyHostToDevice)); merge_two_gpu_objects(rog_[0], rog_peer_); } void RegularRuntime::merge_two_gpu_objects(GO *object1, GO *object2) { dim3 grid(GPU_BLOCKS, 1, 1); dim3 block(GPU_THREADS, 1, 1); merged<<<grid, block>>> ( object1, object2, reduce_idx_ ); CUDA_SAFE_CALL(cudaDeviceSynchronize()); } void RegularRuntime::merge_nodes() { for(int i = 2; i <= num_procs_; i*=2) { int step = i/2; //receiver if(my_rank_ % i == 0) { int sender = my_rank_ + step; } //sender if((my_rank_ - step) > 0 && ((my_rank_ - step)%i == 0)) { int receiver = my_rank_ - step; } } } void RegularRuntime::set_map_idx(int idx) { this->map_idx_ = idx; } void RegularRuntime::set_reduce_idx(int idx) { this->reduce_idx_ = idx; }
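
// Note added for clarity (not part of the original source): merge_nodes()
// above lays out a binomial-tree reduction schedule over the MPI ranks: at
// step i = 2, 4, 8, ..., every rank divisible by i acts as a receiver for the
// rank i/2 above it, and that partner is the sender. As written, the function
// only computes the sender/receiver pairing; the actual exchange and merge of
// the reduction objects is not performed here.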
48a8e58d59fa8bad5e3814b236c857de381328e8.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2013 Science and Technology Facilities Council (STFC) * Authors: Evgueni Ovtchinnikov and Jonathan Hogg * * This file contains CUDA kernels for partial LL^T and LDL^T factorization * of dense submatrices. */ #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include "ssids/gpu/kernels/datatypes.h" #include "cuda/cuda_check.h" #define min(x,y) ((x) < (y) ? (x) : (y)) #define FAVOUR2x2 100 #define CBLOCKS 3 #define MCBLOCKS 8 #define BLOCKS 7 #define MBLOCKS 11 #define BLOCK_SIZE 8 #define MAX_CUDA_BLOCKS 65535 using namespace spral::ssids::gpu; namespace /* anon */ { extern __shared__ char SharedMemory[]; __global__ void cu_block_ldlt_init( int ncols, int* stat, int* ind ) { if ( threadIdx.x == 0 ) { stat[0] = ncols; // successful pivots stat[1] = 0; } if ( threadIdx.x < ncols ) ind[threadIdx.x] = ncols + 1; } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_init_chol_fact( unsigned int block, int nrows, // number of rows of the factorized matrix int ncols, // number of columns thereof ELEMENT_TYPE* a, // array of elements of A int lda, // leading dimension of a ELEMENT_TYPE* fs // initial L factor (shared mem) ) { const int SIZE_X = TILES*TILE_SIZE; int x; // row index for ( int tile = 0; tile < TILES; tile++ ) { if ( tile ) { // load A's offdiagonal tiles into shared memory x = ncols + threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; // offdiagonal row index in A fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y] = ( x < nrows && threadIdx.y < ncols ) ? a[x + lda*threadIdx.y] : 0.0; } else { // load the diagonal (pivot) tile fs[threadIdx.x + SIZE_X*threadIdx.y] = ( threadIdx.x < ncols && threadIdx.y < ncols ) ? 
a[threadIdx.x + lda*threadIdx.y] : 0.0; } } } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_save_chol_fact( unsigned int block, int nrows, // number of rows of the factorized matrix int ncols, // number of columns thereof ELEMENT_TYPE* fs, // initial L factor (shared mem) ELEMENT_TYPE* f, // array of elements of L int ldf // leading dimension of f ) { const int SIZE_X = TILES*TILE_SIZE; int x; // row index for ( int tile = 0; tile < TILES; tile++ ) { if ( tile ) { // upload the relevant elements of fs to f x = ncols + threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; if ( x < nrows && threadIdx.y < ncols ) f[x + ldf*threadIdx.y] = fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y]; } else if ( block == 0 ) { // upload to f and fd if ( threadIdx.x < ncols && threadIdx.y < ncols ) f[threadIdx.x + ldf*threadIdx.y] = fs[threadIdx.x + SIZE_X*threadIdx.y]; } } // loop through tiles ends here } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_block_chol( int block, int nrows, int ncols, ELEMENT_TYPE* a, int lda, ELEMENT_TYPE* f, int ldf, int* stat ) { const int SIZE_X = TILES*TILE_SIZE; int ip; ELEMENT_TYPE v; ELEMENT_TYPE *work = (ELEMENT_TYPE*)SharedMemory; // load A into shared memory dev_init_chol_fact< ELEMENT_TYPE, TILE_SIZE, TILES > ( block, nrows, ncols, a, lda, work ); __syncthreads(); for ( ip = 0; ip < ncols; ip++ ) { v = work[ip + SIZE_X*ip]; if ( v <= 0.0 ) { if ( block == 0 && threadIdx.x == 0 && threadIdx.y == 0 ) stat[0] = ip; return; } v = sqrt(v); __syncthreads(); if ( threadIdx.y < TILES ) work[threadIdx.x + TILE_SIZE*threadIdx.y + SIZE_X*ip] /= v; __syncthreads(); if ( threadIdx.y > ip && threadIdx.y < ncols ) { for ( int x = threadIdx.x + TILE_SIZE; x < SIZE_X; x += TILE_SIZE ) work[x + SIZE_X*threadIdx.y] -= work[threadIdx.y + SIZE_X*ip]*work[x + SIZE_X*ip]; if ( threadIdx.x > ip ) work[threadIdx.x + SIZE_X*threadIdx.y] -= work[threadIdx.y + SIZE_X*ip] *work[threadIdx.x + SIZE_X*ip]; } __syncthreads(); } if ( block == 0 && threadIdx.x == 0 && threadIdx.y == 0 ) stat[0] = ncols; // save the L factor dev_save_chol_fact< ELEMENT_TYPE, TILE_SIZE, TILES > ( block, nrows, ncols, work, f, ldf ); } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __global__ void cu_block_chol( int nrows, int ncols, ELEMENT_TYPE* a, int lda, ELEMENT_TYPE* f, int ldf, int* stat ) { dev_block_chol< ELEMENT_TYPE, TILE_SIZE, TILES > ( blockIdx.x, nrows, ncols, a, lda, f, ldf, stat ); } struct multinode_chol_type { int nrows; int ncols; double *lcol; }; // input data type for multiblock_fact and multiblock_chol // each CUDA block gets a copy struct multiblock_fact_type { int nrows; // no node's rows int ncols; // no node's cols int ld; // node's leading dimension int p; // no rows above the pivot block double *aptr; // pointer to this node's A matrix double *ldptr; // pointer to this node's LD matrix int offf; // this node's L offset in the array of all Ls double *dptr; // pointer to this node's D in array of all Ds int node; // node index int offb; // the idx of the first CUDA block processing this node }; __global__ void cu_multiblock_fact_setup( struct multinode_fact_type *ndata, struct multiblock_fact_type *mbfdata, int step, int block_size, int blocks, int offb, int* stat, int* ind, int* nl ) { ndata += blockIdx.x; int ncols = ndata->ncols; int nrows = ndata->nrows; double *lval = ndata->lval; double *ldval = ndata->ldval; double *dval = ndata->dval; 
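  // Per-node pivoting bookkeeping (interpretation inferred from the code
  // below, kept here as a reading aid): ib and jb delimit the block column
  // currently being pivoted, done counts columns already eliminated, rght is
  // the right edge of the current step-wide panel, and lbuf is this node's
  // offset into the combined L buffer. After advancing this window the kernel
  // appends one multiblock_fact_type descriptor per CUDA block needed for the
  // remaining rows, using atomicAdd on nl[0] to reserve slots in mbfdata.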
int ib = ndata->ib; int jb = ndata->jb; int done = ndata->done; int rght = ndata->rght; int lbuf = ndata->lbuf; if ( jb < ib ) return; int pivoted = stat[blockIdx.x]; if ( pivoted > 0 ) { done += pivoted; if ( jb == rght ) jb = done; } if ( jb <= ncols ) ib = jb + 1; __syncthreads(); if ( threadIdx.x == 0 && threadIdx.y == 0 ) { ndata->ib = ib; ndata->jb = jb; ndata->done = done; } if ( ib > ncols ) return; if ( ib > rght ) { rght += step; if ( rght > ncols ) rght = ncols; if ( threadIdx.x == 0 && threadIdx.y == 0 ) { ndata->rght = rght; } } int rb = nrows - done; int cb = rght - ib + 1; if ( cb > block_size ) cb = block_size; if ( threadIdx.x == 0 && threadIdx.y == 0 ) { ndata->jb = jb + cb; stat[blockIdx.x] = cb; // successful pivots } if ( ind && threadIdx.x < cb && threadIdx.y == 0 ) ind[blockIdx.x*block_size + threadIdx.x] = cb + 1; int k = (rb - cb - 1)/(block_size*(blocks - 1)) + 1; __shared__ int ncb; if ( threadIdx.x == 0 && threadIdx.y == 0 ) ncb = atomicAdd(&nl[0], k); __shared__ int iwork[9]; __shared__ double *lptr, *ldptr, *dptr; if ( threadIdx.x == 0 && threadIdx.y == 0 ) { iwork[0] = cb; iwork[1] = rb; iwork[2] = nrows; iwork[3] = ib - done - 1; lptr = lval + done + (ib - 1)*nrows; ldptr = ldval + done + (ib - 1)*nrows; iwork[5] = lbuf + done; dptr = dval + 2*done; iwork[7] = offb + blockIdx.x; iwork[8] = ncb; } __syncthreads(); for ( int i = threadIdx.y; i < k; i += blockDim.y ) { switch(threadIdx.x) { case 0: mbfdata[ncb+i].ncols = iwork[0]; break; case 1: mbfdata[ncb+i].nrows = iwork[1]; break; case 2: mbfdata[ncb+i].ld = iwork[2]; break; case 3: mbfdata[ncb+i].p = iwork[3]; break; case 4: mbfdata[ncb+i].aptr = lptr; mbfdata[ncb+i].ldptr = ldptr; break; case 5: mbfdata[ncb+i].offf = iwork[5]; break; case 6: mbfdata[ncb+i].dptr = dptr; break; case 7: mbfdata[ncb+i].node = iwork[7]; break; case 8: mbfdata[ncb+i].offb = i; break; } } } //////////////////////////////////////////////////////////////////////////// /* Functions below participate in the LDLT factorization | A_u P| |L_u| Q A P = |P^T A_d P| = |L_d| * D * (L_d)^T = L * D * (L_d)^T (LDLT) | A_l P| |L_l| where A is nrows x ncols, P is a ncols x ncols permutation matrix, |I_u | Q = | P^T |, where I_u and I_l are identities, | I_l| L_d is a ncols x ncols lower triangular matrix with unit main diagonal and D is a ncols x ncols block diagonal matrix with 1x1 and 2x2 blocks on the main diagonal. Common variable names: nrow number of rows in A/L ncols numbre of columns in A/L offp number of rows in A_u */ //////////////////////////////////////////////////////////////////////////// /* The next function initializes L and the main diagonal and subdiagonal of D**(-1). L and L*D are stored in two shared memory arrays fs and fds, each arranged into TILES square tiles of size TILE_SIZE. The kernel for factorizing just one node uses TILES = 7, and the one for simultaneous factorization of several nodes uses TILES = 11. Each CUDA block uses dev_init_fact to load A_d into the first tile of fs and up to (TILES - 1)*TILE_SIZE rows of A_u and A_l into the remaining TILES - 1 tiles. The two diagonals of D**(-1) are stored in a shared memory array of size 2*TILE_SIZE, initialized to 0 by this kernel. 
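   For a concrete sense of scale (an illustration added here, assuming the
   kernels are instantiated with TILE_SIZE = 8, matching BLOCK_SIZE above):
   the single-node kernel (TILES = 7) keeps a 56-row by 8-column panel of L
   and another of L*D in shared memory, i.e. 2 * 56 * 8 doubles = 7 KB, while
   the multi-node kernel (TILES = 11) keeps 88 rows per panel. The array ds
   interleaves the two stored diagonals of D**(-1) by column, as ds[2*j] and
   ds[2*j + 1] for column j, which is why it has 2*TILE_SIZE elements.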
*/ template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_init_fact( unsigned int block, // relative CUDA block number int nrows, int ncols, int offp, ELEMENT_TYPE* a, // array of elements of A int lda, // leading dimension of a ELEMENT_TYPE* fs, // initial L factor (shared mem) ELEMENT_TYPE* ds // initial D**(-1) (shared mem) ) { const int SIZE_X = TILES*TILE_SIZE; int x, y; // position indices y = threadIdx.y % TILE_SIZE; // fs & fds column processed by this thread if ( threadIdx.y < TILE_SIZE ) { for ( int tile = 0; tile < TILES; tile += 2 ) { if ( tile ) { // load A_u and A_l's even tiles into shared memory x = threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; // offdiagonal row index in A if ( x >= offp ) x += ncols; // skip A_d fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y] = ( x < nrows && threadIdx.y < ncols ) ? a[x + lda*threadIdx.y] : 0.0; } else { // load A_d fs[threadIdx.x + SIZE_X*threadIdx.y] = ( threadIdx.x < ncols && threadIdx.y < ncols ) ? a[offp + threadIdx.x + lda*threadIdx.y] : 0.0; } } } else { // load A_u and A_l's odd tiles into shared memory for ( int tile = 1; tile < TILES; tile += 2 ) { x = threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; if ( x >= offp ) x += ncols; fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*y] = ( x < nrows && y < ncols ) ? a[x + lda*y] : 0.0; } } // main diagonal and subdiagonal of D**(-1) set to 0 if ( threadIdx.y < 2 ) ds[2*threadIdx.x + threadIdx.y] = 0.0; } /* The next function uploads L, L*D and D to global memory */ template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_save_fact( unsigned int block, int nrows, int ncols, int offp, int my, // save only if my is non-zero ELEMENT_TYPE* fs, // L (shared mem) ELEMENT_TYPE* fds, // L*D (shared mem) ELEMENT_TYPE* ds, // 2 diags of D**(-1) (shared mem) ELEMENT_TYPE* f, // L (global mem) int ldf, // leading dimension of f ELEMENT_TYPE* fd, // L*D (global mem) int ldfd, // leading dimension of fd ELEMENT_TYPE* d // 2 diags of D**(-1) (global mem) ) { const int SIZE_X = TILES*TILE_SIZE; int x, y; // position indices y = threadIdx.y % TILE_SIZE; // fs & fds column processed by this thread if ( threadIdx.y < TILE_SIZE ) { // warps 0, 1 for ( int tile = 0; tile < TILES; tile += 2 ) { if ( tile ) { // upload L_u, L_l, L_u*D and L_l*D's even tiles x = threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; if ( x >= offp ) // skip L_d x += ncols; if ( x < nrows && threadIdx.y < ncols && my ) { f[x + ldf*threadIdx.y] = fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y]; fd[x + ldfd*threadIdx.y] = fds[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y]; } } else if ( block == 0 ) { // upload L_d and L_d*D if ( threadIdx.x < ncols && threadIdx.y < ncols && my ) { f[offp + threadIdx.x + ldf*threadIdx.y] = fs[threadIdx.x + SIZE_X*threadIdx.y]; fd[offp + threadIdx.x + ldfd*threadIdx.y] = fds[threadIdx.x + SIZE_X*threadIdx.y]; } // upload D**(-1) if ( threadIdx.x < 2 && threadIdx.y < ncols ) d[threadIdx.x + 2*threadIdx.y] = ds[threadIdx.x + 2*threadIdx.y]; } } // loop through even tiles ends here } else { // upload L_u, L_l, L_u*D and L_l*D's odd tiles (warps 2, 3) for ( int tile = 1; tile < TILES; tile += 2 ) { x = threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; if ( x >= offp ) // skip L_d x += ncols; if ( x < nrows && y < ncols && my ) { f[x + ldf*y] = fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*y]; fd[x + ldfd*y] = fds[threadIdx.x + tile*TILE_SIZE + 
SIZE_X*y]; } } } } /* The next function finds the largest element of the first row of A_d */ template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_init_max( int ncols, const ELEMENT_TYPE* fs, int mx, // this thread mask int* mask, // pivot index/mask bool* not_max, // "not largest" flag int& jps, // the index of the largest element int& quit // pivoting failure flag ) { const int SIZE_X = TILES*TILE_SIZE; if ( threadIdx.y == 0 ) { mask[threadIdx.x] = mx; // initialize the pivot index not_max[threadIdx.x] = mx; // initialize the "not largest" flag } if ( threadIdx.x == 0 && threadIdx.y == 0 ) { jps = TILE_SIZE; // initialize pivot col jp: cf the case of a tie below quit = 0; // initialize failure flag } __syncthreads(); // check if the element in the column threadIdx.x // of the first row is (one of) the largest one(s) if ( threadIdx.x < ncols && threadIdx.y < ncols && threadIdx.x != threadIdx.y && abs(fs[SIZE_X*threadIdx.x]) < abs(fs[SIZE_X*threadIdx.y]) ) not_max[threadIdx.x] = 1; // no good: a larger value exists elsewhere __syncthreads(); // select the leftmost among the largest elements of the row if ( threadIdx.y == 0 && not_max[threadIdx.x] == 0 ) atomicMin(&jps, threadIdx.x); // in case of a tie, choose the leftmost __syncthreads(); } /* The next function selects pivot based on the pending row number ip and the column number for the largest element in this row. Three options are considered: (1) use 1x1 pivot a11 = fs[ip + ld*ip], (2) use 1x1 pivot a22 = fs[jp + ld*jp], (3) use 2x2 pivot | a_11 a_12 | | a_12 a_22 |, where a12 = fs[ip + ld*jp]. The pivot that has the smallest inverse is selected. */ template< typename ELEMENT_TYPE > __device__ void dev_select_pivots_at_root( const ELEMENT_TYPE* fs, int ld, // leading dimension of fs int& ip, int& jp, ELEMENT_TYPE& a11, ELEMENT_TYPE& a12, ELEMENT_TYPE& a22, ELEMENT_TYPE& det ) { // select the pivot based on the row's largest element index if ( ip != jp ) { // choose between 1x1 and 2x2 pivots a11 = fs[ip + ld*ip]; a12 = fs[ip + ld*jp]; a22 = fs[jp + ld*jp]; det = a11*a22 - a12*a12; // determinant of 2x2 pivot stored in det if ( (abs(a12) + abs(a11) + abs(a22))*abs(a11) > abs(det) ) { if ( abs(a11) > abs(a22) ) { // choose the best 1x1 alternative jp = ip; // select a11 det = a11; // pivot value stored in det } else { ip = jp; // select a22 det = a22; // pivot value stored in det } } else if ( (abs(a12) + abs(a11) + abs(a22))*abs(a22) > abs(det) ) { ip = jp; // select a22 det = a22; // pivot value stored in det } } else det = fs[ip + ld*ip]; // pivot value stored in det } template< typename ELEMENT_TYPE > __device__ void dev_select_pivots( const ELEMENT_TYPE* fs, int ld, // leading dimension of fs int& ip, int& jp, ELEMENT_TYPE& a11, ELEMENT_TYPE& a12, ELEMENT_TYPE& a22, ELEMENT_TYPE& det ) { // select the pivot based on the row's largest element index if ( ip != jp ) { // choose between 1x1 and 2x2 pivots a11 = fs[ip + ld*ip]; a12 = fs[ip + ld*jp]; a22 = fs[jp + ld*jp]; det = a11*a22 - a12*a12; // determinant of 2x2 pivot stored in det if ( (abs(a12) + abs(a11) + abs(a22))*abs(a11) > FAVOUR2x2*abs(det) ) { if ( abs(a11) > abs(a22) ) { // choose the best 1x1 alternative jp = ip; // select a11 det = a11; // pivot value stored in det } else { ip = jp; // select a22 det = a22; // pivot value stored in det } } else if ( (abs(a12) + abs(a11) + abs(a22))*abs(a22) > FAVOUR2x2*abs(det) ) { ip = jp; // select a22 det = a22; // pivot value stored in det } } else det = fs[ip + ld*ip]; // pivot value 
stored in det } /* The next function tries to apply 1x1 pivot. */ template< typename ELEMENT_TYPE > __device__ bool dev_1x1_pivot_fails( int x, int ip, ELEMENT_TYPE* fs, ELEMENT_TYPE* fds, int ld, ELEMENT_TYPE det, ELEMENT_TYPE delta, ELEMENT_TYPE eps ) { // the column of fds is that of fs before the division by pivot ELEMENT_TYPE u = fds[x + ld*ip] = fs[x + ld*ip]; if ( abs(det) <= eps ) { // the pivot is considered to be zero if ( abs(u) <= eps ) { // the off-diagonal is considered to be zero if ( x == ip ) fs[x + ld*ip] = 1.0; else fs[x + ld*ip] = 0.0; } else { // non-zero off-diagonal element found -> return 1; // this column to be delayed } } else if ( abs(det) <= delta*abs(u) ) // pivot too small -> return 1; // this column to be delayed else fs[x + ld*ip] = u/det; // ok to divide return 0; } /* The next function tries to apply 1x1 pivot. */ template< typename ELEMENT_TYPE > __device__ bool dev_2x2_pivot_fails( int x, int ip, int jp, ELEMENT_TYPE* fs, ELEMENT_TYPE* fds, int ld, ELEMENT_TYPE a11, ELEMENT_TYPE a12, ELEMENT_TYPE a22, ELEMENT_TYPE det, ELEMENT_TYPE delta, ELEMENT_TYPE eps ) { // the columns of fds is those of fd before division by pivot ELEMENT_TYPE u = fds[x + ld*ip] = fs[x + ld*ip]; ELEMENT_TYPE v = fds[x + ld*jp] = fs[x + ld*jp]; if ( abs(det) <= abs(a11)*abs(a22)*1.0e-15 || // the determinant is smaller than round-off errors -> // the pivot is considered to be zero abs(det) <= eps*(abs(a11) + abs(a22) + abs(a12)) // the inverse of the pivot is of the order 1/eps -> // the pivot is considered to be zero ) { if ( max(abs(u), abs(v)) <= eps ) { // the off-diagonal is "zero" if ( x == ip ) { fs[x + ld*ip] = 1.0; fs[x + ld*jp] = 0.0; } else if ( x == jp ) { fs[x + ld*ip] = 0.0; fs[x + ld*jp] = 1.0; } else { fs[x + ld*ip] = 0.0; fs[x + ld*jp] = 0.0; } } else // non-zero off-diagonal element found -> return 1; // this column to be delayed } else if ( abs(det) <= delta*max(abs(a22*u - a12*v), abs(a11*v - a12*u)) ) // pivot too small -> return 1; // this column to be delayed else { // ok to divide fs[x + ld*ip] = (a22*u - a12*v)/det; fs[x + ld*jp] = (a11*v - a12*u)/det; } return 0; } /* The next function eliminates the pivoted column from non-pivoted */ template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES // = 7 for a single node and = 11 for many nodes > __device__ void dev_eliminate_1x1( int& x, // row for this thread int y, // column for this thread int ip, // pivoted column ELEMENT_TYPE* fs, int ld, ELEMENT_TYPE p // pivot value ) { if ( x != ip ) fs[x + ld*y] -= p * fs[x + ld*ip]; x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= p * fs[x + ld*ip]; if ( TILES == 11 ) { // several nodes case x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= p * fs[x + ld*ip]; x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= p * fs[x + ld*ip]; } } /* The next function eliminates the two pivoted columns from non-pivoted */ template< typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_eliminate_2x2( int& x, int y, int ip, int jp, ELEMENT_TYPE* fs, int ld, ELEMENT_TYPE pi, ELEMENT_TYPE pj ) { if ( x != ip && x != jp ) fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp]; x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp]; if ( TILES == 11 ) { // several nodes case x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp]; x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= pi * 
fs[x + ld*ip] + pj * fs[x + ld*jp]; } } /* The next function performs elimination in one tile only */ template< typename ELEMENT_TYPE, unsigned int TILE_SIZE > inline __device__ void dev_eliminate( int& x, int y, int ip, int jp, ELEMENT_TYPE* fs, int ld, ELEMENT_TYPE pi, ELEMENT_TYPE pj ) { x += TILE_SIZE; if ( ip == jp ) fs[x + ld*y] -= pi * fs[x + ld*ip]; else fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp]; } /* Performs the factorization (LDLT). The outline of the factorization algorithm is as follows. 1. L = A 2. A diagonal block of L of size 1 or 2 is selected 3. A division of the corresponding (one or two) columns of L by the selected block (pivoting) is considered and is accepted only if the elements of the resulting columns are not going to be greater than the inverse of the "pivoting threshold" delta; otherwise kernel terminates. 4. If not all columns are pivoted, go to 2. Called by cu_block_ldlt and cu_multiblock_ldlt factorization kernels. */ template< typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_block_ldlt( unsigned int block, int nrows, // number of rows of the factorized matrix int ncols, // number of columns thereof int offp, // number of rows above the pivot block ELEMENT_TYPE* a, // array of elements of A int lda, // leading dimension of a ELEMENT_TYPE* f, // array of elements of the L factor int ldf, // leading dimension of f ELEMENT_TYPE* fd, // array of elements of L*D int ldfd, // leading dimension of fd ELEMENT_TYPE* d, // array for main diagonal and subdiagonal of D ELEMENT_TYPE delta, // pivoting threashold ELEMENT_TYPE eps, // zero pivot threashold int* index, // pivot order index int* stat // number of successful pivots ) { const int SIZE_X = TILES*TILE_SIZE; int ip, jp; // pivot row and col indices int x, y; // position indices int mx, my; // masks ELEMENT_TYPE a11, a12, a22, det; // 2x2 pivot data __shared__ ELEMENT_TYPE fs[SIZE_X*TILE_SIZE]; // work array for f __shared__ ELEMENT_TYPE fds[SIZE_X*TILE_SIZE]; // work array for fd __shared__ ELEMENT_TYPE ds[2*TILE_SIZE]; // work array for d __shared__ int mask[TILE_SIZE]; // pivot mask/index __shared__ bool not_max[TILE_SIZE]; // flag for finding the largest row elm __shared__ int quit; // failure flag __shared__ int jps; // pivot column index y = threadIdx.y % TILE_SIZE; // fs & fds column processed by this thread // load the diagonal and off-diagonal tiles into shared memory dev_init_fact< ELEMENT_TYPE, TILE_SIZE, TILES > ( block, nrows, ncols, offp, a, lda, fs, ds ); mx = (threadIdx.x < ncols ? 
0 : ncols + 1); // initial pivot index // find the largest element in the first row dev_init_max< ELEMENT_TYPE, TILE_SIZE, TILES > ( ncols, fs, mx, mask, not_max, jps, quit ); for ( int row = 0, pivoted = 0; row < ncols; ) { // select the pivot based on the row's largest element index jps ip = row; jp = jps; dev_select_pivots< ELEMENT_TYPE > ( fs, SIZE_X, ip, jp, a11, a12, a22, det ); __syncthreads(); if ( threadIdx.y < TILE_SIZE + 4 ) { // the first 3 warps try to pivot x = threadIdx.x + TILE_SIZE*threadIdx.y; // fs/fds row to process if ( x < SIZE_X && (threadIdx.y || mx == 0 || mx > ncols) ) { // elements of the pivot block that should have been // zeroed by elimination are ignored if ( ip == jp ) { // 1x1 pivot if ( dev_1x1_pivot_fails< ELEMENT_TYPE > ( x, ip, fs, fds, SIZE_X, det, delta, eps ) ) quit = 1; } else { // 2x2 pivot if ( dev_2x2_pivot_fails< ELEMENT_TYPE > ( x, ip, jp, fs, fds, SIZE_X, a11, a12, a22, det, delta, eps ) ) quit = 1; } } } else { // meanwhile, one thread of the fourth warp is inverting the pivot if ( threadIdx.x == 0 && threadIdx.y == TILE_SIZE + 4 ) { mask[ip] = pivoted + 1; // assume pivot is ok for now if ( ip == jp ) { if ( abs(det) > eps ) ds[2*pivoted] = 1.0/det; // ok to invert } else { mask[jp] = pivoted + 2; // assume pivot is ok for now if ( abs(det) > abs(a11)*abs(a22)*1.0e-15 && abs(det) > eps*(abs(a11) + abs(a22) + abs(a12)) ) { ds[2*pivoted ] = a22/det; ds[2*pivoted + 1] = -a12/det; ds[2*pivoted + 2] = a11/det; } } if ( atomicMin(&stat[0], ncols) <= pivoted ) quit = 1; // some other CUDA block failed to pivot this column } } // warp fork ends here __syncthreads(); if ( quit ) { if ( threadIdx.x == 0 && threadIdx.y == 0 ) { atomicMin(&stat[0], pivoted); // record the failure in stat[0] // column(s) should not be saved - mark as non-processed mask[ip] = 0; if ( ip != jp ) mask[jp] = 0; } __syncthreads(); break; // done } // update successful pivots count if ( ip == jp ) pivoted++; else pivoted += 2; // find next pivot row to process if ( ip == row ) row++; // move forward only if this row participated in pivoting while ( row < ncols && mask[row] ) row++; // skip processed rows (parts of previous 2x2 pivots) // eliminate the recently pivoted column(s) from the rest // first row to be processed by this thread x = threadIdx.x + (threadIdx.y/TILE_SIZE)*TILE_SIZE; mx = mask[threadIdx.x]; my = mask[y]; // process the first (TILES - 3) tiles right away; // the even tiles are processed by the first two warps, // the odd by the other two if ( ip == jp ) { a11 = fs[ip + SIZE_X*y]; if ( my == 0 ) dev_eliminate_1x1< ELEMENT_TYPE, TILE_SIZE, TILES > ( x, y, ip, fs, SIZE_X, a11 ); } else { a11 = fs[ip + SIZE_X*y]; a12 = fs[jp + SIZE_X*y]; if ( my == 0 ) dev_eliminate_2x2< ELEMENT_TYPE, TILE_SIZE, TILES > ( x, y, ip, jp, fs, SIZE_X, a11, a12 ); } // from here on, the first two warps deal with finding the largest element // in the next pivot row, while the other two continue elimination // in the remaining three tiles if ( threadIdx.y < TILE_SIZE ) { if ( row < ncols && threadIdx.y == 0 ) { not_max[threadIdx.x] = mx; // mask away processed elements if ( threadIdx.x == 0 ) jps = TILE_SIZE; // initialise the largest element column index } } else { // do elimination in the (TILES - 2)-th tile if ( my == 0 ) dev_eliminate< ELEMENT_TYPE, TILE_SIZE > ( x, y, ip, jp, fs, SIZE_X, a11, a12 ); } __syncthreads(); if ( threadIdx.y < TILE_SIZE ) { // mark elements in the pending row that cannot be largest if ( row < ncols ) { // check the element in column threadIdx.x if ( 
threadIdx.x != threadIdx.y && mx == 0 && my == 0 && abs(fs[row + SIZE_X*threadIdx.x]) < abs(fs[row + SIZE_X*threadIdx.y]) ) not_max[threadIdx.x] = 1; // no good: a larger value exists elsewhere } } else { // do elimination in the (TILES - 1)-th tile if ( my == 0 ) dev_eliminate< ELEMENT_TYPE, TILE_SIZE > ( x, y, ip, jp, fs, SIZE_X, a11, a12 ); } __syncthreads(); if ( threadIdx.y < TILE_SIZE ) { // select leftmost largest element in the row if ( row < ncols ) { if ( threadIdx.y == 0 && not_max[threadIdx.x] == 0 ) atomicMin(&jps, threadIdx.x); // in case of a tie, choose the leftmost } } else { // do elimination in the (TILES)-th tile if ( my == 0 ) dev_eliminate< ELEMENT_TYPE, TILE_SIZE > ( x, y, ip, jp, fs, SIZE_X, a11, a12 ); } __syncthreads(); } // for loop through pivot rows ends here my = mask[y]; // update successful pivot ordering in index; // if this CUDA block failed to pivot the part of column threadIdx.y of A // delegated to it, then possible successful pivoting of its other parts // by other blocks is canceled by zeroing index[threadIdx.y]; // if some other part of this column is unsuccessful, index[threadIdx.y] // remains zero if ( threadIdx.x == 0 && threadIdx.y < ncols ) atomicMin(&index[threadIdx.y], my); // save L and D factors and LD dev_save_fact< ELEMENT_TYPE, TILE_SIZE, TILES > ( block, nrows, ncols, offp, my, fs, fds, ds, f, ldf, fd, ldfd, d ); } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __global__ void cu_block_ldlt( int nrows, // n.o. rows in A int ncols, // n.o. cols in A (<= TILE_SIZE) int offp, // n.o. rows in A_u ELEMENT_TYPE* a, // array of A's elements int lda, // leading dimension of a ELEMENT_TYPE* f, // array of L's elements int ldf, // leading dimension of f ELEMENT_TYPE* fd, // array of (L*D)'s elements int ldfd, // leading dimension of fd ELEMENT_TYPE* d, // array of D**(-1)'s diagonal and subdiagonal elements ELEMENT_TYPE delta, // pivoting threshold ELEMENT_TYPE eps, // zero column threshold: // the column is zeroed if all elements are <= eps int* index, // pivot index (cf. permutation matrix P) int* stat // n.o. successful pivots ) { dev_block_ldlt< ELEMENT_TYPE, TILE_SIZE, TILES > ( blockIdx.x, nrows, ncols, offp, a, lda, f, ldf, fd, ldfd, d, delta, eps, index, stat ); return; } // Same as cu_block_fact but for several A's of different size simultaneously // // Called by multinode_ldlt factorization subroutine. // template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __global__ void cu_multiblock_ldlt( struct multiblock_fact_type *mbfdata, // factorization data ELEMENT_TYPE* f, // same for L ELEMENT_TYPE delta, // same as in cu_block_fact ELEMENT_TYPE eps, // same as in cu_block_fact int* index, // array of all pivot indices int* stat // array of successful pivots' numbers ) { /* * Read information on what to do from global memory */ mbfdata += blockIdx.x; // shift to the data for this CUDA block int ncols = mbfdata->ncols; // n.o. cols in A processed by this CUDA block if ( ncols < 1 ) return; int nrows = mbfdata->nrows; // n.o. rows in A int lda = mbfdata->ld; // leading dimension of A int p = mbfdata->p; // n.o. 
rows in A_u int node = mbfdata->node; // A's number int block = mbfdata->offb; // relative CUDA block index f += mbfdata->offf; // shift to the array of this L elements double *fd = mbfdata->ldptr; double *a = mbfdata->aptr; // pointer to A double *d = mbfdata->dptr; // pointer to D**(-1) dev_block_ldlt < double, TILE_SIZE, TILES > ( block, nrows, ncols, p, a, lda, f, lda, fd, lda, d, delta, eps, &index[node*TILE_SIZE], &stat[node]); } /* LDLT factorization kernel for the root delays block. The columns which the above factorization kernels failed to pivot are delayed, ie left unchanged, until some other columns in the same node are successfully pivoted, after which pivoting of delayed columns is attempted again. When a factorization subroutine terminates, generally there still may be delayed columns which this subroutine cannot possibly pivot, and they are passed on to the parent node in the elimination tree. At the root node, however, this is not possible, and a special kernel given below is applied to delayed columns, which together with the respective rows now form a square block at the lower left corner of the root node matrix. The main difference between the factorization kernel below and those above is that the pivot is sought in the whole matrix because, in the above notation, blocks A_u and A_l are no longer present. Since this matrix may be too large to fit into shared memory, the kernel below works mostly in the global memory (shared memory is only used for finding the largest element of a column). */ template< typename ELEMENT_TYPE > __global__ void cu_square_ldlt( int n, ELEMENT_TYPE* a, // A on input, L on output ELEMENT_TYPE* f, // L ELEMENT_TYPE* w, // L*D ELEMENT_TYPE* d, // main diag and subdiag of the inverse of D int ld, // leading dimension of a, f, w ELEMENT_TYPE delta, // same as above ELEMENT_TYPE eps, // same as above int* ind, // same as in cu_block_fact int* stat // same as in cu_block_fact ) { int x, y; int col; int ip, jp; int pivoted, recent; ELEMENT_TYPE a11, a12, a22, det; ELEMENT_TYPE* work = (ELEMENT_TYPE*)SharedMemory; // work array int* iwork = (int*)&work[blockDim.x]; // integer work array int* iw = (int*)&iwork[blockDim.x]; // iw[0]: failure flag, // iw[1]: largest col. elem. index for ( x = threadIdx.x; x < n; x += blockDim.x ) { ind[x] = 0; // initialize pivot index/processed columns mask for ( y = 0; y < n; y++ ) f[x + ld*y] = a[x + ld*y]; // copy A to L } for ( x = threadIdx.x; x < 2*n; x += blockDim.x ) d[x] = 0.0; // initialize D __syncthreads(); pivoted = 0; // n.o. pivoted cols for ( int pass = 0; ; pass++ ) { // failed cols are skipped until next pass recent = 0; // n.o. 
cols pivoted during this pass for ( col = 0; col < n; ) { if ( ind[col] ) { col++; // already pivoted, move on continue; } if ( threadIdx.x == 0 ) iw[0] = 0; // initialize failure flag __syncthreads(); // find the largest element in the pending column // // first, each thread finds its candidate for the largest one a11 = -1.0; y = -1; for ( x = threadIdx.x; x < n; x += blockDim.x ) { if ( ind[x] == 0 ) { a12 = abs(f[x + ld*col]); if ( a12 >= a11 ) { a11 = a12; y = x; } } } work[threadIdx.x] = a11; // the largest one for this thread iwork[threadIdx.x] = y; // its index __syncthreads(); // now first 8 threads reduce the number of candidates to 8 if ( threadIdx.x < 8 ) { for ( x = threadIdx.x + 8; x < blockDim.x; x += 8 ) if ( iwork[x] >= 0 && work[x] > work[threadIdx.x] ) { work[threadIdx.x] = work[x]; iwork[threadIdx.x] = iwork[x]; } } __syncthreads(); // the first thread finds the largest element and its index if ( threadIdx.x == 0 ) { y = 0; for ( x = 1; x < 8 && x < blockDim.x; x++ ) if ( iwork[x] >= 0 && (iwork[y] < 0 || work[x] > work[y]) ) y = x; iw[1] = iwork[y]; // the largest element index } __syncthreads(); // select the pivot based on the largest element index ip = col; jp = iw[1]; dev_select_pivots_at_root< ELEMENT_TYPE > ( f, ld, ip, jp, a11, a12, a22, det ); // try to pivot if ( ip == jp ) { // 1x1 pivot for ( x = threadIdx.x; x < n; x += blockDim.x ) if ( ind[x] == 0 ) if ( dev_1x1_pivot_fails< ELEMENT_TYPE > ( x, ip, f, w, ld, det, delta, eps ) ) iw[0] = 1; } else { // 2x2 pivot for ( x = threadIdx.x; x < n; x += blockDim.x ) if ( ind[x] == 0 ) if ( dev_2x2_pivot_fails< ELEMENT_TYPE > ( x, ip, jp, f, w, ld, a11, a12, a22, det, delta, eps ) ) iw[0] = 1; } __syncthreads(); if ( iw[0] ) { // pivot failed, restore the failed column(s) for ( x = threadIdx.x; x < n; x += blockDim.x ) { if ( ind[x] ) continue; f[x + ld*ip] = w[x + ld*ip]; if ( ip != jp ) f[x + ld*jp] = w[x + ld*jp]; } __syncthreads(); col++; // move on continue; } if ( threadIdx.x == 0 ) { // mark pivoted columns and invert the pivot if possible ind[ip] = pivoted + 1; if ( ip == jp ) { if ( abs(det) > eps ) // ok to invert d[2*pivoted] = 1.0/det; } else { ind[jp] = pivoted + 2; if ( abs(det) > abs(a11)*abs(a22)*1.0e-15 && abs(det) > eps*(abs(a11) + abs(a22) + abs(a12)) ) { // ok to invert d[2*pivoted ] = a22/det; d[2*pivoted + 1] = -a12/det; d[2*pivoted + 2] = a11/det; } } } __syncthreads(); // update pivot counters if ( ip == jp ) { pivoted++; recent++; } else { pivoted += 2; recent += 2; } // eliminate pivoted columns from non-processed if ( ip == jp ) { for ( x = threadIdx.x; x < n; x += blockDim.x ) for ( y = 0; y < n; y++ ) if ( x != ip && ind[y] == 0 ) f[x + ld*y] -= f[x + ld*ip] * f[ip + ld*y]; } else { for ( x = threadIdx.x; x < n; x += blockDim.x ) { for ( y = 0; y < n; y++ ) { if ( x != ip && x != jp && ind[y] == 0 ) { f[x + ld*y] -= f[x + ld*ip] * f[ip + ld*y] + f[x + ld*jp] * f[jp + ld*y]; } } } } __syncthreads(); if ( ip == col ) // this column is pivoted, move on col++; } // loop across columns if ( pivoted == n // all done || recent == 0 ) // no pivotable columns left break; } // pass if ( threadIdx.x == 0 ) stat[0] = pivoted; if ( pivoted < n ) // factorization failed return; // copy L to A for ( x = threadIdx.x; x < n; x += blockDim.x ) for ( y = 0; y < n; y++ ) a[ind[x] - 1 + ld*(ind[y] - 1)] = f[x + ld*y]; } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __global__ void cu_multiblock_chol( struct multiblock_fact_type *mbfdata, ELEMENT_TYPE* f, // array of L nodes 
int* stat // execution status ) { /* * Read information on what to do from global memory */ mbfdata += blockIdx.x; int ncols = mbfdata->ncols; if ( ncols < 1 ) return; int nrows = mbfdata->nrows; int ld = mbfdata->ld; int node = mbfdata->node; int block = mbfdata->offb; ELEMENT_TYPE *a = mbfdata->aptr; f += mbfdata->offf; stat += node; dev_block_chol< ELEMENT_TYPE, TILE_SIZE, TILES > ( block, nrows, ncols, a, ld, f, ld, stat ); } struct cstat_data_type { int nelim; double *dval; }; __global__ void cu_collect_stats( const struct cstat_data_type *csdata, struct cuda_stats *stats ) { // Designed to be run with a single thread csdata += blockIdx.x; double *d = csdata->dval; int nelim = csdata->nelim; int num_zero = 0; int num_neg = 0; int num_two = 0; for(int i = 0; i<nelim; ) { double a11 = d[2*i]; double a21 = d[2*i + 1]; if ( a21 == 0.0 ) { // 1x1 pivot (can be a zero pivot) if ( a11 == 0 ) num_zero++; if ( a11 < 0 ) num_neg++; i++; } else { // 2x2 pivot (can't be a zero pivot) double a22 = d[2*(i + 1)]; num_two++; // To check for negative eigenvalues, we exploit // det = product of evals // trace = sum of evals // if det is negative, exactly one eval is negative; // otherwise, both have same sign, equal to sign of trace double det = a11*a22 - a21*a21; double trace = a11 + a22; if ( det < 0 ) num_neg++; else if ( trace < 0 ) num_neg += 2; i += 2; } } if ( num_neg > 0 ) atomicAdd(&(stats->num_neg), num_neg); if ( num_zero > 0 ) atomicAdd(&(stats->num_zero), num_zero); if ( num_two > 0 ) atomicAdd(&(stats->num_two), num_two); } } /* anon namespace */ /******************************************************************************* * Following routines are exported with C binding so can be called from Fortran ******************************************************************************/ extern "C" { void spral_ssids_block_ldlt( hipStream_t *stream, int nrows, int ncols, int p, double* a, int lda, double* f, int ldf, double* fd, int ldfd, double* d, double delta, double eps, int* index, int* stat ) { int nblocks = (nrows - ncols - 1)/(BLOCK_SIZE*(BLOCKS - 1)) + 1; hipLaunchKernelGGL(( cu_block_ldlt_init), dim3(1), dim3(BLOCK_SIZE), 0, *stream , ncols, stat, index ); dim3 threads(BLOCK_SIZE, 2*BLOCK_SIZE); hipLaunchKernelGGL(( cu_block_ldlt < double, BLOCK_SIZE, BLOCKS >) , dim3(nblocks), dim3(threads), 0, *stream , nrows, ncols, p, a, lda, f, ldf, fd, ldfd, d, delta, eps, index, stat ); } void spral_ssids_block_llt( hipStream_t *stream, int nrows, int ncols, double* a, int lda, double* f, int ldf, int* stat ) { int smsize = CBLOCKS*BLOCK_SIZE*BLOCK_SIZE*sizeof(double); int nblocks = (nrows - ncols - 1)/(BLOCK_SIZE*(CBLOCKS - 1)) + 1; dim3 threads(BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( cu_block_chol < double, BLOCK_SIZE, CBLOCKS >) , dim3(nblocks), dim3(threads), smsize, *stream , nrows, ncols, a, lda, f, ldf, stat ); } void spral_ssids_collect_stats(hipStream_t *stream, int nblk, const struct cstat_data_type *csdata, struct cuda_stats *stats) { for(int i=0; i<nblk; i+=MAX_CUDA_BLOCKS) { int nb = min(MAX_CUDA_BLOCKS, nblk-i); hipLaunchKernelGGL(( cu_collect_stats) , dim3(nb), dim3(1), 0, *stream, csdata+i, stats); CudaCheckError(); } } void spral_ssids_multiblock_ldlt( hipStream_t *stream, int nblocks, struct multiblock_fact_type *mbfdata, double* f, double delta, double eps, int* index, int* stat ) { dim3 threads(BLOCK_SIZE, 2*BLOCK_SIZE); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); hipLaunchKernelGGL(( cu_multiblock_ldlt < double, 
BLOCK_SIZE, MBLOCKS >) , dim3(nb), dim3(threads), 0, *stream , mbfdata + i, f, delta, eps, index, stat ); } } void spral_ssids_multiblock_ldlt_setup( hipStream_t *stream, int nblocks, struct multinode_fact_type *ndata, struct multiblock_fact_type *mbfdata, int step, int block_size, int blocks, int* stat, int* ind, int* ncb ) { dim3 threads(10,8); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); hipLaunchKernelGGL(( cu_multiblock_fact_setup) , dim3(nb), dim3(threads), 0, *stream , ndata + i, mbfdata, step, block_size, blocks, i, stat + i, ind + block_size*i, ncb ); } } void spral_ssids_multiblock_llt( hipStream_t *stream, int nblocks, struct multiblock_fact_type *mbfdata, double* f, int* stat ) { if ( nblocks < 1 ) return; int smsize = MCBLOCKS*BLOCK_SIZE*BLOCK_SIZE*sizeof(double); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); hipLaunchKernelGGL(( cu_multiblock_chol < double, BLOCK_SIZE, MCBLOCKS >) , dim3(nb), dim3(threads), smsize, *stream , mbfdata + i, f, stat ); } } void spral_ssids_multiblock_llt_setup( hipStream_t *stream, int nblocks, struct multinode_fact_type *ndata, struct multiblock_fact_type *mbfdata, int step, int block_size, int blocks, int* stat, int* ncb ) { dim3 threads(10,8); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); hipLaunchKernelGGL(( cu_multiblock_fact_setup) , dim3(nb), dim3(threads), 0, *stream , ndata + i, mbfdata, step, block_size, blocks, i, stat + i, 0, ncb ); } } void spral_ssids_square_ldlt( hipStream_t *stream, int n, double* a, double* f, double* w, double* d, int ld, double delta, double eps, int* index, int* stat ) { int nt = min(n, 256); int sm = nt*sizeof(double) + (nt + 2)*sizeof(int); hipLaunchKernelGGL(( cu_square_ldlt< double >), dim3(1), dim3(nt), sm, *stream , n, a, f, w, d, ld, delta, eps, index, stat ); } } // end extern "C"
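
/*
 * Illustrative host-side usage sketch -- not part of the original SSIDS
 * sources.  It shows one way the exported spral_ssids_square_ldlt wrapper
 * above could be driven from C++ host code.  The matrix order n, the
 * threshold values delta and eps, and the guard macro
 * SPRAL_SSIDS_USAGE_SKETCH are assumptions made for this example only; in
 * the library these routines are driven from the Fortran layer (see the
 * comment above the extern "C" block).  The guard keeps the sketch out of
 * normal builds.
 */
#ifdef SPRAL_SSIDS_USAGE_SKETCH
static int example_square_ldlt(int n, const double* a_host)
{
   hipStream_t stream;
   hipStreamCreate(&stream);

   size_t msize = sizeof(double)*size_t(n)*size_t(n);
   double *a, *f, *w, *d;
   int *index, *stat;
   hipMalloc((void**)&a, msize);              // A on input, permuted L on output
   hipMalloc((void**)&f, msize);              // work copy of L
   hipMalloc((void**)&w, msize);              // work array for L*D
   hipMalloc((void**)&d, sizeof(double)*2*n); // diag and subdiag of D**(-1)
   hipMalloc((void**)&index, sizeof(int)*n);  // pivot order
   hipMalloc((void**)&stat, sizeof(int));     // number of successful pivots

   hipMemcpyAsync(a, a_host, msize, hipMemcpyHostToDevice, stream);

   double delta = 0.01;  // pivoting threshold (example value)
   double eps = 1.0e-20; // zero pivot threshold (example value)
   spral_ssids_square_ldlt(&stream, n, a, f, w, d, n, delta, eps, index, stat);
   hipStreamSynchronize(stream);

   int npiv = 0;
   hipMemcpy(&npiv, stat, sizeof(int), hipMemcpyDeviceToHost);

   hipFree(a); hipFree(f); hipFree(w); hipFree(d);
   hipFree(index); hipFree(stat);
   hipStreamDestroy(stream);

   return npiv; // npiv < n means some delayed columns could not be pivoted
}
#endif /* SPRAL_SSIDS_USAGE_SKETCH */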
48a8e58d59fa8bad5e3814b236c857de381328e8.cu
/* Copyright (c) 2013 Science and Technology Facilities Council (STFC) * Authors: Evgueni Ovtchinnikov and Jonathan Hogg * * This file contains CUDA kernels for partial LL^T and LDL^T factorization * of dense submatrices. */ #include <stdlib.h> #include <stdio.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include "ssids/gpu/kernels/datatypes.h" #include "cuda/cuda_check.h" #define min(x,y) ((x) < (y) ? (x) : (y)) #define FAVOUR2x2 100 #define CBLOCKS 3 #define MCBLOCKS 8 #define BLOCKS 7 #define MBLOCKS 11 #define BLOCK_SIZE 8 #define MAX_CUDA_BLOCKS 65535 using namespace spral::ssids::gpu; namespace /* anon */ { extern __shared__ char SharedMemory[]; __global__ void cu_block_ldlt_init( int ncols, int* stat, int* ind ) { if ( threadIdx.x == 0 ) { stat[0] = ncols; // successful pivots stat[1] = 0; } if ( threadIdx.x < ncols ) ind[threadIdx.x] = ncols + 1; } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_init_chol_fact( unsigned int block, int nrows, // number of rows of the factorized matrix int ncols, // number of columns thereof ELEMENT_TYPE* a, // array of elements of A int lda, // leading dimension of a ELEMENT_TYPE* fs // initial L factor (shared mem) ) { const int SIZE_X = TILES*TILE_SIZE; int x; // row index for ( int tile = 0; tile < TILES; tile++ ) { if ( tile ) { // load A's offdiagonal tiles into shared memory x = ncols + threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; // offdiagonal row index in A fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y] = ( x < nrows && threadIdx.y < ncols ) ? a[x + lda*threadIdx.y] : 0.0; } else { // load the diagonal (pivot) tile fs[threadIdx.x + SIZE_X*threadIdx.y] = ( threadIdx.x < ncols && threadIdx.y < ncols ) ? 
a[threadIdx.x + lda*threadIdx.y] : 0.0; } } } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_save_chol_fact( unsigned int block, int nrows, // number of rows of the factorized matrix int ncols, // number of columns thereof ELEMENT_TYPE* fs, // initial L factor (shared mem) ELEMENT_TYPE* f, // array of elements of L int ldf // leading dimension of f ) { const int SIZE_X = TILES*TILE_SIZE; int x; // row index for ( int tile = 0; tile < TILES; tile++ ) { if ( tile ) { // upload the relevant elements of fs to f x = ncols + threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; if ( x < nrows && threadIdx.y < ncols ) f[x + ldf*threadIdx.y] = fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y]; } else if ( block == 0 ) { // upload to f and fd if ( threadIdx.x < ncols && threadIdx.y < ncols ) f[threadIdx.x + ldf*threadIdx.y] = fs[threadIdx.x + SIZE_X*threadIdx.y]; } } // loop through tiles ends here } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_block_chol( int block, int nrows, int ncols, ELEMENT_TYPE* a, int lda, ELEMENT_TYPE* f, int ldf, int* stat ) { const int SIZE_X = TILES*TILE_SIZE; int ip; ELEMENT_TYPE v; ELEMENT_TYPE *work = (ELEMENT_TYPE*)SharedMemory; // load A into shared memory dev_init_chol_fact< ELEMENT_TYPE, TILE_SIZE, TILES > ( block, nrows, ncols, a, lda, work ); __syncthreads(); for ( ip = 0; ip < ncols; ip++ ) { v = work[ip + SIZE_X*ip]; if ( v <= 0.0 ) { if ( block == 0 && threadIdx.x == 0 && threadIdx.y == 0 ) stat[0] = ip; return; } v = sqrt(v); __syncthreads(); if ( threadIdx.y < TILES ) work[threadIdx.x + TILE_SIZE*threadIdx.y + SIZE_X*ip] /= v; __syncthreads(); if ( threadIdx.y > ip && threadIdx.y < ncols ) { for ( int x = threadIdx.x + TILE_SIZE; x < SIZE_X; x += TILE_SIZE ) work[x + SIZE_X*threadIdx.y] -= work[threadIdx.y + SIZE_X*ip]*work[x + SIZE_X*ip]; if ( threadIdx.x > ip ) work[threadIdx.x + SIZE_X*threadIdx.y] -= work[threadIdx.y + SIZE_X*ip] *work[threadIdx.x + SIZE_X*ip]; } __syncthreads(); } if ( block == 0 && threadIdx.x == 0 && threadIdx.y == 0 ) stat[0] = ncols; // save the L factor dev_save_chol_fact< ELEMENT_TYPE, TILE_SIZE, TILES > ( block, nrows, ncols, work, f, ldf ); } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __global__ void cu_block_chol( int nrows, int ncols, ELEMENT_TYPE* a, int lda, ELEMENT_TYPE* f, int ldf, int* stat ) { dev_block_chol< ELEMENT_TYPE, TILE_SIZE, TILES > ( blockIdx.x, nrows, ncols, a, lda, f, ldf, stat ); } struct multinode_chol_type { int nrows; int ncols; double *lcol; }; // input data type for multiblock_fact and multiblock_chol // each CUDA block gets a copy struct multiblock_fact_type { int nrows; // no node's rows int ncols; // no node's cols int ld; // node's leading dimension int p; // no rows above the pivot block double *aptr; // pointer to this node's A matrix double *ldptr; // pointer to this node's LD matrix int offf; // this node's L offset in the array of all Ls double *dptr; // pointer to this node's D in array of all Ds int node; // node index int offb; // the idx of the first CUDA block processing this node }; __global__ void cu_multiblock_fact_setup( struct multinode_fact_type *ndata, struct multiblock_fact_type *mbfdata, int step, int block_size, int blocks, int offb, int* stat, int* ind, int* nl ) { ndata += blockIdx.x; int ncols = ndata->ncols; int nrows = ndata->nrows; double *lval = ndata->lval; double *ldval = ndata->ldval; double *dval = ndata->dval; 
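  // Per-node bookkeeping read from multinode_fact_type: done counts the
  // columns of this node that have already been eliminated, ib (1-based) is
  // the first column of the block being set up, jb records how far column
  // assignment has progressed, rght is the right edge of the current column
  // window (advanced by step once the window is exhausted), and lbuf is this
  // node's base offset into the array holding all the L factors (it feeds
  // offf below).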
int ib = ndata->ib; int jb = ndata->jb; int done = ndata->done; int rght = ndata->rght; int lbuf = ndata->lbuf; if ( jb < ib ) return; int pivoted = stat[blockIdx.x]; if ( pivoted > 0 ) { done += pivoted; if ( jb == rght ) jb = done; } if ( jb <= ncols ) ib = jb + 1; __syncthreads(); if ( threadIdx.x == 0 && threadIdx.y == 0 ) { ndata->ib = ib; ndata->jb = jb; ndata->done = done; } if ( ib > ncols ) return; if ( ib > rght ) { rght += step; if ( rght > ncols ) rght = ncols; if ( threadIdx.x == 0 && threadIdx.y == 0 ) { ndata->rght = rght; } } int rb = nrows - done; int cb = rght - ib + 1; if ( cb > block_size ) cb = block_size; if ( threadIdx.x == 0 && threadIdx.y == 0 ) { ndata->jb = jb + cb; stat[blockIdx.x] = cb; // successful pivots } if ( ind && threadIdx.x < cb && threadIdx.y == 0 ) ind[blockIdx.x*block_size + threadIdx.x] = cb + 1; int k = (rb - cb - 1)/(block_size*(blocks - 1)) + 1; __shared__ int ncb; if ( threadIdx.x == 0 && threadIdx.y == 0 ) ncb = atomicAdd(&nl[0], k); __shared__ int iwork[9]; __shared__ double *lptr, *ldptr, *dptr; if ( threadIdx.x == 0 && threadIdx.y == 0 ) { iwork[0] = cb; iwork[1] = rb; iwork[2] = nrows; iwork[3] = ib - done - 1; lptr = lval + done + (ib - 1)*nrows; ldptr = ldval + done + (ib - 1)*nrows; iwork[5] = lbuf + done; dptr = dval + 2*done; iwork[7] = offb + blockIdx.x; iwork[8] = ncb; } __syncthreads(); for ( int i = threadIdx.y; i < k; i += blockDim.y ) { switch(threadIdx.x) { case 0: mbfdata[ncb+i].ncols = iwork[0]; break; case 1: mbfdata[ncb+i].nrows = iwork[1]; break; case 2: mbfdata[ncb+i].ld = iwork[2]; break; case 3: mbfdata[ncb+i].p = iwork[3]; break; case 4: mbfdata[ncb+i].aptr = lptr; mbfdata[ncb+i].ldptr = ldptr; break; case 5: mbfdata[ncb+i].offf = iwork[5]; break; case 6: mbfdata[ncb+i].dptr = dptr; break; case 7: mbfdata[ncb+i].node = iwork[7]; break; case 8: mbfdata[ncb+i].offb = i; break; } } } //////////////////////////////////////////////////////////////////////////// /* Functions below participate in the LDLT factorization | A_u P| |L_u| Q A P = |P^T A_d P| = |L_d| * D * (L_d)^T = L * D * (L_d)^T (LDLT) | A_l P| |L_l| where A is nrows x ncols, P is a ncols x ncols permutation matrix, |I_u | Q = | P^T |, where I_u and I_l are identities, | I_l| L_d is a ncols x ncols lower triangular matrix with unit main diagonal and D is a ncols x ncols block diagonal matrix with 1x1 and 2x2 blocks on the main diagonal. Common variable names: nrow number of rows in A/L ncols numbre of columns in A/L offp number of rows in A_u */ //////////////////////////////////////////////////////////////////////////// /* The next function initializes L and the main diagonal and subdiagonal of D**(-1). L and L*D are stored in two shared memory arrays fs and fds, each arranged into TILES square tiles of size TILE_SIZE. The kernel for factorizing just one node uses TILES = 7, and the one for simultaneous factorization of several nodes uses TILES = 11. Each CUDA block uses dev_init_fact to load A_d into the first tile of fs and up to (TILES - 1)*TILE_SIZE rows of A_u and A_l into the remaining TILES - 1 tiles. The two diagonals of D**(-1) are stored in a shared memory array of size 2*TILE_SIZE, initialized to 0 by this kernel. 
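   For the configuration instantiated by the host wrappers at the end of this
   file (TILE_SIZE = BLOCK_SIZE = 8), fs and fds each hold
   TILES*TILE_SIZE*TILE_SIZE doubles: 7*8*8 = 448 doubles (3.5 KB) per array
   for the single-node kernel and 11*8*8 = 704 doubles (5.5 KB) for the
   multi-node kernel; ds adds a further 2*TILE_SIZE = 16 doubles.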
*/ template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_init_fact( unsigned int block, // relative CUDA block number int nrows, int ncols, int offp, ELEMENT_TYPE* a, // array of elements of A int lda, // leading dimension of a ELEMENT_TYPE* fs, // initial L factor (shared mem) ELEMENT_TYPE* ds // initial D**(-1) (shared mem) ) { const int SIZE_X = TILES*TILE_SIZE; int x, y; // position indices y = threadIdx.y % TILE_SIZE; // fs & fds column processed by this thread if ( threadIdx.y < TILE_SIZE ) { for ( int tile = 0; tile < TILES; tile += 2 ) { if ( tile ) { // load A_u and A_l's even tiles into shared memory x = threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; // offdiagonal row index in A if ( x >= offp ) x += ncols; // skip A_d fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y] = ( x < nrows && threadIdx.y < ncols ) ? a[x + lda*threadIdx.y] : 0.0; } else { // load A_d fs[threadIdx.x + SIZE_X*threadIdx.y] = ( threadIdx.x < ncols && threadIdx.y < ncols ) ? a[offp + threadIdx.x + lda*threadIdx.y] : 0.0; } } } else { // load A_u and A_l's odd tiles into shared memory for ( int tile = 1; tile < TILES; tile += 2 ) { x = threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; if ( x >= offp ) x += ncols; fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*y] = ( x < nrows && y < ncols ) ? a[x + lda*y] : 0.0; } } // main diagonal and subdiagonal of D**(-1) set to 0 if ( threadIdx.y < 2 ) ds[2*threadIdx.x + threadIdx.y] = 0.0; } /* The next function uploads L, L*D and D to global memory */ template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_save_fact( unsigned int block, int nrows, int ncols, int offp, int my, // save only if my is non-zero ELEMENT_TYPE* fs, // L (shared mem) ELEMENT_TYPE* fds, // L*D (shared mem) ELEMENT_TYPE* ds, // 2 diags of D**(-1) (shared mem) ELEMENT_TYPE* f, // L (global mem) int ldf, // leading dimension of f ELEMENT_TYPE* fd, // L*D (global mem) int ldfd, // leading dimension of fd ELEMENT_TYPE* d // 2 diags of D**(-1) (global mem) ) { const int SIZE_X = TILES*TILE_SIZE; int x, y; // position indices y = threadIdx.y % TILE_SIZE; // fs & fds column processed by this thread if ( threadIdx.y < TILE_SIZE ) { // warps 0, 1 for ( int tile = 0; tile < TILES; tile += 2 ) { if ( tile ) { // upload L_u, L_l, L_u*D and L_l*D's even tiles x = threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; if ( x >= offp ) // skip L_d x += ncols; if ( x < nrows && threadIdx.y < ncols && my ) { f[x + ldf*threadIdx.y] = fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y]; fd[x + ldfd*threadIdx.y] = fds[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y]; } } else if ( block == 0 ) { // upload L_d and L_d*D if ( threadIdx.x < ncols && threadIdx.y < ncols && my ) { f[offp + threadIdx.x + ldf*threadIdx.y] = fs[threadIdx.x + SIZE_X*threadIdx.y]; fd[offp + threadIdx.x + ldfd*threadIdx.y] = fds[threadIdx.x + SIZE_X*threadIdx.y]; } // upload D**(-1) if ( threadIdx.x < 2 && threadIdx.y < ncols ) d[threadIdx.x + 2*threadIdx.y] = ds[threadIdx.x + 2*threadIdx.y]; } } // loop through even tiles ends here } else { // upload L_u, L_l, L_u*D and L_l*D's odd tiles (warps 2, 3) for ( int tile = 1; tile < TILES; tile += 2 ) { x = threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block; if ( x >= offp ) // skip L_d x += ncols; if ( x < nrows && y < ncols && my ) { f[x + ldf*y] = fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*y]; fd[x + ldfd*y] = fds[threadIdx.x + tile*TILE_SIZE + 
SIZE_X*y]; } } } } /* The next function finds the largest element of the first row of A_d */ template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_init_max( int ncols, const ELEMENT_TYPE* fs, int mx, // this thread mask int* mask, // pivot index/mask bool* not_max, // "not largest" flag int& jps, // the index of the largest element int& quit // pivoting failure flag ) { const int SIZE_X = TILES*TILE_SIZE; if ( threadIdx.y == 0 ) { mask[threadIdx.x] = mx; // initialize the pivot index not_max[threadIdx.x] = mx; // initialize the "not largest" flag } if ( threadIdx.x == 0 && threadIdx.y == 0 ) { jps = TILE_SIZE; // initialize pivot col jp: cf the case of a tie below quit = 0; // initialize failure flag } __syncthreads(); // check if the element in the column threadIdx.x // of the first row is (one of) the largest one(s) if ( threadIdx.x < ncols && threadIdx.y < ncols && threadIdx.x != threadIdx.y && abs(fs[SIZE_X*threadIdx.x]) < abs(fs[SIZE_X*threadIdx.y]) ) not_max[threadIdx.x] = 1; // no good: a larger value exists elsewhere __syncthreads(); // select the leftmost among the largest elements of the row if ( threadIdx.y == 0 && not_max[threadIdx.x] == 0 ) atomicMin(&jps, threadIdx.x); // in case of a tie, choose the leftmost __syncthreads(); } /* The next function selects pivot based on the pending row number ip and the column number for the largest element in this row. Three options are considered: (1) use 1x1 pivot a11 = fs[ip + ld*ip], (2) use 1x1 pivot a22 = fs[jp + ld*jp], (3) use 2x2 pivot | a_11 a_12 | | a_12 a_22 |, where a12 = fs[ip + ld*jp]. The pivot that has the smallest inverse is selected. */ template< typename ELEMENT_TYPE > __device__ void dev_select_pivots_at_root( const ELEMENT_TYPE* fs, int ld, // leading dimension of fs int& ip, int& jp, ELEMENT_TYPE& a11, ELEMENT_TYPE& a12, ELEMENT_TYPE& a22, ELEMENT_TYPE& det ) { // select the pivot based on the row's largest element index if ( ip != jp ) { // choose between 1x1 and 2x2 pivots a11 = fs[ip + ld*ip]; a12 = fs[ip + ld*jp]; a22 = fs[jp + ld*jp]; det = a11*a22 - a12*a12; // determinant of 2x2 pivot stored in det if ( (abs(a12) + abs(a11) + abs(a22))*abs(a11) > abs(det) ) { if ( abs(a11) > abs(a22) ) { // choose the best 1x1 alternative jp = ip; // select a11 det = a11; // pivot value stored in det } else { ip = jp; // select a22 det = a22; // pivot value stored in det } } else if ( (abs(a12) + abs(a11) + abs(a22))*abs(a22) > abs(det) ) { ip = jp; // select a22 det = a22; // pivot value stored in det } } else det = fs[ip + ld*ip]; // pivot value stored in det } template< typename ELEMENT_TYPE > __device__ void dev_select_pivots( const ELEMENT_TYPE* fs, int ld, // leading dimension of fs int& ip, int& jp, ELEMENT_TYPE& a11, ELEMENT_TYPE& a12, ELEMENT_TYPE& a22, ELEMENT_TYPE& det ) { // select the pivot based on the row's largest element index if ( ip != jp ) { // choose between 1x1 and 2x2 pivots a11 = fs[ip + ld*ip]; a12 = fs[ip + ld*jp]; a22 = fs[jp + ld*jp]; det = a11*a22 - a12*a12; // determinant of 2x2 pivot stored in det if ( (abs(a12) + abs(a11) + abs(a22))*abs(a11) > FAVOUR2x2*abs(det) ) { if ( abs(a11) > abs(a22) ) { // choose the best 1x1 alternative jp = ip; // select a11 det = a11; // pivot value stored in det } else { ip = jp; // select a22 det = a22; // pivot value stored in det } } else if ( (abs(a12) + abs(a11) + abs(a22))*abs(a22) > FAVOUR2x2*abs(det) ) { ip = jp; // select a22 det = a22; // pivot value stored in det } } else det = fs[ip + ld*ip]; // pivot value 
stored in det } /* The next function tries to apply 1x1 pivot. */ template< typename ELEMENT_TYPE > __device__ bool dev_1x1_pivot_fails( int x, int ip, ELEMENT_TYPE* fs, ELEMENT_TYPE* fds, int ld, ELEMENT_TYPE det, ELEMENT_TYPE delta, ELEMENT_TYPE eps ) { // the column of fds is that of fs before the division by pivot ELEMENT_TYPE u = fds[x + ld*ip] = fs[x + ld*ip]; if ( abs(det) <= eps ) { // the pivot is considered to be zero if ( abs(u) <= eps ) { // the off-diagonal is considered to be zero if ( x == ip ) fs[x + ld*ip] = 1.0; else fs[x + ld*ip] = 0.0; } else { // non-zero off-diagonal element found -> return 1; // this column to be delayed } } else if ( abs(det) <= delta*abs(u) ) // pivot too small -> return 1; // this column to be delayed else fs[x + ld*ip] = u/det; // ok to divide return 0; } /* The next function tries to apply 1x1 pivot. */ template< typename ELEMENT_TYPE > __device__ bool dev_2x2_pivot_fails( int x, int ip, int jp, ELEMENT_TYPE* fs, ELEMENT_TYPE* fds, int ld, ELEMENT_TYPE a11, ELEMENT_TYPE a12, ELEMENT_TYPE a22, ELEMENT_TYPE det, ELEMENT_TYPE delta, ELEMENT_TYPE eps ) { // the columns of fds is those of fd before division by pivot ELEMENT_TYPE u = fds[x + ld*ip] = fs[x + ld*ip]; ELEMENT_TYPE v = fds[x + ld*jp] = fs[x + ld*jp]; if ( abs(det) <= abs(a11)*abs(a22)*1.0e-15 || // the determinant is smaller than round-off errors -> // the pivot is considered to be zero abs(det) <= eps*(abs(a11) + abs(a22) + abs(a12)) // the inverse of the pivot is of the order 1/eps -> // the pivot is considered to be zero ) { if ( max(abs(u), abs(v)) <= eps ) { // the off-diagonal is "zero" if ( x == ip ) { fs[x + ld*ip] = 1.0; fs[x + ld*jp] = 0.0; } else if ( x == jp ) { fs[x + ld*ip] = 0.0; fs[x + ld*jp] = 1.0; } else { fs[x + ld*ip] = 0.0; fs[x + ld*jp] = 0.0; } } else // non-zero off-diagonal element found -> return 1; // this column to be delayed } else if ( abs(det) <= delta*max(abs(a22*u - a12*v), abs(a11*v - a12*u)) ) // pivot too small -> return 1; // this column to be delayed else { // ok to divide fs[x + ld*ip] = (a22*u - a12*v)/det; fs[x + ld*jp] = (a11*v - a12*u)/det; } return 0; } /* The next function eliminates the pivoted column from non-pivoted */ template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES // = 7 for a single node and = 11 for many nodes > __device__ void dev_eliminate_1x1( int& x, // row for this thread int y, // column for this thread int ip, // pivoted column ELEMENT_TYPE* fs, int ld, ELEMENT_TYPE p // pivot value ) { if ( x != ip ) fs[x + ld*y] -= p * fs[x + ld*ip]; x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= p * fs[x + ld*ip]; if ( TILES == 11 ) { // several nodes case x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= p * fs[x + ld*ip]; x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= p * fs[x + ld*ip]; } } /* The next function eliminates the two pivoted columns from non-pivoted */ template< typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_eliminate_2x2( int& x, int y, int ip, int jp, ELEMENT_TYPE* fs, int ld, ELEMENT_TYPE pi, ELEMENT_TYPE pj ) { if ( x != ip && x != jp ) fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp]; x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp]; if ( TILES == 11 ) { // several nodes case x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp]; x += 2*TILE_SIZE; // move to the next tile pair fs[x + ld*y] -= pi * 
fs[x + ld*ip] + pj * fs[x + ld*jp]; } } /* The next function performs elimination in one tile only */ template< typename ELEMENT_TYPE, unsigned int TILE_SIZE > inline __device__ void dev_eliminate( int& x, int y, int ip, int jp, ELEMENT_TYPE* fs, int ld, ELEMENT_TYPE pi, ELEMENT_TYPE pj ) { x += TILE_SIZE; if ( ip == jp ) fs[x + ld*y] -= pi * fs[x + ld*ip]; else fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp]; } /* Performs the factorization (LDLT). The outline of the factorization algorithm is as follows. 1. L = A 2. A diagonal block of L of size 1 or 2 is selected 3. A division of the corresponding (one or two) columns of L by the selected block (pivoting) is considered and is accepted only if the elements of the resulting columns are not going to be greater than the inverse of the "pivoting threshold" delta; otherwise kernel terminates. 4. If not all columns are pivoted, go to 2. Called by cu_block_ldlt and cu_multiblock_ldlt factorization kernels. */ template< typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __device__ void dev_block_ldlt( unsigned int block, int nrows, // number of rows of the factorized matrix int ncols, // number of columns thereof int offp, // number of rows above the pivot block ELEMENT_TYPE* a, // array of elements of A int lda, // leading dimension of a ELEMENT_TYPE* f, // array of elements of the L factor int ldf, // leading dimension of f ELEMENT_TYPE* fd, // array of elements of L*D int ldfd, // leading dimension of fd ELEMENT_TYPE* d, // array for main diagonal and subdiagonal of D ELEMENT_TYPE delta, // pivoting threashold ELEMENT_TYPE eps, // zero pivot threashold int* index, // pivot order index int* stat // number of successful pivots ) { const int SIZE_X = TILES*TILE_SIZE; int ip, jp; // pivot row and col indices int x, y; // position indices int mx, my; // masks ELEMENT_TYPE a11, a12, a22, det; // 2x2 pivot data __shared__ ELEMENT_TYPE fs[SIZE_X*TILE_SIZE]; // work array for f __shared__ ELEMENT_TYPE fds[SIZE_X*TILE_SIZE]; // work array for fd __shared__ ELEMENT_TYPE ds[2*TILE_SIZE]; // work array for d __shared__ int mask[TILE_SIZE]; // pivot mask/index __shared__ bool not_max[TILE_SIZE]; // flag for finding the largest row elm __shared__ int quit; // failure flag __shared__ int jps; // pivot column index y = threadIdx.y % TILE_SIZE; // fs & fds column processed by this thread // load the diagonal and off-diagonal tiles into shared memory dev_init_fact< ELEMENT_TYPE, TILE_SIZE, TILES > ( block, nrows, ncols, offp, a, lda, fs, ds ); mx = (threadIdx.x < ncols ? 
0 : ncols + 1); // initial pivot index // find the largest element in the first row dev_init_max< ELEMENT_TYPE, TILE_SIZE, TILES > ( ncols, fs, mx, mask, not_max, jps, quit ); for ( int row = 0, pivoted = 0; row < ncols; ) { // select the pivot based on the row's largest element index jps ip = row; jp = jps; dev_select_pivots< ELEMENT_TYPE > ( fs, SIZE_X, ip, jp, a11, a12, a22, det ); __syncthreads(); if ( threadIdx.y < TILE_SIZE + 4 ) { // the first 3 warps try to pivot x = threadIdx.x + TILE_SIZE*threadIdx.y; // fs/fds row to process if ( x < SIZE_X && (threadIdx.y || mx == 0 || mx > ncols) ) { // elements of the pivot block that should have been // zeroed by elimination are ignored if ( ip == jp ) { // 1x1 pivot if ( dev_1x1_pivot_fails< ELEMENT_TYPE > ( x, ip, fs, fds, SIZE_X, det, delta, eps ) ) quit = 1; } else { // 2x2 pivot if ( dev_2x2_pivot_fails< ELEMENT_TYPE > ( x, ip, jp, fs, fds, SIZE_X, a11, a12, a22, det, delta, eps ) ) quit = 1; } } } else { // meanwhile, one thread of the fourth warp is inverting the pivot if ( threadIdx.x == 0 && threadIdx.y == TILE_SIZE + 4 ) { mask[ip] = pivoted + 1; // assume pivot is ok for now if ( ip == jp ) { if ( abs(det) > eps ) ds[2*pivoted] = 1.0/det; // ok to invert } else { mask[jp] = pivoted + 2; // assume pivot is ok for now if ( abs(det) > abs(a11)*abs(a22)*1.0e-15 && abs(det) > eps*(abs(a11) + abs(a22) + abs(a12)) ) { ds[2*pivoted ] = a22/det; ds[2*pivoted + 1] = -a12/det; ds[2*pivoted + 2] = a11/det; } } if ( atomicMin(&stat[0], ncols) <= pivoted ) quit = 1; // some other CUDA block failed to pivot this column } } // warp fork ends here __syncthreads(); if ( quit ) { if ( threadIdx.x == 0 && threadIdx.y == 0 ) { atomicMin(&stat[0], pivoted); // record the failure in stat[0] // column(s) should not be saved - mark as non-processed mask[ip] = 0; if ( ip != jp ) mask[jp] = 0; } __syncthreads(); break; // done } // update successful pivots count if ( ip == jp ) pivoted++; else pivoted += 2; // find next pivot row to process if ( ip == row ) row++; // move forward only if this row participated in pivoting while ( row < ncols && mask[row] ) row++; // skip processed rows (parts of previous 2x2 pivots) // eliminate the recently pivoted column(s) from the rest // first row to be processed by this thread x = threadIdx.x + (threadIdx.y/TILE_SIZE)*TILE_SIZE; mx = mask[threadIdx.x]; my = mask[y]; // process the first (TILES - 3) tiles right away; // the even tiles are processed by the first two warps, // the odd by the other two if ( ip == jp ) { a11 = fs[ip + SIZE_X*y]; if ( my == 0 ) dev_eliminate_1x1< ELEMENT_TYPE, TILE_SIZE, TILES > ( x, y, ip, fs, SIZE_X, a11 ); } else { a11 = fs[ip + SIZE_X*y]; a12 = fs[jp + SIZE_X*y]; if ( my == 0 ) dev_eliminate_2x2< ELEMENT_TYPE, TILE_SIZE, TILES > ( x, y, ip, jp, fs, SIZE_X, a11, a12 ); } // from here on, the first two warps deal with finding the largest element // in the next pivot row, while the other two continue elimination // in the remaining three tiles if ( threadIdx.y < TILE_SIZE ) { if ( row < ncols && threadIdx.y == 0 ) { not_max[threadIdx.x] = mx; // mask away processed elements if ( threadIdx.x == 0 ) jps = TILE_SIZE; // initialise the largest element column index } } else { // do elimination in the (TILES - 2)-th tile if ( my == 0 ) dev_eliminate< ELEMENT_TYPE, TILE_SIZE > ( x, y, ip, jp, fs, SIZE_X, a11, a12 ); } __syncthreads(); if ( threadIdx.y < TILE_SIZE ) { // mark elements in the pending row that cannot be largest if ( row < ncols ) { // check the element in column threadIdx.x if ( 
threadIdx.x != threadIdx.y && mx == 0 && my == 0 && abs(fs[row + SIZE_X*threadIdx.x]) < abs(fs[row + SIZE_X*threadIdx.y]) ) not_max[threadIdx.x] = 1; // no good: a larger value exists elsewhere } } else { // do elimination in the (TILES - 1)-th tile if ( my == 0 ) dev_eliminate< ELEMENT_TYPE, TILE_SIZE > ( x, y, ip, jp, fs, SIZE_X, a11, a12 ); } __syncthreads(); if ( threadIdx.y < TILE_SIZE ) { // select leftmost largest element in the row if ( row < ncols ) { if ( threadIdx.y == 0 && not_max[threadIdx.x] == 0 ) atomicMin(&jps, threadIdx.x); // in case of a tie, choose the leftmost } } else { // do elimination in the (TILES)-th tile if ( my == 0 ) dev_eliminate< ELEMENT_TYPE, TILE_SIZE > ( x, y, ip, jp, fs, SIZE_X, a11, a12 ); } __syncthreads(); } // for loop through pivot rows ends here my = mask[y]; // update successful pivot ordering in index; // if this CUDA block failed to pivot the part of column threadIdx.y of A // delegated to it, then possible successful pivoting of its other parts // by other blocks is canceled by zeroing index[threadIdx.y]; // if some other part of this column is unsuccessful, index[threadIdx.y] // remains zero if ( threadIdx.x == 0 && threadIdx.y < ncols ) atomicMin(&index[threadIdx.y], my); // save L and D factors and LD dev_save_fact< ELEMENT_TYPE, TILE_SIZE, TILES > ( block, nrows, ncols, offp, my, fs, fds, ds, f, ldf, fd, ldfd, d ); } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __global__ void cu_block_ldlt( int nrows, // n.o. rows in A int ncols, // n.o. cols in A (<= TILE_SIZE) int offp, // n.o. rows in A_u ELEMENT_TYPE* a, // array of A's elements int lda, // leading dimension of a ELEMENT_TYPE* f, // array of L's elements int ldf, // leading dimension of f ELEMENT_TYPE* fd, // array of (L*D)'s elements int ldfd, // leading dimension of fd ELEMENT_TYPE* d, // array of D**(-1)'s diagonal and subdiagonal elements ELEMENT_TYPE delta, // pivoting threshold ELEMENT_TYPE eps, // zero column threshold: // the column is zeroed if all elements are <= eps int* index, // pivot index (cf. permutation matrix P) int* stat // n.o. successful pivots ) { dev_block_ldlt< ELEMENT_TYPE, TILE_SIZE, TILES > ( blockIdx.x, nrows, ncols, offp, a, lda, f, ldf, fd, ldfd, d, delta, eps, index, stat ); return; } // Same as cu_block_fact but for several A's of different size simultaneously // // Called by multinode_ldlt factorization subroutine. // template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __global__ void cu_multiblock_ldlt( struct multiblock_fact_type *mbfdata, // factorization data ELEMENT_TYPE* f, // same for L ELEMENT_TYPE delta, // same as in cu_block_fact ELEMENT_TYPE eps, // same as in cu_block_fact int* index, // array of all pivot indices int* stat // array of successful pivots' numbers ) { /* * Read information on what to do from global memory */ mbfdata += blockIdx.x; // shift to the data for this CUDA block int ncols = mbfdata->ncols; // n.o. cols in A processed by this CUDA block if ( ncols < 1 ) return; int nrows = mbfdata->nrows; // n.o. rows in A int lda = mbfdata->ld; // leading dimension of A int p = mbfdata->p; // n.o. 
rows in A_u int node = mbfdata->node; // A's number int block = mbfdata->offb; // relative CUDA block index f += mbfdata->offf; // shift to the array of this L elements double *fd = mbfdata->ldptr; double *a = mbfdata->aptr; // pointer to A double *d = mbfdata->dptr; // pointer to D**(-1) dev_block_ldlt < double, TILE_SIZE, TILES > ( block, nrows, ncols, p, a, lda, f, lda, fd, lda, d, delta, eps, &index[node*TILE_SIZE], &stat[node]); } /* LDLT factorization kernel for the root delays block. The columns which the above factorization kernels failed to pivot are delayed, ie left unchanged, until some other columns in the same node are successfully pivoted, after which pivoting of delayed columns is attempted again. When a factorization subroutine terminates, generally there still may be delayed columns which this subroutine cannot possibly pivot, and they are passed on to the parent node in the elimination tree. At the root node, however, this is not possible, and a special kernel given below is applied to delayed columns, which together with the respective rows now form a square block at the lower left corner of the root node matrix. The main difference between the factorization kernel below and those above is that the pivot is sought in the whole matrix because, in the above notation, blocks A_u and A_l are no longer present. Since this matrix may be too large to fit into shared memory, the kernel below works mostly in the global memory (shared memory is only used for finding the largest element of a column). */ template< typename ELEMENT_TYPE > __global__ void cu_square_ldlt( int n, ELEMENT_TYPE* a, // A on input, L on output ELEMENT_TYPE* f, // L ELEMENT_TYPE* w, // L*D ELEMENT_TYPE* d, // main diag and subdiag of the inverse of D int ld, // leading dimension of a, f, w ELEMENT_TYPE delta, // same as above ELEMENT_TYPE eps, // same as above int* ind, // same as in cu_block_fact int* stat // same as in cu_block_fact ) { int x, y; int col; int ip, jp; int pivoted, recent; ELEMENT_TYPE a11, a12, a22, det; ELEMENT_TYPE* work = (ELEMENT_TYPE*)SharedMemory; // work array int* iwork = (int*)&work[blockDim.x]; // integer work array int* iw = (int*)&iwork[blockDim.x]; // iw[0]: failure flag, // iw[1]: largest col. elem. index for ( x = threadIdx.x; x < n; x += blockDim.x ) { ind[x] = 0; // initialize pivot index/processed columns mask for ( y = 0; y < n; y++ ) f[x + ld*y] = a[x + ld*y]; // copy A to L } for ( x = threadIdx.x; x < 2*n; x += blockDim.x ) d[x] = 0.0; // initialize D __syncthreads(); pivoted = 0; // n.o. pivoted cols for ( int pass = 0; ; pass++ ) { // failed cols are skipped until next pass recent = 0; // n.o. 
cols pivoted during this pass for ( col = 0; col < n; ) { if ( ind[col] ) { col++; // already pivoted, move on continue; } if ( threadIdx.x == 0 ) iw[0] = 0; // initialize failure flag __syncthreads(); // find the largest element in the pending column // // first, each thread finds its candidate for the largest one a11 = -1.0; y = -1; for ( x = threadIdx.x; x < n; x += blockDim.x ) { if ( ind[x] == 0 ) { a12 = abs(f[x + ld*col]); if ( a12 >= a11 ) { a11 = a12; y = x; } } } work[threadIdx.x] = a11; // the largest one for this thread iwork[threadIdx.x] = y; // its index __syncthreads(); // now first 8 threads reduce the number of candidates to 8 if ( threadIdx.x < 8 ) { for ( x = threadIdx.x + 8; x < blockDim.x; x += 8 ) if ( iwork[x] >= 0 && work[x] > work[threadIdx.x] ) { work[threadIdx.x] = work[x]; iwork[threadIdx.x] = iwork[x]; } } __syncthreads(); // the first thread finds the largest element and its index if ( threadIdx.x == 0 ) { y = 0; for ( x = 1; x < 8 && x < blockDim.x; x++ ) if ( iwork[x] >= 0 && (iwork[y] < 0 || work[x] > work[y]) ) y = x; iw[1] = iwork[y]; // the largest element index } __syncthreads(); // select the pivot based on the largest element index ip = col; jp = iw[1]; dev_select_pivots_at_root< ELEMENT_TYPE > ( f, ld, ip, jp, a11, a12, a22, det ); // try to pivot if ( ip == jp ) { // 1x1 pivot for ( x = threadIdx.x; x < n; x += blockDim.x ) if ( ind[x] == 0 ) if ( dev_1x1_pivot_fails< ELEMENT_TYPE > ( x, ip, f, w, ld, det, delta, eps ) ) iw[0] = 1; } else { // 2x2 pivot for ( x = threadIdx.x; x < n; x += blockDim.x ) if ( ind[x] == 0 ) if ( dev_2x2_pivot_fails< ELEMENT_TYPE > ( x, ip, jp, f, w, ld, a11, a12, a22, det, delta, eps ) ) iw[0] = 1; } __syncthreads(); if ( iw[0] ) { // pivot failed, restore the failed column(s) for ( x = threadIdx.x; x < n; x += blockDim.x ) { if ( ind[x] ) continue; f[x + ld*ip] = w[x + ld*ip]; if ( ip != jp ) f[x + ld*jp] = w[x + ld*jp]; } __syncthreads(); col++; // move on continue; } if ( threadIdx.x == 0 ) { // mark pivoted columns and invert the pivot if possible ind[ip] = pivoted + 1; if ( ip == jp ) { if ( abs(det) > eps ) // ok to invert d[2*pivoted] = 1.0/det; } else { ind[jp] = pivoted + 2; if ( abs(det) > abs(a11)*abs(a22)*1.0e-15 && abs(det) > eps*(abs(a11) + abs(a22) + abs(a12)) ) { // ok to invert d[2*pivoted ] = a22/det; d[2*pivoted + 1] = -a12/det; d[2*pivoted + 2] = a11/det; } } } __syncthreads(); // update pivot counters if ( ip == jp ) { pivoted++; recent++; } else { pivoted += 2; recent += 2; } // eliminate pivoted columns from non-processed if ( ip == jp ) { for ( x = threadIdx.x; x < n; x += blockDim.x ) for ( y = 0; y < n; y++ ) if ( x != ip && ind[y] == 0 ) f[x + ld*y] -= f[x + ld*ip] * f[ip + ld*y]; } else { for ( x = threadIdx.x; x < n; x += blockDim.x ) { for ( y = 0; y < n; y++ ) { if ( x != ip && x != jp && ind[y] == 0 ) { f[x + ld*y] -= f[x + ld*ip] * f[ip + ld*y] + f[x + ld*jp] * f[jp + ld*y]; } } } } __syncthreads(); if ( ip == col ) // this column is pivoted, move on col++; } // loop across columns if ( pivoted == n // all done || recent == 0 ) // no pivotable columns left break; } // pass if ( threadIdx.x == 0 ) stat[0] = pivoted; if ( pivoted < n ) // factorization failed return; // copy L to A for ( x = threadIdx.x; x < n; x += blockDim.x ) for ( y = 0; y < n; y++ ) a[ind[x] - 1 + ld*(ind[y] - 1)] = f[x + ld*y]; } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __global__ void cu_multiblock_chol( struct multiblock_fact_type *mbfdata, ELEMENT_TYPE* f, // array of L nodes 
int* stat // execution status ) { /* * Read information on what to do from global memory */ mbfdata += blockIdx.x; int ncols = mbfdata->ncols; if ( ncols < 1 ) return; int nrows = mbfdata->nrows; int ld = mbfdata->ld; int node = mbfdata->node; int block = mbfdata->offb; ELEMENT_TYPE *a = mbfdata->aptr; f += mbfdata->offf; stat += node; dev_block_chol< ELEMENT_TYPE, TILE_SIZE, TILES > ( block, nrows, ncols, a, ld, f, ld, stat ); } struct cstat_data_type { int nelim; double *dval; }; __global__ void cu_collect_stats( const struct cstat_data_type *csdata, struct cuda_stats *stats ) { // Designed to be run with a single thread csdata += blockIdx.x; double *d = csdata->dval; int nelim = csdata->nelim; int num_zero = 0; int num_neg = 0; int num_two = 0; for(int i = 0; i<nelim; ) { double a11 = d[2*i]; double a21 = d[2*i + 1]; if ( a21 == 0.0 ) { // 1x1 pivot (can be a zero pivot) if ( a11 == 0 ) num_zero++; if ( a11 < 0 ) num_neg++; i++; } else { // 2x2 pivot (can't be a zero pivot) double a22 = d[2*(i + 1)]; num_two++; // To check for negative eigenvalues, we exploit // det = product of evals // trace = sum of evals // if det is negative, exactly one eval is negative; // otherwise, both have same sign, equal to sign of trace double det = a11*a22 - a21*a21; double trace = a11 + a22; if ( det < 0 ) num_neg++; else if ( trace < 0 ) num_neg += 2; i += 2; } } if ( num_neg > 0 ) atomicAdd(&(stats->num_neg), num_neg); if ( num_zero > 0 ) atomicAdd(&(stats->num_zero), num_zero); if ( num_two > 0 ) atomicAdd(&(stats->num_two), num_two); } } /* anon namespace */ /******************************************************************************* * Following routines are exported with C binding so can be called from Fortran ******************************************************************************/ extern "C" { void spral_ssids_block_ldlt( cudaStream_t *stream, int nrows, int ncols, int p, double* a, int lda, double* f, int ldf, double* fd, int ldfd, double* d, double delta, double eps, int* index, int* stat ) { int nblocks = (nrows - ncols - 1)/(BLOCK_SIZE*(BLOCKS - 1)) + 1; cu_block_ldlt_init<<< 1, BLOCK_SIZE, 0, *stream >>>( ncols, stat, index ); dim3 threads(BLOCK_SIZE, 2*BLOCK_SIZE); cu_block_ldlt < double, BLOCK_SIZE, BLOCKS > <<< nblocks, threads, 0, *stream >>> ( nrows, ncols, p, a, lda, f, ldf, fd, ldfd, d, delta, eps, index, stat ); } void spral_ssids_block_llt( cudaStream_t *stream, int nrows, int ncols, double* a, int lda, double* f, int ldf, int* stat ) { int smsize = CBLOCKS*BLOCK_SIZE*BLOCK_SIZE*sizeof(double); int nblocks = (nrows - ncols - 1)/(BLOCK_SIZE*(CBLOCKS - 1)) + 1; dim3 threads(BLOCK_SIZE, BLOCK_SIZE); cu_block_chol < double, BLOCK_SIZE, CBLOCKS > <<< nblocks, threads, smsize, *stream >>> ( nrows, ncols, a, lda, f, ldf, stat ); } void spral_ssids_collect_stats(cudaStream_t *stream, int nblk, const struct cstat_data_type *csdata, struct cuda_stats *stats) { for(int i=0; i<nblk; i+=MAX_CUDA_BLOCKS) { int nb = min(MAX_CUDA_BLOCKS, nblk-i); cu_collect_stats <<<nb, 1, 0, *stream>>> (csdata+i, stats); CudaCheckError(); } } void spral_ssids_multiblock_ldlt( cudaStream_t *stream, int nblocks, struct multiblock_fact_type *mbfdata, double* f, double delta, double eps, int* index, int* stat ) { dim3 threads(BLOCK_SIZE, 2*BLOCK_SIZE); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); cu_multiblock_ldlt < double, BLOCK_SIZE, MBLOCKS > <<< nb, threads, 0, *stream >>> ( mbfdata + i, f, delta, eps, index, stat ); } } void 
spral_ssids_multiblock_ldlt_setup( cudaStream_t *stream, int nblocks, struct multinode_fact_type *ndata, struct multiblock_fact_type *mbfdata, int step, int block_size, int blocks, int* stat, int* ind, int* ncb ) { dim3 threads(10,8); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); cu_multiblock_fact_setup <<< nb, threads, 0, *stream >>> ( ndata + i, mbfdata, step, block_size, blocks, i, stat + i, ind + block_size*i, ncb ); } } void spral_ssids_multiblock_llt( cudaStream_t *stream, int nblocks, struct multiblock_fact_type *mbfdata, double* f, int* stat ) { if ( nblocks < 1 ) return; int smsize = MCBLOCKS*BLOCK_SIZE*BLOCK_SIZE*sizeof(double); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); cu_multiblock_chol < double, BLOCK_SIZE, MCBLOCKS > <<< nb, threads, smsize, *stream >>> ( mbfdata + i, f, stat ); } } void spral_ssids_multiblock_llt_setup( cudaStream_t *stream, int nblocks, struct multinode_fact_type *ndata, struct multiblock_fact_type *mbfdata, int step, int block_size, int blocks, int* stat, int* ncb ) { dim3 threads(10,8); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); cu_multiblock_fact_setup <<< nb, threads, 0, *stream >>> ( ndata + i, mbfdata, step, block_size, blocks, i, stat + i, 0, ncb ); } } void spral_ssids_square_ldlt( cudaStream_t *stream, int n, double* a, double* f, double* w, double* d, int ld, double delta, double eps, int* index, int* stat ) { int nt = min(n, 256); int sm = nt*sizeof(double) + (nt + 2)*sizeof(int); cu_square_ldlt< double ><<< 1, nt, sm, *stream >>> ( n, a, f, w, d, ld, delta, eps, index, stat ); } } // end extern "C"
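For reference, the pivoting code above packs the inverse of each accepted 2x2 pivot block into d[]. The standalone host sketch below (not part of SPRAL; the singularity guard is simplified relative to the kernel's delta/eps tests) shows the same algebra: for a symmetric pivot D = [a11 a12; a12 a22] with det = a11*a22 - a12*a12, the inverse is (1/det)*[a22 -a12; -a12 a11], stored as the three values a22/det, -a12/det, a11/det.

#include <cmath>
#include <cstdio>

// Invert a symmetric 2x2 pivot block and pack its inverse the way the
// kernels above pack D**(-1): d0 = a22/det, d1 = -a12/det, d2 = a11/det.
// Returns false when the block is treated as (near-)singular.
static bool invert_2x2_pivot(double a11, double a12, double a22, double eps,
                             double* d0, double* d1, double* d2)
{
  double det = a11*a22 - a12*a12;
  if ( std::abs(det) <= eps*(std::abs(a11) + std::abs(a22) + std::abs(a12)) )
    return false; // rejected: the columns stay delayed
  *d0 =  a22/det;
  *d1 = -a12/det;
  *d2 =  a11/det;
  return true;
}

int main()
{
  double d0, d1, d2;
  if ( invert_2x2_pivot(4.0, 1.0, 3.0, 1e-12, &d0, &d1, &d2) )
    std::printf("packed D**(-1): %g %g %g\n", d0, d1, d2); // 3/11, -1/11, 4/11
  return 0;
}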
11563205e54e07b00029afd20a25978e9d9196d5.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include "opencv2/opencv.hpp" #include <opencv2/core/cuda.hpp> #include "opencv2/core.hpp" #include <opencv2/core/utility.hpp> #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" #include <string> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "device_launch_parameters.h" #include "HPT1.h" #include <stdio.h> using namespace std; using namespace cv; Mat host_image; Mat orig_image; unsigned char * device_src = nullptr; unsigned char * device_dst = nullptr; HighPrecisionTime t; size_t image_bytes; string window_name("Ouput"); const int THRESHOLD_SLIDER_MAX = 256; int threshold_slider = 0; int inner_slider = 0; int outer_slider = 100; double gpu_accumulator = 0; int gpu_counter = 0; double cpu_accumulator = 0; int cpu_counter = 0; bool gpu_mode = true; typedef unsigned char uByte; __global__ void boxKernel(uByte* data, uByte* data2,int w, int h) { int PlaceX = ((blockIdx.x *blockDim.x) + threadIdx.x); int PlaceY = (blockIdx.y *blockDim.y) + threadIdx.y; if (PlaceX < w && PlaceY < h) { int currentPixelSum = data[PlaceX*PlaceY]; //w-1,h+1 currentPixelSum += data[(PlaceY + 1)*PlaceX - 1]; //h+1, w+1 currentPixelSum += data[(PlaceY + 1) * PlaceX + 1]; //h+1 currentPixelSum += data[(PlaceY + 1) * PlaceX]; //w-1 currentPixelSum += data[(PlaceX * PlaceX) - 1]; //w+1 currentPixelSum += data[(PlaceY*PlaceX) + 1]; //w-1,h-1 currentPixelSum += data[((PlaceY - 1) * PlaceX) - 1]; //h-1 currentPixelSum += data[(PlaceY - 1)*PlaceX]; //w+1,h-1 currentPixelSum += data[(PlaceY - 1) * PlaceX + 1]; data2[PlaceY * PlaceY] = uByte(currentPixelSum / 9.0f); } } void ConvolveGPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer) { uByte* imageTemp = nullptr; uByte* imageBlurred = nullptr; hipError_t cudaTest; int ConvolveSize = W*H; cudaTest = hipSetDevice(0); if (cudaTest != hipSuccess) { cout << "Error with device" << endl; } cudaTest = hipMalloc(&imageTemp, ConvolveSize); if (cudaTest != hipSuccess) { cout << "hipMalloc failed!" << endl; } cudaTest = hipMalloc(&imageBlurred, ConvolveSize); if (cudaTest != hipSuccess) { cout << "hipMalloc failed!" << endl; } cudaTest = hipDeviceSynchronize(); if (cudaTest != hipSuccess) { cout << "cudaSync failed!" << endl; } cudaTest = hipMemcpy(imageTemp, src, ConvolveSize, hipMemcpyHostToDevice); if (cudaTest != hipSuccess) { cout << "cudacpy failed!" << endl; } int blocksNeeded = (ConvolveSize + 1023) / 1024; hipDeviceSynchronize(); boxKernel << < blocksNeeded, 1024 >> > (imageTemp, imageBlurred, host_image.cols, host_image.rows); cudaTest = hipMemcpy(dst, imageBlurred, ConvolveSize, hipMemcpyDeviceToHost); if (cudaTest != hipSuccess) { cout << "cudacpy2 failed!" 
<< endl; } } double ConvolveCPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer){ int kernel_size, half_kernel_size; kernel_size = 3;// (getTrackbarPos(kernel_size_name, window_name_cpu) + 1) * 2 + 1; half_kernel_size = kernel_size / 2; float divisor = 9.0f;//float(SumKernel(kernel, kernel_size)); timer.TimeSinceLastCall(); for (int y = 0; y < H; y++) { for (int x = 0; x < W; x++) { // Initialize with 0,0 int sum = 0; // *(src + y * W + x) * kernel[half_kernel_size * kernel_size + half_kernel_size]; uByte * kp = kernel + half_kernel_size; for (int y_offset = -half_kernel_size; y_offset <= half_kernel_size; y_offset++, kp += kernel_size) { if (y_offset + y < 0 || y_offset + y >= H) continue; sum += *(src + (y_offset + y) * W + x) * *kp; for (int x_offset = 1; x_offset <= half_kernel_size; x_offset++) { if (x - x_offset >= 0) sum += *(src + (y_offset + y) * W - x_offset + x) * *(kp - x_offset); if (x + x_offset < W) sum += *(src + (y_offset + y) * W + x_offset + x) * *(kp + x_offset); } } *(dst + y * W + x) = uByte(float(sum) / divisor); } } return timer.TimeSinceLastCall(); } void Threshold(Mat & image, int t) { assert(image.channels() == 1); unsigned char lt = static_cast<unsigned char>(t); const long long e = reinterpret_cast<long long>(image.data + image.cols * image.rows); if (t == 256) { memset(image.data, 0, image.rows * image.cols); } else { for (long long p = reinterpret_cast<long long>(image.data); p < e; p++) { *((unsigned char *)p) = (*((unsigned char *)p) >= lt) ? 255 : 0; } } } __global__ void vignette(const unsigned char * src, unsigned char * dst, float inner, float outer, const size_t width, const size_t height) { // the xIndex and yIndex will be used cordinates pixels of the image // NOTE // NOTE This assumes that we are treating this as a two dimensional data structure and the blocks will be used in the same way // NOTE size_t xIndex = blockIdx.x * blockDim.x + threadIdx.x; size_t yIndex = blockIdx.y * blockDim.y + threadIdx.y; // Checking to see if the indexs are within the bounds of the image if (xIndex < width && yIndex < height) { // offset represents postion of the current pixel in the one dimensional array size_t offset = yIndex * width + xIndex; // Shift the pixel oriented coordinates into image resolution independent coordinates // where 0, 0 is the center of the image. float x = xIndex / float(height) - float(width) / float(height) / 2.0f; float y = yIndex / float(height) - 0.5f; //Calculates current pixels distance from the center where the cordinates are 0, 0 float d = sqrtf(x * x + y * y); if (d < inner) { // if d is less than inner boundary, we don't change that specific image pixel *(dst + offset) = *(src + offset); } else if (d > outer) { // if d is greater than outer boundary, we set it to 0 so it becomes black *(dst + offset) = 0; } else { // If in between the inner and outer boundaries, it will be a shade of gray. // NOTE // NOTE This assumes... by the time we get here, we have checked that outer does not equal inner // NOTE This also assumes ... 
by the time we get here, we have made inner less than outer // NOTE float v = 1 - (d - inner) / (outer - inner); *(dst + offset) = (unsigned char)(*(src + offset) * v); } } } __global__ void kernel(const unsigned char * src, unsigned char * dst, int level, const size_t width, const size_t height) { const size_t xIndex = blockIdx.x * blockDim.x + threadIdx.x; const size_t yIndex = blockIdx.y * blockDim.y + threadIdx.y; if (xIndex < width && yIndex < height) { size_t o = yIndex * width + xIndex; if (level == 256) { *(dst + o) = 0; } else { *(dst + o) = (*(src + o) >= level) ? 255 : 0; } // Notice how the below version avoids having an 'if' statement. // I wonder if this is truly correct - I'll have to test this // carefully someday but it works correctly. I figured the // subtraction should cause an underflow which the shift might // propagate through the rest of the byte so as to cause 255. // *(dst + o) = ~((*(src + o) - level - 1) >> 7); } } void on_trackbar(int, void *) { dim3 grid((host_image.cols + 1023) / 1024, host_image.rows); double d; int i = inner_slider; int o = outer_slider; if (i > o) { swap(i, o); } if (i == o) { o++; } float inner = i / 100.0f; float outer = o / 100.0f; if (gpu_mode) { if (hipMemcpy(device_src, orig_image.ptr(), image_bytes, hipMemcpyHostToDevice) != hipSuccess) { cerr << "hipMemcpy failed at " << __LINE__ << endl; hipDeviceReset(); exit(1); } t.TimeSinceLastCall(); kernel << <grid, 1024 >> >(device_src, device_dst, threshold_slider, host_image.cols, host_image.rows); hipDeviceSynchronize(); gpu_accumulator += t.TimeSinceLastCall(); gpu_counter++; cout << "GPU AVG " << setw(12) << fixed << setprecision(8) << gpu_accumulator / ((double)gpu_counter) << " seconds"; vignette << <grid, 1024 >> >(device_dst, device_src, inner, outer, host_image.cols, host_image.rows); hipDeviceSynchronize(); t.TimeSinceLastCall(); if (hipMemcpy(host_image.ptr(), device_src, image_bytes, hipMemcpyDeviceToHost) != hipSuccess) { cerr << "hipMemcpy failed at " << __LINE__ << endl; } d = t.TimeSinceLastCall(); cout << " XFER: " << setw(8) << setprecision(4) << ((double)image_bytes) / (d * 1024.0 * 1024.0 * 1024.0) << " GB/s" << endl; } else { t.TimeSinceLastCall(); host_image = orig_image; Threshold(host_image, threshold_slider); cpu_accumulator += t.TimeSinceLastCall(); cpu_counter++; cout << "CPU AVG " << setw(12) << fixed << setprecision(8) << cpu_accumulator / ((double)cpu_counter) << " seconds"; cout << endl; } imshow(window_name, host_image); } int main(int argc, char * argv[]) { if (argc != 2) { cout << " Usage: display_image ImageToLoadAndDisplay" << endl; return -1; } host_image = imread(argv[1], CV_LOAD_IMAGE_COLOR); if (!host_image.data) { cout << "Could not open or find the image" << std::endl; return -1; } cout << "Image has: " << host_image.channels() << " channels." << endl; cout << "Image has size: " << host_image.cols << " x " << host_image.rows << " pixels." << endl; cvtColor(host_image, host_image, cv::COLOR_RGB2GRAY); host_image.copyTo(orig_image); cout << "Converted to gray." << endl; if (hipSetDevice(0) != hipSuccess) { cerr << "hipSetDevice(0) failed." << endl; hipDeviceReset(); exit(1); } image_bytes = host_image.rows * host_image.cols; hipMalloc(&device_src, image_bytes); hipMalloc(&device_dst, image_bytes); if (device_dst == nullptr || device_src == nullptr) { cerr << "hipMalloc failed on either device_src or device_dst at " << __LINE__ << endl; hipDeviceReset(); exit(1); } // Copy the source image to the device. 
Note - although the kernel will be // called repeatedly, this is the only time we'll copy TO the device as the // image processing operation does not harm the original image. if (hipMemcpy(device_src, host_image.data, image_bytes, hipMemcpyHostToDevice) != hipSuccess) { cerr << "hipMemcpy failed at " << __LINE__ << endl; hipDeviceReset(); exit(1); } namedWindow(window_name, WINDOW_KEEPRATIO); resizeWindow(window_name, host_image.cols / 10, host_image.rows / 10); createTrackbar("Threshold", window_name, &threshold_slider, THRESHOLD_SLIDER_MAX, on_trackbar); createTrackbar("Inner", window_name, &inner_slider, 100, on_trackbar); createTrackbar("Outer", window_name, &outer_slider, 100, on_trackbar); on_trackbar(threshold_slider, 0); int k; while ((k = waitKey(10)) != 'q') { if (k == 'm') { gpu_mode = !gpu_mode; on_trackbar(0, nullptr); } } destroyAllWindows(); if (device_src != nullptr) hipFree(device_src); if (device_dst != nullptr) hipFree(device_dst); hipDeviceReset(); return 0; }
11563205e54e07b00029afd20a25978e9d9196d5.cu
#include <iostream> #include <iomanip> #include "opencv2/opencv.hpp" #include <opencv2/core/cuda.hpp> #include "opencv2/core.hpp" #include <opencv2/core/utility.hpp> #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" #include <string> #include <cuda.h> #include <cuda_runtime.h> #include "device_launch_parameters.h" #include "HPT1.h" #include <stdio.h> using namespace std; using namespace cv; Mat host_image; Mat orig_image; unsigned char * device_src = nullptr; unsigned char * device_dst = nullptr; HighPrecisionTime t; size_t image_bytes; string window_name("Ouput"); const int THRESHOLD_SLIDER_MAX = 256; int threshold_slider = 0; int inner_slider = 0; int outer_slider = 100; double gpu_accumulator = 0; int gpu_counter = 0; double cpu_accumulator = 0; int cpu_counter = 0; bool gpu_mode = true; typedef unsigned char uByte; __global__ void boxKernel(uByte* data, uByte* data2,int w, int h) { int PlaceX = ((blockIdx.x *blockDim.x) + threadIdx.x); int PlaceY = (blockIdx.y *blockDim.y) + threadIdx.y; if (PlaceX < w && PlaceY < h) { int currentPixelSum = data[PlaceX*PlaceY]; //w-1,h+1 currentPixelSum += data[(PlaceY + 1)*PlaceX - 1]; //h+1, w+1 currentPixelSum += data[(PlaceY + 1) * PlaceX + 1]; //h+1 currentPixelSum += data[(PlaceY + 1) * PlaceX]; //w-1 currentPixelSum += data[(PlaceX * PlaceX) - 1]; //w+1 currentPixelSum += data[(PlaceY*PlaceX) + 1]; //w-1,h-1 currentPixelSum += data[((PlaceY - 1) * PlaceX) - 1]; //h-1 currentPixelSum += data[(PlaceY - 1)*PlaceX]; //w+1,h-1 currentPixelSum += data[(PlaceY - 1) * PlaceX + 1]; data2[PlaceY * PlaceY] = uByte(currentPixelSum / 9.0f); } } void ConvolveGPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer) { uByte* imageTemp = nullptr; uByte* imageBlurred = nullptr; cudaError_t cudaTest; int ConvolveSize = W*H; cudaTest = cudaSetDevice(0); if (cudaTest != cudaSuccess) { cout << "Error with device" << endl; } cudaTest = cudaMalloc(&imageTemp, ConvolveSize); if (cudaTest != cudaSuccess) { cout << "cudaMalloc failed!" << endl; } cudaTest = cudaMalloc(&imageBlurred, ConvolveSize); if (cudaTest != cudaSuccess) { cout << "cudaMalloc failed!" << endl; } cudaTest = cudaDeviceSynchronize(); if (cudaTest != cudaSuccess) { cout << "cudaSync failed!" << endl; } cudaTest = cudaMemcpy(imageTemp, src, ConvolveSize, cudaMemcpyHostToDevice); if (cudaTest != cudaSuccess) { cout << "cudacpy failed!" << endl; } int blocksNeeded = (ConvolveSize + 1023) / 1024; cudaDeviceSynchronize(); boxKernel << < blocksNeeded, 1024 >> > (imageTemp, imageBlurred, host_image.cols, host_image.rows); cudaTest = cudaMemcpy(dst, imageBlurred, ConvolveSize, cudaMemcpyDeviceToHost); if (cudaTest != cudaSuccess) { cout << "cudacpy2 failed!" 
<< endl; } } double ConvolveCPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer){ int kernel_size, half_kernel_size; kernel_size = 3;// (getTrackbarPos(kernel_size_name, window_name_cpu) + 1) * 2 + 1; half_kernel_size = kernel_size / 2; float divisor = 9.0f;//float(SumKernel(kernel, kernel_size)); timer.TimeSinceLastCall(); for (int y = 0; y < H; y++) { for (int x = 0; x < W; x++) { // Initialize with 0,0 int sum = 0; // *(src + y * W + x) * kernel[half_kernel_size * kernel_size + half_kernel_size]; uByte * kp = kernel + half_kernel_size; for (int y_offset = -half_kernel_size; y_offset <= half_kernel_size; y_offset++, kp += kernel_size) { if (y_offset + y < 0 || y_offset + y >= H) continue; sum += *(src + (y_offset + y) * W + x) * *kp; for (int x_offset = 1; x_offset <= half_kernel_size; x_offset++) { if (x - x_offset >= 0) sum += *(src + (y_offset + y) * W - x_offset + x) * *(kp - x_offset); if (x + x_offset < W) sum += *(src + (y_offset + y) * W + x_offset + x) * *(kp + x_offset); } } *(dst + y * W + x) = uByte(float(sum) / divisor); } } return timer.TimeSinceLastCall(); } void Threshold(Mat & image, int t) { assert(image.channels() == 1); unsigned char lt = static_cast<unsigned char>(t); const long long e = reinterpret_cast<long long>(image.data + image.cols * image.rows); if (t == 256) { memset(image.data, 0, image.rows * image.cols); } else { for (long long p = reinterpret_cast<long long>(image.data); p < e; p++) { *((unsigned char *)p) = (*((unsigned char *)p) >= lt) ? 255 : 0; } } } __global__ void vignette(const unsigned char * src, unsigned char * dst, float inner, float outer, const size_t width, const size_t height) { // the xIndex and yIndex will be used cordinates pixels of the image // NOTE // NOTE This assumes that we are treating this as a two dimensional data structure and the blocks will be used in the same way // NOTE size_t xIndex = blockIdx.x * blockDim.x + threadIdx.x; size_t yIndex = blockIdx.y * blockDim.y + threadIdx.y; // Checking to see if the indexs are within the bounds of the image if (xIndex < width && yIndex < height) { // offset represents postion of the current pixel in the one dimensional array size_t offset = yIndex * width + xIndex; // Shift the pixel oriented coordinates into image resolution independent coordinates // where 0, 0 is the center of the image. float x = xIndex / float(height) - float(width) / float(height) / 2.0f; float y = yIndex / float(height) - 0.5f; //Calculates current pixels distance from the center where the cordinates are 0, 0 float d = sqrtf(x * x + y * y); if (d < inner) { // if d is less than inner boundary, we don't change that specific image pixel *(dst + offset) = *(src + offset); } else if (d > outer) { // if d is greater than outer boundary, we set it to 0 so it becomes black *(dst + offset) = 0; } else { // If in between the inner and outer boundaries, it will be a shade of gray. // NOTE // NOTE This assumes... by the time we get here, we have checked that outer does not equal inner // NOTE This also assumes ... 
by the time we get here, we have made inner less than outer // NOTE float v = 1 - (d - inner) / (outer - inner); *(dst + offset) = (unsigned char)(*(src + offset) * v); } } } __global__ void kernel(const unsigned char * src, unsigned char * dst, int level, const size_t width, const size_t height) { const size_t xIndex = blockIdx.x * blockDim.x + threadIdx.x; const size_t yIndex = blockIdx.y * blockDim.y + threadIdx.y; if (xIndex < width && yIndex < height) { size_t o = yIndex * width + xIndex; if (level == 256) { *(dst + o) = 0; } else { *(dst + o) = (*(src + o) >= level) ? 255 : 0; } // Notice how the below version avoids having an 'if' statement. // I wonder if this is truly correct - I'll have to test this // carefully someday but it works correctly. I figured the // subtraction should cause an underflow which the shift might // propagate through the rest of the byte so as to cause 255. // *(dst + o) = ~((*(src + o) - level - 1) >> 7); } } void on_trackbar(int, void *) { dim3 grid((host_image.cols + 1023) / 1024, host_image.rows); double d; int i = inner_slider; int o = outer_slider; if (i > o) { swap(i, o); } if (i == o) { o++; } float inner = i / 100.0f; float outer = o / 100.0f; if (gpu_mode) { if (cudaMemcpy(device_src, orig_image.ptr(), image_bytes, cudaMemcpyHostToDevice) != cudaSuccess) { cerr << "cudaMemcpy failed at " << __LINE__ << endl; cudaDeviceReset(); exit(1); } t.TimeSinceLastCall(); kernel << <grid, 1024 >> >(device_src, device_dst, threshold_slider, host_image.cols, host_image.rows); cudaDeviceSynchronize(); gpu_accumulator += t.TimeSinceLastCall(); gpu_counter++; cout << "GPU AVG " << setw(12) << fixed << setprecision(8) << gpu_accumulator / ((double)gpu_counter) << " seconds"; vignette << <grid, 1024 >> >(device_dst, device_src, inner, outer, host_image.cols, host_image.rows); cudaDeviceSynchronize(); t.TimeSinceLastCall(); if (cudaMemcpy(host_image.ptr(), device_src, image_bytes, cudaMemcpyDeviceToHost) != cudaSuccess) { cerr << "cudaMemcpy failed at " << __LINE__ << endl; } d = t.TimeSinceLastCall(); cout << " XFER: " << setw(8) << setprecision(4) << ((double)image_bytes) / (d * 1024.0 * 1024.0 * 1024.0) << " GB/s" << endl; } else { t.TimeSinceLastCall(); host_image = orig_image; Threshold(host_image, threshold_slider); cpu_accumulator += t.TimeSinceLastCall(); cpu_counter++; cout << "CPU AVG " << setw(12) << fixed << setprecision(8) << cpu_accumulator / ((double)cpu_counter) << " seconds"; cout << endl; } imshow(window_name, host_image); } int main(int argc, char * argv[]) { if (argc != 2) { cout << " Usage: display_image ImageToLoadAndDisplay" << endl; return -1; } host_image = imread(argv[1], CV_LOAD_IMAGE_COLOR); if (!host_image.data) { cout << "Could not open or find the image" << std::endl; return -1; } cout << "Image has: " << host_image.channels() << " channels." << endl; cout << "Image has size: " << host_image.cols << " x " << host_image.rows << " pixels." << endl; cvtColor(host_image, host_image, cv::COLOR_RGB2GRAY); host_image.copyTo(orig_image); cout << "Converted to gray." << endl; if (cudaSetDevice(0) != cudaSuccess) { cerr << "cudaSetDevice(0) failed." << endl; cudaDeviceReset(); exit(1); } image_bytes = host_image.rows * host_image.cols; cudaMalloc(&device_src, image_bytes); cudaMalloc(&device_dst, image_bytes); if (device_dst == nullptr || device_src == nullptr) { cerr << "cudaMalloc failed on either device_src or device_dst at " << __LINE__ << endl; cudaDeviceReset(); exit(1); } // Copy the source image to the device. 
Note - although the kernel will be // called repeatedly, this is the only time we'll copy TO the device as the // image processing operation does not harm the original image. if (cudaMemcpy(device_src, host_image.data, image_bytes, cudaMemcpyHostToDevice) != cudaSuccess) { cerr << "cudaMemcpy failed at " << __LINE__ << endl; cudaDeviceReset(); exit(1); } namedWindow(window_name, WINDOW_KEEPRATIO); resizeWindow(window_name, host_image.cols / 10, host_image.rows / 10); createTrackbar("Threshold", window_name, &threshold_slider, THRESHOLD_SLIDER_MAX, on_trackbar); createTrackbar("Inner", window_name, &inner_slider, 100, on_trackbar); createTrackbar("Outer", window_name, &outer_slider, 100, on_trackbar); on_trackbar(threshold_slider, 0); int k; while ((k = waitKey(10)) != 'q') { if (k == 'm') { gpu_mode = !gpu_mode; on_trackbar(0, nullptr); } } destroyAllWindows(); if (device_src != nullptr) cudaFree(device_src); if (device_dst != nullptr) cudaFree(device_dst); cudaDeviceReset(); return 0; }
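The vignette kernel in the two listings above keeps pixels inside the inner radius, zeroes pixels outside the outer radius, and blends linearly in between with v = 1 - (d - inner)/(outer - inner). The small standalone sketch below (a hypothetical helper, not taken from the program) makes the falloff easy to check on the CPU.

#include <cstdio>

// Vignette weight as a function of the height-normalised distance d from the
// image centre: 1 inside `inner`, 0 outside `outer`, linear falloff between.
static float vignette_weight(float d, float inner, float outer)
{
  if (d < inner) return 1.0f;
  if (d > outer) return 0.0f;
  return 1.0f - (d - inner) / (outer - inner);
}

int main()
{
  const float inner = 0.25f, outer = 0.75f;
  for (float d = 0.0f; d <= 1.0f; d += 0.25f)
    std::printf("d=%.2f -> weight=%.2f\n", d, vignette_weight(d, inner, outer));
  return 0;
}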
5078873bd99e5babb1a0aece106f387fe0be9e8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////////////////////////// // MNRT License //////////////////////////////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2010 Mathias Neumann, www.maneumann.com. // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are // permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation and/or // other materials provided with the distribution. // // 3. Neither the name Mathias Neumann, nor the names of contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE // GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// /// \file GPU\image.cu /// /// \brief Contains image conversion kernels. /// \author Mathias Neumann /// \date 21.08.2010 /// \ingroup globalillum //////////////////////////////////////////////////////////////////////////////////////////////////// #include "mncudautil_dev.h" //////////////////////////////////////////////////////////////////////////////////////////////////// /// \KERNELS //////////////////////////////////////////////////////////////////////////////////////////////////// //@{ //////////////////////////////////////////////////////////////////////////////////////////////////// /// \fn __global__ void kernel_ConvertToRGBA8(float4* d_inRadiance, uint numPixel, /// uchar4* d_outScreenBuffer) /// /// \brief Converts radiance values (\c float4) to RGBA8 format. /// /// Resulting buffer can be displayed using OpenGL. /// /// \author Mathias Neumann /// \date 27.06.2010 /// /// \param [in] d_inRadiance Radiance pixel buffer (R, G, B, unused). /// \param numPixel Number of pixels. /// \param [out] d_outScreenBuffer Conversion target pixel buffer (RGBA8 format). //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernel_ConvertToRGBA8(float4* d_inRadiance, uint numPixel, uchar4* d_outScreenBuffer) { uint tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < numPixel) { float4 L = d_inRadiance[tid]; // Write converted data. Ensure coalesced access by writing uchar4 in one step. 
uchar4 pix; pix.x = (uchar)fminf(255.f, 255.f * L.x); pix.y = (uchar)fminf(255.f, 255.f * L.y); pix.z = (uchar)fminf(255.f, 255.f * L.z); d_outScreenBuffer[tid] = pix; } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// \fn __global__ void kernel_TestDiscrepancy(uint numSamples, uint screenW, uint screenH, /// uchar4* d_outScreenBuffer) /// /// \brief Simple test of halton sequence generation. /// /// More precisely, the output of ::dev_RadicalInverse() is tested. This is done by /// generating \a numSamples samples of a 2D halton sequence and plotting them on the /// given screen buffer. Due to possible write conflicts, the result might not be exact. /// /// \author Mathias Neumann /// \date 08.07.2010 /// /// \param numSamples Number of sequence members to generate. /// \param screenW Screen width in pixels. /// \param screenH Screen height in pixels. /// \param [in,out] d_outScreenBuffer Screen buffer (accumulator). //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernel_TestDiscrepancy(uint numSamples, uint screenW, uint screenH, uchar4* d_outScreenBuffer) { uint tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < numSamples) { float rnd1 = dev_RadicalInverse(tid+1, 5); float rnd2 = dev_RadicalInverse(tid+1, 7); uint myX = rnd1*screenW; uint myY = rnd2*screenH; uint myPixel = screenW*myY + myX; uchar4 pix = d_outScreenBuffer[myPixel]; pix.x = min(255, pix.x + 32); pix.y = min(255, pix.y + 32); pix.z = min(255, pix.z + 32); // Warning: Write conflicts possible! d_outScreenBuffer[myPixel] = pix; } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// \fn __global__ void kernel_GenerateErrorImage(uchar4* d_ioImage, uchar4* d_inReference, /// uint numPixels, float fScale) /// /// \brief Generates an error image from current image and reference image. /// /// The relative error is calculated and its absolute value is displayed, where a /// relative error of zero leads to black pixel value. Error scaling is possible by /// providing an error scale factor. /// /// \author Mathias Neumann /// \date 21.08.2010 /// /// \param [in,out] d_ioImage The buffer containing the current image. Will be updated to /// contain the error image. /// \param [in] d_inReference Reference image pixel buffer of same size as \a d_ioImage. /// \param numPixels Number of pixels in both buffers. /// \param fScale The error scale factor. Will be multiplied with computed /// pixel values (per channel) to amplify the resulting color. //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernel_GenerateErrorImage(uchar4* d_ioImage, uchar4* d_inReference, uint numPixels, float fScale) { uint idxPixel = blockIdx.x * blockDim.x + threadIdx.x; if(idxPixel < numPixels) { // Read pixel values. uchar4 clr = d_ioImage[idxPixel]; uchar4 clrRef = d_inReference[idxPixel]; float4 fclr = make_float4(float(clr.x) / 255.f, float(clr.y) / 255.f, float(clr.z) / 255.f, float(clr.w) / 255.f); float4 fclrRef = make_float4(float(clrRef.x) / 255.f, float(clrRef.y) / 255.f, float(clrRef.z) / 255.f, float(clrRef.w) / 255.f); float4 absErr = fclr - fclrRef; float4 relErr = absErr / fclrRef; // Write error. 
uchar4 err; err.x = (uchar)fminf(255.f, fmaxf(0.f, fScale * 255.f * fabsf(absErr.x))); err.y = (uchar)fminf(255.f, fmaxf(0.f, fScale * 255.f * fabsf(absErr.y))); err.z = (uchar)fminf(255.f, fmaxf(0.f, fScale * 255.f * fabsf(absErr.z))); err.w = (uchar)fminf(255.f, fmaxf(0.f, fScale * 255.f * fabsf(absErr.w))); d_ioImage[idxPixel] = err; } } //@} //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// /// \WRAPPERS //////////////////////////////////////////////////////////////////////////////////////////////////// //@{ /// Wraps kernel_ConvertToRGBA8() kernel call. extern "C" void KernelIMGConvertToRGBA8(float4* d_inRadiance, uint numPixel, uchar4* d_outScreenBuffer) { dim3 blockSize = dim3(256, 1, 1); dim3 gridSize = dim3(MNCUDA_DIVUP(numPixel, blockSize.x), 1, 1); hipLaunchKernelGGL(( kernel_ConvertToRGBA8), dim3(gridSize), dim3(blockSize), 0, 0, d_inRadiance, numPixel, d_outScreenBuffer); MNCUDA_CHECKERROR; } /// Calls kernel_TestDiscrepancy() to test discrepancy of ::dev_RadicalInverse(). extern "C" void KernelIMGTestDiscrepancy(uint numSamples, uint screenW, uint screenH, uchar4* d_outScreenBuffer) { dim3 blockSize = dim3(256, 1, 1); dim3 gridSize = dim3(MNCUDA_DIVUP(numSamples, blockSize.x), 1, 1); mncudaSafeCallNoSync(hipMemset(d_outScreenBuffer, 0, screenW*screenH*sizeof(uchar4))); hipLaunchKernelGGL(( kernel_TestDiscrepancy), dim3(gridSize), dim3(blockSize), 0, 0, numSamples, screenW, screenH, d_outScreenBuffer); MNCUDA_CHECKERROR; } /// Wraps kernel_GenerateErrorImage() kernel call. extern "C" void KernelIMGGenerateErrorImage(uchar4* d_ioImage, uchar4* d_inReference, uint numPixels, float fScale) { dim3 blockSize = dim3(256, 1, 1); dim3 gridSize = dim3(MNCUDA_DIVUP(numPixels, blockSize.x), 1, 1); hipLaunchKernelGGL(( kernel_GenerateErrorImage), dim3(gridSize), dim3(blockSize), 0, 0, d_ioImage, d_inReference, numPixels, fScale); MNCUDA_CHECKERROR; } //@} //////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////
5078873bd99e5babb1a0aece106f387fe0be9e8d.cu
//////////////////////////////////////////////////////////////////////////////////////////////////// // MNRT License //////////////////////////////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2010 Mathias Neumann, www.maneumann.com. // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are // permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation and/or // other materials provided with the distribution. // // 3. Neither the name Mathias Neumann, nor the names of contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE // GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// /// \file GPU\image.cu /// /// \brief Contains image conversion kernels. /// \author Mathias Neumann /// \date 21.08.2010 /// \ingroup globalillum //////////////////////////////////////////////////////////////////////////////////////////////////// #include "mncudautil_dev.h" //////////////////////////////////////////////////////////////////////////////////////////////////// /// \KERNELS //////////////////////////////////////////////////////////////////////////////////////////////////// //@{ //////////////////////////////////////////////////////////////////////////////////////////////////// /// \fn __global__ void kernel_ConvertToRGBA8(float4* d_inRadiance, uint numPixel, /// uchar4* d_outScreenBuffer) /// /// \brief Converts radiance values (\c float4) to RGBA8 format. /// /// Resulting buffer can be displayed using OpenGL. /// /// \author Mathias Neumann /// \date 27.06.2010 /// /// \param [in] d_inRadiance Radiance pixel buffer (R, G, B, unused). /// \param numPixel Number of pixels. /// \param [out] d_outScreenBuffer Conversion target pixel buffer (RGBA8 format). //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernel_ConvertToRGBA8(float4* d_inRadiance, uint numPixel, uchar4* d_outScreenBuffer) { uint tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < numPixel) { float4 L = d_inRadiance[tid]; // Write converted data. Ensure coalesced access by writing uchar4 in one step. 
uchar4 pix; pix.x = (uchar)fminf(255.f, 255.f * L.x); pix.y = (uchar)fminf(255.f, 255.f * L.y); pix.z = (uchar)fminf(255.f, 255.f * L.z); d_outScreenBuffer[tid] = pix; } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// \fn __global__ void kernel_TestDiscrepancy(uint numSamples, uint screenW, uint screenH, /// uchar4* d_outScreenBuffer) /// /// \brief Simple test of halton sequence generation. /// /// More precisely, the output of ::dev_RadicalInverse() is tested. This is done by /// generating \a numSamples samples of a 2D halton sequence and plotting them on the /// given screen buffer. Due to possible write conflicts, the result might not be exact. /// /// \author Mathias Neumann /// \date 08.07.2010 /// /// \param numSamples Number of sequence members to generate. /// \param screenW Screen width in pixels. /// \param screenH Screen height in pixels. /// \param [in,out] d_outScreenBuffer Screen buffer (accumulator). //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernel_TestDiscrepancy(uint numSamples, uint screenW, uint screenH, uchar4* d_outScreenBuffer) { uint tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < numSamples) { float rnd1 = dev_RadicalInverse(tid+1, 5); float rnd2 = dev_RadicalInverse(tid+1, 7); uint myX = rnd1*screenW; uint myY = rnd2*screenH; uint myPixel = screenW*myY + myX; uchar4 pix = d_outScreenBuffer[myPixel]; pix.x = min(255, pix.x + 32); pix.y = min(255, pix.y + 32); pix.z = min(255, pix.z + 32); // Warning: Write conflicts possible! d_outScreenBuffer[myPixel] = pix; } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// \fn __global__ void kernel_GenerateErrorImage(uchar4* d_ioImage, uchar4* d_inReference, /// uint numPixels, float fScale) /// /// \brief Generates an error image from current image and reference image. /// /// The relative error is calculated and its absolute value is displayed, where a /// relative error of zero leads to black pixel value. Error scaling is possible by /// providing an error scale factor. /// /// \author Mathias Neumann /// \date 21.08.2010 /// /// \param [in,out] d_ioImage The buffer containing the current image. Will be updated to /// contain the error image. /// \param [in] d_inReference Reference image pixel buffer of same size as \a d_ioImage. /// \param numPixels Number of pixels in both buffers. /// \param fScale The error scale factor. Will be multiplied with computed /// pixel values (per channel) to amplify the resulting color. //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernel_GenerateErrorImage(uchar4* d_ioImage, uchar4* d_inReference, uint numPixels, float fScale) { uint idxPixel = blockIdx.x * blockDim.x + threadIdx.x; if(idxPixel < numPixels) { // Read pixel values. uchar4 clr = d_ioImage[idxPixel]; uchar4 clrRef = d_inReference[idxPixel]; float4 fclr = make_float4(float(clr.x) / 255.f, float(clr.y) / 255.f, float(clr.z) / 255.f, float(clr.w) / 255.f); float4 fclrRef = make_float4(float(clrRef.x) / 255.f, float(clrRef.y) / 255.f, float(clrRef.z) / 255.f, float(clrRef.w) / 255.f); float4 absErr = fclr - fclrRef; float4 relErr = absErr / fclrRef; // Write error. 
uchar4 err; err.x = (uchar)fminf(255.f, fmaxf(0.f, fScale * 255.f * fabsf(absErr.x))); err.y = (uchar)fminf(255.f, fmaxf(0.f, fScale * 255.f * fabsf(absErr.y))); err.z = (uchar)fminf(255.f, fmaxf(0.f, fScale * 255.f * fabsf(absErr.z))); err.w = (uchar)fminf(255.f, fmaxf(0.f, fScale * 255.f * fabsf(absErr.w))); d_ioImage[idxPixel] = err; } } //@} //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// /// \WRAPPERS //////////////////////////////////////////////////////////////////////////////////////////////////// //@{ /// Wraps kernel_ConvertToRGBA8() kernel call. extern "C" void KernelIMGConvertToRGBA8(float4* d_inRadiance, uint numPixel, uchar4* d_outScreenBuffer) { dim3 blockSize = dim3(256, 1, 1); dim3 gridSize = dim3(MNCUDA_DIVUP(numPixel, blockSize.x), 1, 1); kernel_ConvertToRGBA8<<<gridSize, blockSize>>>(d_inRadiance, numPixel, d_outScreenBuffer); MNCUDA_CHECKERROR; } /// Calls kernel_TestDiscrepancy() to test discrepancy of ::dev_RadicalInverse(). extern "C" void KernelIMGTestDiscrepancy(uint numSamples, uint screenW, uint screenH, uchar4* d_outScreenBuffer) { dim3 blockSize = dim3(256, 1, 1); dim3 gridSize = dim3(MNCUDA_DIVUP(numSamples, blockSize.x), 1, 1); mncudaSafeCallNoSync(cudaMemset(d_outScreenBuffer, 0, screenW*screenH*sizeof(uchar4))); kernel_TestDiscrepancy<<<gridSize, blockSize>>>(numSamples, screenW, screenH, d_outScreenBuffer); MNCUDA_CHECKERROR; } /// Wraps kernel_GenerateErrorImage() kernel call. extern "C" void KernelIMGGenerateErrorImage(uchar4* d_ioImage, uchar4* d_inReference, uint numPixels, float fScale) { dim3 blockSize = dim3(256, 1, 1); dim3 gridSize = dim3(MNCUDA_DIVUP(numPixels, blockSize.x), 1, 1); kernel_GenerateErrorImage<<<gridSize, blockSize>>>(d_ioImage, d_inReference, numPixels, fScale); MNCUDA_CHECKERROR; } //@} //////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////
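kernel_TestDiscrepancy above plots a 2D Halton sequence built from dev_RadicalInverse() in bases 5 and 7. The CPU sketch below shows the textbook radical-inverse (van der Corput) construction such sequences use; MNRT's device implementation may differ in detail, so treat this only as an illustration.

#include <cstdio>

// Radical inverse of n in the given base: mirror the base-b digits of n
// about the radix point, giving a low-discrepancy value in [0, 1).
static float radical_inverse(unsigned n, unsigned base)
{
  float invBase = 1.0f / base;
  float factor  = invBase;
  float result  = 0.0f;
  while (n > 0)
  {
    result += (n % base) * factor;
    n /= base;
    factor *= invBase;
  }
  return result;
}

int main()
{
  // First few members of the (base 5, base 7) Halton points the kernel plots.
  for (unsigned i = 1; i <= 4; ++i)
    std::printf("sample %u: (%.4f, %.4f)\n", i,
                radical_inverse(i, 5), radical_inverse(i, 7));
  return 0;
}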
6b0a4843a9b5f2fb6ce0b53fd53147d876874a92.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include <stdio.h> #include <stdlib.h> // includes CUDA #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_C; unsigned* d_A; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(const unsigned* A,unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned I1=A[i]; #pragma unroll 1 //Excessive Logical Unit access for(unsigned k=0; k<iterations;k++) { // BLOCK-0 (For instruction size of 16 bytes for Volta __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); } C[i]=I1; __syncthreads(); } // Host code int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); // Allocate vectors in device memory checkCudaErrors( hipMalloc((void**)&d_A, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); hipEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); checkCudaErrors(hipEventRecord(start)); 
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A,d_C, iterations); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); hipDeviceSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_C) free(h_C); } // Fills the array with pseudo-random unsigned entries. void RandomInit(unsigned* data, int n) { srand((unsigned)time(0)); /* seed once, not per element */ for (int i = 0; i < n; ++i){ data[i] = (unsigned)rand(); /* rand() / RAND_MAX is integer division and would always yield 0 */ } }
6b0a4843a9b5f2fb6ce0b53fd53147d876874a92.cu
// Includes #include <stdio.h> #include <stdlib.h> // includes CUDA #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_C; unsigned* d_A; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(const unsigned* A,unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned I1=A[i]; #pragma unroll 1 //Excessive Logical Unit access for(unsigned k=0; k<iterations;k++) { // BLOCK-0 (For instruction size of 16 bytes for Volta __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); } C[i]=I1; __syncthreads(); } // Host code int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); cudaEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); checkCudaErrors(cudaEventRecord(start)); PowerKernal<<<dimGrid,dimBlock>>>(d_A,d_C, iterations); 
checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); cudaThreadSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_C) free(h_C); } // Fills the array with pseudo-random unsigned entries. void RandomInit(unsigned* data, int n) { srand((unsigned)time(0)); /* seed once, not per element */ for (int i = 0; i < n; ++i){ data[i] = (unsigned)rand(); /* rand() / RAND_MAX is integer division and would always yield 0 */ } }
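Each loop pass of PowerKernal in the two listings above executes a chain of 32 labelled bra.uni instructions per thread. As a rough sanity check (assuming one branch per Bn label, as written), the total branch count per launch is threads * 32 * iterations; the tiny sketch below works that out for an example iteration count.

#include <cstdio>

int main()
{
  const long long threads    = 256LL * 640LL; // THREADS_PER_BLOCK * NUM_OF_BLOCKS
  const long long perPass    = 32;            // bra.uni instructions per loop pass
  const long long iterations = 1000;          // example command-line argument
  std::printf("branch instructions per launch ~ %lld\n",
              threads * perPass * iterations);
  return 0;
}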
03c7e17599662b639908a70eb8493da565006643.hip
// !!! This is a file automatically generated by hipify!!! /* * File: MBSet.cu * * Created on June 24, 2012 * * Purpose: This program displays Mandelbrot set using the GPU via CUDA and * OpenGL immediate mode. * */ #include <iostream> #include <stack> #include <hip/hip_runtime_api.h> #include <stdio.h> #include "Complex.cu" #include <GL/freeglut.h> // Size of window in pixels, both width and height #define WINDOW_DIM (512) // Maximum iterations for the MBSet calculations #define MAX_IT (2000) using namespace std; // Initial screen coordinates, both host and device. Complex minC(-2.0, -1.2); Complex maxC(1.0, 1.8); Complex* dev_minC; Complex* dev_maxC; // Block and thread counts for CUDA dim3 blocks(WINDOW_DIM/8, WINDOW_DIM/8); dim3 threads(8, 8); _global__ void calculateInSet(Complex * minC, Complex * maxC, int * iterations, int * result) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int width = blockDim.x * gridDim.x; int height = blockDim.y * gridDim.y; int offset = x + y * width; Complex complexOffset(x * (maxC->r - minC->r)/width, y * (maxC->i - minC->i)/height); Complex c(*minC + complexOffset); int maxIteration = *iterations; int i = 0; Complex z(c); while(i != maxIteration) { if(z.magnitude2() > 4.0f) { break; } z = z*z + c; ++i; } result[offset] = i; } void computeMandelBrotSet() { hipMalloc((void **) &dev_minC, sizeof(Complex)); hipMalloc((void **) &dev_maxC, sizeof(Complex)); hipMalloc((void **) &dev_iterations, sizeof(int)); hipMalloc((void **) &dev_iterationArray, sizeof(int) * WINDOW_DIM * WINDOW_DIM); hipMemcpy(dev_minC, &minC, sizeof(Complex), hipMemcpyHostToDevice); hipMemcpy(dev_maxC, &maxC, sizeof(Complex), hipMemcpyHostToDevice); hipMemcpy(dev_iterations, &iterations, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( calculateInSet), dim3(blocks), dim3(threads), 0, 0, dev_minC, dev_maxC, dev_iterations, dev_iterationArray); hipMemcpy(host_iterationArray, dev_iterationArray, sizeof(int) * WINDOW_DIM * WINDOW_DIM, hipMemcpyDeviceToHost); return; } int width=512, height=512; // window size int windowID; GLfloat minX = -2.0f, maxX = 1.0f, minY = -1.2f, maxY = 1.8f; // complex plane boundaries GLfloat stepX = (maxX - minX)/(GLfloat)width; GLfloat stepY = (maxY - minY)/(GLfloat)height; GLfloat black[] = {0.0f, 0.0f, 0.0f}; // black color GLfloat white[] = {1.0f, 1.0f, 1.0f}; // white color const int paletteSize = 128; GLfloat palette[paletteSize][3]; bool fullScreen=false; double maxIteration = 2000; double startMouseClickX = 0.0; double startMouseClickY = 0.0; double endMouseClickX = 0.0; double endMouseClickY = 0.0; double currentMouseX = 0.0; double currentMouseY = 0.0; bool isBox = false; void drag (int x, int y){ // cout << "============================="<<endl; // cout << x << '\t' <<y<< endl; currentMouseX = x; currentMouseY = y; } void mouse(int button, int state, int x, int y){ // cout << "============================="<<endl; if (state==GLUT_DOWN) { cout << "DOWN" <<endl; // cout << "x: " << x << "\n"; // cout << "y: " << y << "\n"; startMouseClickX = x; startMouseClickY = y; } if (state==GLUT_UP) { cout << "UP" <<endl; // cout << "x: " << x << "\n"; // cout << "y: " << y << "\n"; endMouseClickX = x; endMouseClickY = y; isBox = true; cout << "Redisplaying" <<endl; glutPostRedisplay(); isBox = false; } } GLfloat* calculateColor(GLfloat u, GLfloat v){ GLfloat re = u; GLfloat im = v; GLfloat tempRe=0.0; Complex c = Complex((float)re,(float)im); Complex Zn0 = c; Complex Zn1(0,0); bool isWhite = false; short 
isWhiteIter = -100; for (int i = 0; i < maxIteration; ++i) { Zn1 = Zn0*Zn0 + c; if (Zn1.magnitude2() > 2.0*2.0) { isWhite = true; isWhiteIter = i; break; cout << "breaking!!"; } Zn0 = Zn1; } if(isWhite && isWhiteIter >= 0) { return palette[isWhiteIter%128]; } else return black; } GLfloat* mandelImage[512][512]; void repaint() {// function called to repaint the window glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // clear the screen buffer glBegin(GL_POINTS); // start drawing in single pixel mode short cx = 0; short cy = 0; for(GLfloat y = maxY; y >= minY; y -= stepY){ for(GLfloat x = minX; x <= maxX; x += stepX){ glColor3fv(mandelImage[cx][cy]); // set color glVertex2f(cx,cy); cx++; } cy++; cx = 0; } glEnd(); // end drawing isBox = true; if (isBox) { // float side = ((endMouseClickX - startMouseClickX) < (endMouseClickY - startMouseClickY)) ? endMouseClickX - startMouseClickX : endMouseClickY - startMouseClickY; // endMouseClickX = startMouseClickX + side; // endMouseClickY = startMouseClickY + side; // float topLeftXTransformed = (startMouseClickX - 256.0)/256.0; // float topLeftYTransformed = (256.0 - startMouseClickY)/256.0; // // float bottomRightXTransformed = (endMouseClickX - 256.0)/256.0; // float bottomRightYTransformed = (256.0 - endMouseClickY)/256.0; // // cout<<"Drawing red box: ("<<topLeftXTransformed<<", "<<topLeftYTransformed<<") : (" << bottomRightXTransformed <<", "<< bottomRightYTransformed <<")\n"; glColor3f(1.0, 0.0, 0.0); glBegin(GL_LINE_LOOP); // glVertex2f(topLeftXTransformed, topLeftYTransformed); // glVertex2f(bottomRightXTransformed,topLeftYTransformed); // glVertex2f(bottomRightXTransformed,bottomRightYTransformed); // glVertex2f(topLeftXTransformed,bottomRightYTransformed); glVertex2f(0.5,0.5); glVertex2f(0.25,0.25); glEnd(); } glutSwapBuffers(); // swap the buffers - [ 2 ] } void createPalette(){ int eight = 4; int four = 2; for(int i=0; i < 32; i++){ palette[i][0] = (eight*i)/(GLfloat)255; palette[i][1] = (128-four*i)/(GLfloat)255; palette[i][2] = (255-eight*i)/(GLfloat)255; } for(int i=0; i < 32; i++){ palette[32+i][0] = (GLfloat)1; palette[32+i][1] = (eight*i)/(GLfloat)255; palette[32+i][2] = (GLfloat)0; } for(int i=0; i < 32; i++){ palette[64+i][0] = (128-four*i)/(GLfloat)255; palette[64+i][1] = (GLfloat)1; palette[64+i][2] = (eight*i)/(GLfloat)255; } for(int i=0; i < 32; i++){ palette[96+i][0] = (GLfloat)0; palette[96+i][1] = (255-eight*i)/(GLfloat)255; palette[96+i][2] = (eight*i)/(GLfloat)255; } } int main(int argc, char** argv) { // Initialize OPENGL here // Set up necessary host and device buffers // set up the opengl callbacks for display, mouse and keyboard // Calculate the interation counts // Grad students, pick the colors for the 0 .. 
1999 iteration count pixels glutInit(&argc, argv); createPalette(); glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA); glClearColor(0.0f, 0.0f, 0.0f, 0.0f); GLsizei windowX = (glutGet(GLUT_SCREEN_WIDTH)-width)/2; GLsizei windowY = (glutGet(GLUT_SCREEN_HEIGHT)-height)/2; glutInitWindowPosition(windowX, windowY); glutInitWindowSize(width, height); windowID = glutCreateWindow("MANDELBROTH"); glShadeModel(GL_SMOOTH); glEnable(GL_DEPTH_TEST); // glViewport (0, 0, (GLsizei) width, (GLsizei) height); // glMatrixMode (GL_PROJECTION); // glLoadIdentity(); // glOrtho(minX, maxX, minY, maxY, ((GLfloat)-1), (GLfloat)1); // set the event handling methods short cx = 0; short cy = 0; for(GLfloat y = maxY; y >= minY; y -= stepY){ for(GLfloat x = minX; x <= maxX; x += stepX){ GLfloat* temp = calculateColor(x,y); // cout << temp; // cout << cx <<"\t"<< cy <<endl; mandelImage[cx][cy] = temp; cx++; } cy++; cx = 0; } glutDisplayFunc(repaint); // glutReshapeFunc(reshape); glutKeyboardFunc(keyFunction); glutMouseFunc(mouse); glutMotionFunc(drag); glutMainLoop(); // THis will callback the display, keyboard and mouse return 0; }
03c7e17599662b639908a70eb8493da565006643.cu
/* * File: MBSet.cu * * Created on June 24, 2012 * * Purpose: This program displays Mandelbrot set using the GPU via CUDA and * OpenGL immediate mode. * */ #include <iostream> #include <stack> #include <cuda_runtime_api.h> #include <stdio.h> #include "Complex.cu" #include <GL/freeglut.h> // Size of window in pixels, both width and height #define WINDOW_DIM (512) // Maximum iterations for the MBSet calculations #define MAX_IT (2000) using namespace std; // Initial screen coordinates, both host and device. Complex minC(-2.0, -1.2); Complex maxC(1.0, 1.8); Complex* dev_minC; Complex* dev_maxC; // Block and thread counts for CUDA dim3 blocks(WINDOW_DIM/8, WINDOW_DIM/8); dim3 threads(8, 8); _global__ void calculateInSet(Complex * minC, Complex * maxC, int * iterations, int * result) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int width = blockDim.x * gridDim.x; int height = blockDim.y * gridDim.y; int offset = x + y * width; Complex complexOffset(x * (maxC->r - minC->r)/width, y * (maxC->i - minC->i)/height); Complex c(*minC + complexOffset); int maxIteration = *iterations; int i = 0; Complex z(c); while(i != maxIteration) { if(z.magnitude2() > 4.0f) { break; } z = z*z + c; ++i; } result[offset] = i; } void computeMandelBrotSet() { cudaMalloc((void **) &dev_minC, sizeof(Complex)); cudaMalloc((void **) &dev_maxC, sizeof(Complex)); cudaMalloc((void **) &dev_iterations, sizeof(int)); cudaMalloc((void **) &dev_iterationArray, sizeof(int) * WINDOW_DIM * WINDOW_DIM); cudaMemcpy(dev_minC, &minC, sizeof(Complex), cudaMemcpyHostToDevice); cudaMemcpy(dev_maxC, &maxC, sizeof(Complex), cudaMemcpyHostToDevice); cudaMemcpy(dev_iterations, &iterations, sizeof(int), cudaMemcpyHostToDevice); calculateInSet<<<blocks, threads>>>(dev_minC, dev_maxC, dev_iterations, dev_iterationArray); cudaMemcpy(host_iterationArray, dev_iterationArray, sizeof(int) * WINDOW_DIM * WINDOW_DIM, cudaMemcpyDeviceToHost); return; } int width=512, height=512; // window size int windowID; GLfloat minX = -2.0f, maxX = 1.0f, minY = -1.2f, maxY = 1.8f; // complex plane boundaries GLfloat stepX = (maxX - minX)/(GLfloat)width; GLfloat stepY = (maxY - minY)/(GLfloat)height; GLfloat black[] = {0.0f, 0.0f, 0.0f}; // black color GLfloat white[] = {1.0f, 1.0f, 1.0f}; // white color const int paletteSize = 128; GLfloat palette[paletteSize][3]; bool fullScreen=false; double maxIteration = 2000; double startMouseClickX = 0.0; double startMouseClickY = 0.0; double endMouseClickX = 0.0; double endMouseClickY = 0.0; double currentMouseX = 0.0; double currentMouseY = 0.0; bool isBox = false; void drag (int x, int y){ // cout << "============================="<<endl; // cout << x << '\t' <<y<< endl; currentMouseX = x; currentMouseY = y; } void mouse(int button, int state, int x, int y){ // cout << "============================="<<endl; if (state==GLUT_DOWN) { cout << "DOWN" <<endl; // cout << "x: " << x << "\n"; // cout << "y: " << y << "\n"; startMouseClickX = x; startMouseClickY = y; } if (state==GLUT_UP) { cout << "UP" <<endl; // cout << "x: " << x << "\n"; // cout << "y: " << y << "\n"; endMouseClickX = x; endMouseClickY = y; isBox = true; cout << "Redisplaying" <<endl; glutPostRedisplay(); isBox = false; } } GLfloat* calculateColor(GLfloat u, GLfloat v){ GLfloat re = u; GLfloat im = v; GLfloat tempRe=0.0; Complex c = Complex((float)re,(float)im); Complex Zn0 = c; Complex Zn1(0,0); bool isWhite = false; short isWhiteIter = -100; for (int i = 0; i < maxIteration; ++i) { Zn1 = Zn0*Zn0 + c; if 
(Zn1.magnitude2() > 2.0*2.0) { isWhite = true; isWhiteIter = i; break; cout << "breaking!!"; } Zn0 = Zn1; } if(isWhite && isWhiteIter >= 0) { return palette[isWhiteIter%128]; } else return black; } GLfloat* mandelImage[512][512]; void repaint() {// function called to repaint the window glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // clear the screen buffer glBegin(GL_POINTS); // start drawing in single pixel mode short cx = 0; short cy = 0; for(GLfloat y = maxY; y >= minY; y -= stepY){ for(GLfloat x = minX; x <= maxX; x += stepX){ glColor3fv(mandelImage[cx][cy]); // set color glVertex2f(cx,cy); cx++; } cy++; cx = 0; } glEnd(); // end drawing isBox = true; if (isBox) { // float side = ((endMouseClickX - startMouseClickX) < (endMouseClickY - startMouseClickY)) ? endMouseClickX - startMouseClickX : endMouseClickY - startMouseClickY; // endMouseClickX = startMouseClickX + side; // endMouseClickY = startMouseClickY + side; // float topLeftXTransformed = (startMouseClickX - 256.0)/256.0; // float topLeftYTransformed = (256.0 - startMouseClickY)/256.0; // // float bottomRightXTransformed = (endMouseClickX - 256.0)/256.0; // float bottomRightYTransformed = (256.0 - endMouseClickY)/256.0; // // cout<<"Drawing red box: ("<<topLeftXTransformed<<", "<<topLeftYTransformed<<") : (" << bottomRightXTransformed <<", "<< bottomRightYTransformed <<")\n"; glColor3f(1.0, 0.0, 0.0); glBegin(GL_LINE_LOOP); // glVertex2f(topLeftXTransformed, topLeftYTransformed); // glVertex2f(bottomRightXTransformed,topLeftYTransformed); // glVertex2f(bottomRightXTransformed,bottomRightYTransformed); // glVertex2f(topLeftXTransformed,bottomRightYTransformed); glVertex2f(0.5,0.5); glVertex2f(0.25,0.25); glEnd(); } glutSwapBuffers(); // swap the buffers - [ 2 ] } void createPalette(){ int eight = 4; int four = 2; for(int i=0; i < 32; i++){ palette[i][0] = (eight*i)/(GLfloat)255; palette[i][1] = (128-four*i)/(GLfloat)255; palette[i][2] = (255-eight*i)/(GLfloat)255; } for(int i=0; i < 32; i++){ palette[32+i][0] = (GLfloat)1; palette[32+i][1] = (eight*i)/(GLfloat)255; palette[32+i][2] = (GLfloat)0; } for(int i=0; i < 32; i++){ palette[64+i][0] = (128-four*i)/(GLfloat)255; palette[64+i][1] = (GLfloat)1; palette[64+i][2] = (eight*i)/(GLfloat)255; } for(int i=0; i < 32; i++){ palette[96+i][0] = (GLfloat)0; palette[96+i][1] = (255-eight*i)/(GLfloat)255; palette[96+i][2] = (eight*i)/(GLfloat)255; } } int main(int argc, char** argv) { // Initialize OPENGL here // Set up necessary host and device buffers // set up the opengl callbacks for display, mouse and keyboard // Calculate the interation counts // Grad students, pick the colors for the 0 .. 
1999 iteration count pixels glutInit(&argc, argv); createPalette(); glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA); glClearColor(0.0f, 0.0f, 0.0f, 0.0f); GLsizei windowX = (glutGet(GLUT_SCREEN_WIDTH)-width)/2; GLsizei windowY = (glutGet(GLUT_SCREEN_HEIGHT)-height)/2; glutInitWindowPosition(windowX, windowY); glutInitWindowSize(width, height); windowID = glutCreateWindow("MANDELBROTH"); glShadeModel(GL_SMOOTH); glEnable(GL_DEPTH_TEST); // glViewport (0, 0, (GLsizei) width, (GLsizei) height); // glMatrixMode (GL_PROJECTION); // glLoadIdentity(); // glOrtho(minX, maxX, minY, maxY, ((GLfloat)-1), (GLfloat)1); // set the event handling methods short cx = 0; short cy = 0; for(GLfloat y = maxY; y >= minY; y -= stepY){ for(GLfloat x = minX; x <= maxX; x += stepX){ GLfloat* temp = calculateColor(x,y); // cout << temp; // cout << cx <<"\t"<< cy <<endl; mandelImage[cx][cy] = temp; cx++; } cy++; cx = 0; } glutDisplayFunc(repaint); // glutReshapeFunc(reshape); glutKeyboardFunc(keyFunction); glutMouseFunc(mouse); glutMotionFunc(drag); glutMainLoop(); // THis will callback the display, keyboard and mouse return 0; }
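// Notes on the MBSet.cu pair above (the issues appear in both the .hip and the .cu
// version):
// - `_global__` on calculateInSet is missing a leading underscore; the qualifier must
//   be `__global__` for the kernel to compile.
// - computeMandelBrotSet() uses dev_iterations, dev_iterationArray,
//   host_iterationArray and iterations, and main() registers keyFunction, none of
//   which are declared anywhere in the file. A minimal set of declarations that would
//   let the CUDA version build is sketched below; the sizes follow WINDOW_DIM and
//   MAX_IT, and the empty keyFunction is an assumption, not part of the original file.
int iterations = MAX_IT;
int* dev_iterations = NULL;
int* dev_iterationArray = NULL;
static int host_iterationArray[WINDOW_DIM * WINDOW_DIM];
void keyFunction(unsigned char key, int x, int y) { /* no-op placeholder */ }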
7924422f89be0cc856ae89dcc8f34d4c32ba1489.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions mixed zc -> ds */ #include "magma_internal.h" #define NB 64 // TODO check precision, as in zlag2c? __global__ void zclaswp_kernel( int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaFloatComplex res; if (ind < m) { SA += ind; ipiv += ind; newind = ipiv[0]; for (int i=0; i < n; i++) { res = MAGMA_C_MAKE( (float)MAGMA_Z_REAL( A[newind+i*lda] ), (float)MAGMA_Z_IMAG( A[newind+i*lda] )); SA[i*ldsa] = res; } } } __global__ void zclaswp_inv_kernel( int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaDoubleComplex res; if (ind < m) { A += ind; ipiv += ind; newind = ipiv[0]; for (int i=0; i < n; i++) { res = MAGMA_Z_MAKE( (double)MAGMA_C_REAL( SA[newind+i*ldsa] ), (double)MAGMA_C_IMAG( SA[newind+i*ldsa] )); A[i*lda] = res; } } } /***************************************************************************//** Purpose ------- Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or row i of SA is cast to double precision in row ipiv[i] of A (incx < 0), for 0 <= i < M. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A. @param[in,out] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) On entry, the M-by-N matrix to which the row interchanges will be applied. TODO update docs @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in,out] SA REAL array on the GPU, dimension (LDSA,N) On exit, the single precision, permuted matrix. TODO update docs @param[in] ldsa INTEGER. LDSA specifies the leading dimension of SA. @param[in] m The number of rows to be interchanged. @param[in] ipiv INTEGER array on the GPU, dimension (M) The vector of pivot indices. Row i of A is cast to single precision in row ipiv[i] of SA, for 0 <= i < m. @param[in] incx INTEGER If INCX is negative, the pivots are applied in reverse order, otherwise in straight-forward order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_zclaswp( magma_int_t n, magmaDoubleComplex_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t ldsa, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx, magma_queue_t queue ) { int blocks = magma_ceildiv( m, NB ); dim3 grid( blocks ); dim3 threads( NB ); if (incx >= 0) hipLaunchKernelGGL(( zclaswp_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, m, ipiv); else hipLaunchKernelGGL(( zclaswp_inv_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, m, ipiv); }
7924422f89be0cc856ae89dcc8f34d4c32ba1489.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions mixed zc -> ds */ #include "magma_internal.h" #define NB 64 // TODO check precision, as in zlag2c? __global__ void zclaswp_kernel( int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaFloatComplex res; if (ind < m) { SA += ind; ipiv += ind; newind = ipiv[0]; for (int i=0; i < n; i++) { res = MAGMA_C_MAKE( (float)MAGMA_Z_REAL( A[newind+i*lda] ), (float)MAGMA_Z_IMAG( A[newind+i*lda] )); SA[i*ldsa] = res; } } } __global__ void zclaswp_inv_kernel( int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaDoubleComplex res; if (ind < m) { A += ind; ipiv += ind; newind = ipiv[0]; for (int i=0; i < n; i++) { res = MAGMA_Z_MAKE( (double)MAGMA_C_REAL( SA[newind+i*ldsa] ), (double)MAGMA_C_IMAG( SA[newind+i*ldsa] )); A[i*lda] = res; } } } /***************************************************************************//** Purpose ------- Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or row i of SA is cast to double precision in row ipiv[i] of A (incx < 0), for 0 <= i < M. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A. @param[in,out] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) On entry, the M-by-N matrix to which the row interchanges will be applied. TODO update docs @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in,out] SA REAL array on the GPU, dimension (LDSA,N) On exit, the single precision, permuted matrix. TODO update docs @param[in] ldsa INTEGER. LDSA specifies the leading dimension of SA. @param[in] m The number of rows to be interchanged. @param[in] ipiv INTEGER array on the GPU, dimension (M) The vector of pivot indices. Row i of A is cast to single precision in row ipiv[i] of SA, for 0 <= i < m. @param[in] incx INTEGER If INCX is negative, the pivots are applied in reverse order, otherwise in straight-forward order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_zclaswp( magma_int_t n, magmaDoubleComplex_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t ldsa, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx, magma_queue_t queue ) { int blocks = magma_ceildiv( m, NB ); dim3 grid( blocks ); dim3 threads( NB ); if (incx >= 0) zclaswp_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n, A, lda, SA, ldsa, m, ipiv); else zclaswp_inv_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n, A, lda, SA, ldsa, m, ipiv); }
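// The functional difference between the .hip and .cu versions of zclaswp above is the
// added hip/hip_runtime.h include and the kernel-launch syntax; hipify rewrites the
// CUDA triple-chevron launch into the hipLaunchKernelGGL macro. For the forward kernel:
//
//   CUDA: zclaswp_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n, A, lda, SA, ldsa, m, ipiv);
//   HIP:  hipLaunchKernelGGL(zclaswp_kernel, grid, threads, 0, queue->cuda_stream(),
//                            n, A, lda, SA, ldsa, m, ipiv);
//
// i.e. hipLaunchKernelGGL(kernel, gridDim, blockDim, dynamicSharedMemBytes, stream, args...).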
a65e06ed79ee40fd0b8322f94af7d7f66675966e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/diag_kernel.h" #include <algorithm> #include <tuple> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/diag_functor.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { // Extract the diagonal of a matrix 'x' to a vector 'out'. template <typename T> __global__ void ExtractDiagonalKernel(T* out, const T* x, std::ptrdiff_t start, std::ptrdiff_t size, const std::ptrdiff_t sumStride, const std::ptrdiff_t outStride) { for (std::ptrdiff_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; idx += gridDim.x * blockDim.x) { const std::ptrdiff_t xOffset = start + sumStride * idx; out[outStride * idx] = x[xOffset]; } } // Paste a vector 'x' to the diagonal of a matrix 'out' template <typename T> __global__ void PasteDiagonalKernel(T* out, const T* x, std::ptrdiff_t start, std::ptrdiff_t x_length, const std::ptrdiff_t sumStride, const std::ptrdiff_t xStride) { for (std::ptrdiff_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < x_length; idx += gridDim.x * blockDim.x) { const std::ptrdiff_t outOffset = start + sumStride * idx; out[outOffset] = x[xStride * idx]; } } template <typename T, typename Context> void DiagKernel(const Context& dev_ctx, const DenseTensor& x, int offset, float padding_value, DenseTensor* out) { auto* x_data = x.data<T>(); auto x_dims = x.dims(); T* out_data = dev_ctx.template Alloc<T>(out); auto out_dims = out->dims(); auto GetBlockGridSize = [&dev_ctx](int64_t size) { const int64_t block_size = ::min(size, static_cast<int64_t>(dev_ctx.GetMaxThreadsPerBlock())); int64_t max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int64_t max_blocks = ::max(((max_threads - 1) / block_size + 1), static_cast<int64_t>(1)); const int64_t grid_size = ::min(max_blocks, (size + block_size - 1) / block_size); return std::tuple<int64_t, int64_t>{block_size, grid_size}; }; if (x_dims.size() <= 1) { phi::funcs::SetConstant<Context, T> set_padding_value; set_padding_value(dev_ctx, out, static_cast<T>(padding_value)); auto x_length = (x_dims.size() == 1UL ? x_dims[0] : int64_t(1)); auto size = (offset > 0) ? x_length + offset : x_length - offset; const int& x_stride = 1; if (size > 0) { const auto& out_stride_0 = phi::funcs::ComputeStride(0, out_dims); const auto& out_stride_1 = phi::funcs::ComputeStride(1, out_dims); auto start = (offset >= 0 ? 
offset * out_stride_1 : -offset * out_stride_0); std::tuple<int64_t, int64_t> block_grid_size = GetBlockGridSize(size); hipLaunchKernelGGL(( PasteDiagonalKernel<T>), dim3(std::get<1>(block_grid_size)), dim3(std::get<0>(block_grid_size)), 0, dev_ctx.stream(), out_data, x_data, start, x_length, out_stride_0 + out_stride_1, x_stride); } } else { const int& x_stride_0 = phi::funcs::ComputeStride(0, x_dims); const int& x_stride_1 = phi::funcs::ComputeStride(1, x_dims); int64_t size; if (offset > 0) { size = ::min(x_dims[0], x_dims[1] - offset); } else { size = ::min(x_dims[0] + offset, x_dims[1]); } if (size > 0) { auto start = (offset >= 0 ? offset * x_stride_1 : -offset * x_stride_0); const auto& out_stride_0 = phi::funcs::ComputeStride(0, out_dims); std::tuple<int64_t, int64_t> block_grid_size = GetBlockGridSize(size); hipLaunchKernelGGL(( ExtractDiagonalKernel<T>), dim3(std::get<1>(block_grid_size)), dim3(std::get<0>(block_grid_size)), 0, dev_ctx.stream(), out_data, x_data, start, size, x_stride_0 + x_stride_1, out_stride_0); } } } } // namespace phi PD_REGISTER_KERNEL(diag, GPU, ALL_LAYOUT, phi::DiagKernel, phi::dtype::float16, phi::dtype::bfloat16, int, int64_t, float, double) {}
a65e06ed79ee40fd0b8322f94af7d7f66675966e.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/diag_kernel.h" #include <algorithm> #include <tuple> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/diag_functor.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { // Extract the diagonal of a matrix 'x' to a vector 'out'. template <typename T> __global__ void ExtractDiagonalKernel(T* out, const T* x, std::ptrdiff_t start, std::ptrdiff_t size, const std::ptrdiff_t sumStride, const std::ptrdiff_t outStride) { for (std::ptrdiff_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; idx += gridDim.x * blockDim.x) { const std::ptrdiff_t xOffset = start + sumStride * idx; out[outStride * idx] = x[xOffset]; } } // Paste a vector 'x' to the diagonal of a matrix 'out' template <typename T> __global__ void PasteDiagonalKernel(T* out, const T* x, std::ptrdiff_t start, std::ptrdiff_t x_length, const std::ptrdiff_t sumStride, const std::ptrdiff_t xStride) { for (std::ptrdiff_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < x_length; idx += gridDim.x * blockDim.x) { const std::ptrdiff_t outOffset = start + sumStride * idx; out[outOffset] = x[xStride * idx]; } } template <typename T, typename Context> void DiagKernel(const Context& dev_ctx, const DenseTensor& x, int offset, float padding_value, DenseTensor* out) { auto* x_data = x.data<T>(); auto x_dims = x.dims(); T* out_data = dev_ctx.template Alloc<T>(out); auto out_dims = out->dims(); auto GetBlockGridSize = [&dev_ctx](int64_t size) { const int64_t block_size = std::min(size, static_cast<int64_t>(dev_ctx.GetMaxThreadsPerBlock())); int64_t max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int64_t max_blocks = std::max(((max_threads - 1) / block_size + 1), static_cast<int64_t>(1)); const int64_t grid_size = std::min(max_blocks, (size + block_size - 1) / block_size); return std::tuple<int64_t, int64_t>{block_size, grid_size}; }; if (x_dims.size() <= 1) { phi::funcs::SetConstant<Context, T> set_padding_value; set_padding_value(dev_ctx, out, static_cast<T>(padding_value)); auto x_length = (x_dims.size() == 1UL ? x_dims[0] : int64_t(1)); auto size = (offset > 0) ? x_length + offset : x_length - offset; const int& x_stride = 1; if (size > 0) { const auto& out_stride_0 = phi::funcs::ComputeStride(0, out_dims); const auto& out_stride_1 = phi::funcs::ComputeStride(1, out_dims); auto start = (offset >= 0 ? 
offset * out_stride_1 : -offset * out_stride_0); std::tuple<int64_t, int64_t> block_grid_size = GetBlockGridSize(size); PasteDiagonalKernel<T><<<std::get<1>(block_grid_size), std::get<0>(block_grid_size), 0, dev_ctx.stream()>>>(out_data, x_data, start, x_length, out_stride_0 + out_stride_1, x_stride); } } else { const int& x_stride_0 = phi::funcs::ComputeStride(0, x_dims); const int& x_stride_1 = phi::funcs::ComputeStride(1, x_dims); int64_t size; if (offset > 0) { size = std::min(x_dims[0], x_dims[1] - offset); } else { size = std::min(x_dims[0] + offset, x_dims[1]); } if (size > 0) { auto start = (offset >= 0 ? offset * x_stride_1 : -offset * x_stride_0); const auto& out_stride_0 = phi::funcs::ComputeStride(0, out_dims); std::tuple<int64_t, int64_t> block_grid_size = GetBlockGridSize(size); ExtractDiagonalKernel<T><<<std::get<1>(block_grid_size), std::get<0>(block_grid_size), 0, dev_ctx.stream()>>>( out_data, x_data, start, size, x_stride_0 + x_stride_1, out_stride_0); } } } } // namespace phi PD_REGISTER_KERNEL(diag, GPU, ALL_LAYOUT, phi::DiagKernel, phi::dtype::float16, phi::dtype::bfloat16, int, int64_t, float, double) {}
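// How the diagonal addressing in DiagKernel above works, on an assumed concrete case:
// a row-major 4x5 matrix has strides (5, 1), so for offset = 1 the host code computes
// start = offset * stride_1 = 1 and the kernel steps by sumStride = stride_0 + stride_1
// = 6, touching flat indices 1, 7, 13, 19, i.e. elements (0,1), (1,2), (2,3), (3,4):
// the first super-diagonal. A host-side sketch of the same index arithmetic (row-major
// strides are assumed here; phi::funcs::ComputeStride supplies them in the real kernel):
void extract_diag_host(const float* x, float* out, int rows, int cols, int offset)
{
  const int stride0 = cols, stride1 = 1;  // row-major strides of a rows x cols matrix
  const int size = offset > 0 ? (rows < cols - offset ? rows : cols - offset)
                              : (rows + offset < cols ? rows + offset : cols);
  const int start = offset >= 0 ? offset * stride1 : -offset * stride0;
  for (int i = 0; i < size; ++i)
    out[i] = x[start + i * (stride0 + stride1)];  // mirrors ExtractDiagonalKernel
}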
a57f27fb79b3ef3cbf4dfebe84457fd2bd2b7dd6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Copies each source row to its mirrored row in the destination (vertical flip).
// Pitches are in bytes; dividing by 4 converts them to uint32_t strides.
__global__ void kernel_verticalReversal(dev_t *src, dev_t *dst, uint pitch_src, uint pitch_dst, uint pixel_w, uint pixel_h)
{
    unsigned int dim_x = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int dim_y = blockDim.y * blockIdx.y + threadIdx.y;

    if (dim_x < pixel_w && dim_y < pixel_h)
    {
        unsigned int rgba = *((uint32_t*)src + dim_y * pitch_src / 4 + dim_x);
        // pixel_h - 1 - dim_y keeps the mirrored write in bounds
        // (pixel_h - dim_y would land one row past the end for dim_y == 0).
        *((uint32_t*)dst + (pixel_h - 1 - dim_y) * pitch_dst / 4 + dim_x) = rgba;
    }
}
a57f27fb79b3ef3cbf4dfebe84457fd2bd2b7dd6.cu
#include "includes.h" __global__ void kernel_verticalReversal(dev_t *src, dev_t *dst, uint pitch_src, uint pitch_dst, uint pixel_w, uint pixel_h) { unsigned int dim_x = blockDim.x * blockIdx.x + threadIdx.x; unsigned int dim_y = blockDim.y * blockIdx.y + threadIdx.y; if (dim_x < pixel_w && dim_y < pixel_h) { unsigned int rgba = *((uint32_t*)src + dim_y * pitch_src / 4 + dim_x); *((uint32_t*)dst + (pixel_h - dim_y) * pitch_dst / 4 + dim_x) = rgba; } }
85b5a70f2a708b07868b880580d2d7843ea04681.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <Rinternals.h> #include <cur.h> #include "check.hh" #define DEVICE_TO_HOST 1 #define HOST_TO_DEVICE 2 #define DEVICE_TO_DEVICE 3 #define SET_ROBJ_PTR(ptr, R_ptr) \ if (TYPEOF(R_ptr) == INTSXP){ \ ptr = (void*) INTEGER(R_ptr); \ } else if (TYPEOF(R_ptr) == REALSXP){ \ ptr = (void*) REAL(R_ptr); \ } #define LOOKUP_SIZE(data_type) \ (data_type == CUR_TYPE_INT ? sizeof(int) : (data_type == CUR_TYPE_FLOAT ? sizeof(float) : sizeof(double))) #define newRptr(ptr,Rptr,fin) PROTECT(Rptr = R_MakeExternalPtr(ptr, R_NilValue, R_NilValue));R_RegisterCFinalizerEx(Rptr, fin, TRUE) #define getRptr(ptr) R_ExternalPtrAddr(ptr) static inline void cuda_object_finalizer(SEXP Rptr) { void *x = getRptr(Rptr); if (x == NULL) return; CHECK_CUDA(hipFree(x)); R_ClearExternalPtr(Rptr); } extern "C" SEXP R_cudaFree(SEXP x_ptr) { void *x = getRptr(x_ptr); CHECK_CUDA(hipFree(x)); return R_NilValue; } extern "C" SEXP R_cudaMalloc(SEXP n, SEXP size) { SEXP ret; void *x; size_t len = (size_t) REAL(n)[0] * LOOKUP_SIZE(INTEGER(size)[0]); CHECK_CUDA(hipMalloc(&x, len)); newRptr(x, ret, cuda_object_finalizer); UNPROTECT(1); return ret; } extern "C" SEXP R_cudaMemcpy(SEXP dst_, SEXP src_, SEXP count, SEXP size, SEXP kind_) { void *dst; void *src; int kind = INTEGER(kind_)[0]; size_t len = (size_t) REAL(count)[0] * LOOKUP_SIZE(INTEGER(size)[0]); if (kind == DEVICE_TO_HOST) { SET_ROBJ_PTR(dst, dst_); src = getRptr(src_); CHECK_CUDA(hipMemcpy(dst, src, len, hipMemcpyDeviceToHost)); } else if (kind == HOST_TO_DEVICE) { dst = getRptr(dst_); SET_ROBJ_PTR(src, src_); CHECK_CUDA(hipMemcpy(dst, src, len, hipMemcpyHostToDevice)); } else if (kind == DEVICE_TO_DEVICE) { dst = getRptr(dst_); src = getRptr(src_); CHECK_CUDA(hipMemcpy(dst, src, len, hipMemcpyHostToDevice)); } return R_NilValue; } extern "C" SEXP R_cudaMemGetInfo() { SEXP ret, ret_names; SEXP free, total; size_t mem_free, mem_total; PROTECT(ret = allocVector(VECSXP, 2)); PROTECT(ret_names = allocVector(STRSXP, 2)); PROTECT(free = allocVector(REALSXP, 1)); PROTECT(total = allocVector(REALSXP, 1)); CHECK_CUDA(hipMemGetInfo(&mem_free, &mem_total)); REAL(free)[0] = (double) mem_free; REAL(total)[0] = (double) mem_total; SET_VECTOR_ELT(ret, 0, free); SET_VECTOR_ELT(ret, 1, total); SET_STRING_ELT(ret_names, 0, mkChar("free")); SET_STRING_ELT(ret_names, 1, mkChar("total")); setAttrib(ret, R_NamesSymbol, ret_names); UNPROTECT(4); return ret; } extern "C" SEXP R_cudaMemset(SEXP x_ptr, SEXP value, SEXP count, SEXP size) { void *x = getRptr(x_ptr); size_t len = (size_t) REAL(count)[0] * LOOKUP_SIZE(INTEGER(size)[0]); CHECK_CUDA(hipMemset(x, INTEGER(value)[0], len)); return R_NilValue; }
85b5a70f2a708b07868b880580d2d7843ea04681.cu
#include <cuda_runtime.h> #include <Rinternals.h> #include <cur.h> #include "check.hh" #define DEVICE_TO_HOST 1 #define HOST_TO_DEVICE 2 #define DEVICE_TO_DEVICE 3 #define SET_ROBJ_PTR(ptr, R_ptr) \ if (TYPEOF(R_ptr) == INTSXP){ \ ptr = (void*) INTEGER(R_ptr); \ } else if (TYPEOF(R_ptr) == REALSXP){ \ ptr = (void*) REAL(R_ptr); \ } #define LOOKUP_SIZE(data_type) \ (data_type == CUR_TYPE_INT ? sizeof(int) : (data_type == CUR_TYPE_FLOAT ? sizeof(float) : sizeof(double))) #define newRptr(ptr,Rptr,fin) PROTECT(Rptr = R_MakeExternalPtr(ptr, R_NilValue, R_NilValue));R_RegisterCFinalizerEx(Rptr, fin, TRUE) #define getRptr(ptr) R_ExternalPtrAddr(ptr) static inline void cuda_object_finalizer(SEXP Rptr) { void *x = getRptr(Rptr); if (x == NULL) return; CHECK_CUDA(cudaFree(x)); R_ClearExternalPtr(Rptr); } extern "C" SEXP R_cudaFree(SEXP x_ptr) { void *x = getRptr(x_ptr); CHECK_CUDA(cudaFree(x)); return R_NilValue; } extern "C" SEXP R_cudaMalloc(SEXP n, SEXP size) { SEXP ret; void *x; size_t len = (size_t) REAL(n)[0] * LOOKUP_SIZE(INTEGER(size)[0]); CHECK_CUDA(cudaMalloc(&x, len)); newRptr(x, ret, cuda_object_finalizer); UNPROTECT(1); return ret; } extern "C" SEXP R_cudaMemcpy(SEXP dst_, SEXP src_, SEXP count, SEXP size, SEXP kind_) { void *dst; void *src; int kind = INTEGER(kind_)[0]; size_t len = (size_t) REAL(count)[0] * LOOKUP_SIZE(INTEGER(size)[0]); if (kind == DEVICE_TO_HOST) { SET_ROBJ_PTR(dst, dst_); src = getRptr(src_); CHECK_CUDA(cudaMemcpy(dst, src, len, cudaMemcpyDeviceToHost)); } else if (kind == HOST_TO_DEVICE) { dst = getRptr(dst_); SET_ROBJ_PTR(src, src_); CHECK_CUDA(cudaMemcpy(dst, src, len, cudaMemcpyHostToDevice)); } else if (kind == DEVICE_TO_DEVICE) { dst = getRptr(dst_); src = getRptr(src_); CHECK_CUDA(cudaMemcpy(dst, src, len, cudaMemcpyHostToDevice)); } return R_NilValue; } extern "C" SEXP R_cudaMemGetInfo() { SEXP ret, ret_names; SEXP free, total; size_t mem_free, mem_total; PROTECT(ret = allocVector(VECSXP, 2)); PROTECT(ret_names = allocVector(STRSXP, 2)); PROTECT(free = allocVector(REALSXP, 1)); PROTECT(total = allocVector(REALSXP, 1)); CHECK_CUDA(cudaMemGetInfo(&mem_free, &mem_total)); REAL(free)[0] = (double) mem_free; REAL(total)[0] = (double) mem_total; SET_VECTOR_ELT(ret, 0, free); SET_VECTOR_ELT(ret, 1, total); SET_STRING_ELT(ret_names, 0, mkChar("free")); SET_STRING_ELT(ret_names, 1, mkChar("total")); setAttrib(ret, R_NamesSymbol, ret_names); UNPROTECT(4); return ret; } extern "C" SEXP R_cudaMemset(SEXP x_ptr, SEXP value, SEXP count, SEXP size) { void *x = getRptr(x_ptr); size_t len = (size_t) REAL(count)[0] * LOOKUP_SIZE(INTEGER(size)[0]); CHECK_CUDA(cudaMemset(x, INTEGER(value)[0], len)); return R_NilValue; }
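// Note on R_cudaMemcpy above (both versions): the DEVICE_TO_DEVICE branch passes the
// host-to-device copy kind even though both pointers come from getRptr(), i.e. both are
// device allocations. The branch presumably wants the device-to-device kind, e.g. for
// the CUDA version:
//   else if (kind == DEVICE_TO_DEVICE)
//   {
//     dst = getRptr(dst_);
//     src = getRptr(src_);
//     CHECK_CUDA(cudaMemcpy(dst, src, len, cudaMemcpyDeviceToDevice));
//   }
// (hipMemcpyDeviceToDevice in the .hip file.)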
5021bd1a279973fce106947fb73db6dcb46d02f7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "matrix_mul_shared.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *ad = NULL; hipMalloc(&ad, XSIZE*YSIZE); float *bd = NULL; hipMalloc(&bd, XSIZE*YSIZE); float *cd = NULL; hipMalloc(&cd, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( matrix_mul_shared), dim3(gridBlock),dim3(threadBlock), 0, 0, ad,bd,cd,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( matrix_mul_shared), dim3(gridBlock),dim3(threadBlock), 0, 0, ad,bd,cd,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( matrix_mul_shared), dim3(gridBlock),dim3(threadBlock), 0, 0, ad,bd,cd,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5021bd1a279973fce106947fb73db6dcb46d02f7.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "matrix_mul_shared.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *ad = NULL; cudaMalloc(&ad, XSIZE*YSIZE); float *bd = NULL; cudaMalloc(&bd, XSIZE*YSIZE); float *cd = NULL; cudaMalloc(&cd, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); matrix_mul_shared<<<gridBlock,threadBlock>>>(ad,bd,cd,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { matrix_mul_shared<<<gridBlock,threadBlock>>>(ad,bd,cd,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { matrix_mul_shared<<<gridBlock,threadBlock>>>(ad,bd,cd,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
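// Note on the benchmark pair above: cudaMalloc(&ad, XSIZE*YSIZE) (hipMalloc in the .hip
// file) allocates XSIZE*YSIZE bytes, yet ad/bd/cd are float* and the kernel is handed
// N = XSIZE*YSIZE as the element count. If matrix_mul_shared (not shown here) indexes
// N floats per buffer, the allocations are 4x too small; a corrected sketch for the
// lines inside the loop in main() would be:
//   float *ad = NULL, *bd = NULL, *cd = NULL;
//   cudaMalloc(&ad, (size_t)XSIZE * YSIZE * sizeof(float));
//   cudaMalloc(&bd, (size_t)XSIZE * YSIZE * sizeof(float));
//   cudaMalloc(&cd, (size_t)XSIZE * YSIZE * sizeof(float));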
fdb4eb8970b9310bd6e45d017a50ebaf79fef997.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2012-2017 VideoStitch SAS // Copyright (c) 2018 stitchEm #include "image/unpack.hpp" #include "colorArrayDevice.hpp" #include "backend/cuda/deviceBuffer.hpp" #include "backend/cuda/deviceBuffer2D.hpp" #include "backend/cuda/surface.hpp" #include "backend/cuda/deviceStream.hpp" #include "cuda/util.hpp" #include "unpackKernel.cu" #include <hip/hip_runtime.h> #include <cassert> const unsigned int CudaBlockSize = 16; namespace VideoStitch { namespace Image { // ---------------- Convert RGBA -> other colorspace -------------------------- Status unpackRGB(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& array, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( unpackKernelRGB), dim3(dimGrid), dim3(dimBlock), 0, s.get(), dst.get().raw(), (unsigned)dst.getPitch(), array.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackRGB(GPU::Buffer2D& dst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( unpackSourceKernelRGB), dim3(dimGrid), dim3(dimBlock), 0, s.get(), dst.get().raw(), (unsigned)dst.getPitch(), surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackRGBA(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& array, std::size_t /*width*/, std::size_t /*height*/, GPU::Stream s) { return CUDA_ERROR(hipMemcpy2DAsync(dst.get().raw(), (unsigned)dst.getPitch(), array.get(), dst.getWidth(), dst.getWidth(), dst.getHeight(), hipMemcpyDeviceToDevice, s.get())); } Status unpackRGBA(GPU::Buffer2D& dst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( unpackSourceKernelRGBA), dim3(dimGrid), dim3(dimBlock), 0, s.get(), (uint32_t*)dst.get().raw(), (unsigned)dst.getPitch() / sizeof(uint32_t), // pitch is in bytes surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackF32C1(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& array, std::size_t /*width*/, std::size_t /*height*/, GPU::Stream s) { return CUDA_ERROR(hipMemcpy2DAsync(dst.get().raw(), (unsigned)dst.getPitch(), array.get(), dst.getWidth(), dst.getWidth(), dst.getHeight(), hipMemcpyDeviceToDevice, s.get())); } Status unpackF32C1(GPU::Buffer2D& dst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( unpackSourceKernelF32C1), dim3(dimGrid), dim3(dimBlock), 0, s.get(), (float*)dst.get().raw(), (unsigned)dst.getPitch() / sizeof(float), // pitch is in bytes surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackGrayscale16(GPU::Buffer2D& /* dst */, const GPU::Buffer<const uint32_t>& /* input */, size_t /* width*/, size_t /* height */, GPU::Stream /* s */) { // TODO return {Origin::GPU, 
ErrType::UnsupportedAction, "Color space conversion for Grayscale16 not implemented from buffer"}; } Status unpackGrayscale16(GPU::Buffer2D& dst, const GPU::Surface& surf, size_t width, size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( unpackSourceKernelGrayscale16), dim3(dimGrid), dim3(dimBlock), 0, s.get(), (uint16_t*)dst.get().raw(), (unsigned)dst.getPitch() / sizeof(uint16_t), // pitch is in bytes surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackDepth(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Buffer<const uint32_t>& array, std::size_t width, std::size_t height, GPU::Stream s) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); hipLaunchKernelGGL(( unpackKernelDepth), dim3(dimGrid), dim3(dimBlock), 0, s.get(), yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(), (unsigned)vDst.getPitch(), (float*)array.get().raw(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackDepth(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); hipLaunchKernelGGL(( unpackSourceKernelDepth), dim3(dimGrid), dim3(dimBlock), 0, s.get(), yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(), (unsigned)vDst.getPitch(), surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Buffer<const uint32_t>& array, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); hipLaunchKernelGGL(( unpackKernelYV12), dim3(dimGrid), dim3(dimBlock), 0, s.get(), yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(), (unsigned)vDst.getPitch(), array.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); hipLaunchKernelGGL(( unpackSourceKernelYV12), dim3(dimGrid), dim3(dimBlock), 0, s.get(), yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(), (unsigned)vDst.getPitch(), surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackNV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uvDst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, 
CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); hipLaunchKernelGGL(( unpackSourceKernelNV12), dim3(dimGrid), dim3(dimBlock), 0, s.get(), yDst.get().raw(), (unsigned)yDst.getPitch(), uvDst.get().raw(), (unsigned)uvDst.getPitch(), surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackNV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uvDst, const GPU::Buffer<const uint32_t>& array, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); hipLaunchKernelGGL(( unpackKernelNV12), dim3(dimGrid), dim3(dimBlock), 0, s.get(), yDst.get().raw(), (unsigned)yDst.getPitch(), uvDst.get().raw(), (unsigned)uvDst.getPitch(), array.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYUY2(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( unpackYUY2Kernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get().raw(), (unsigned)dst.getPitch(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYUY2(GPU::Buffer2D&, const GPU::Surface&, std::size_t, std::size_t, GPU::Stream) { return Status{Origin::GPU, ErrType::ImplementationError, "Unpacking not implemented from Surface"}; } Status unpackUYVY(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( unpackUYVYKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get().raw(), (unsigned)dst.getPitch(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackUYVY(GPU::Buffer2D&, const GPU::Surface&, std::size_t, std::size_t, GPU::Stream) { return Status{Origin::GPU, ErrType::ImplementationError, "Unpacking not implemented from Surface"}; } Status convertGrayscale(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1); hipLaunchKernelGGL(( convertKernelGrayscale), dim3(dimGrid2D), dim3(dimBlock2D), 0, stream.get(), dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYUV422P10(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Buffer<const uint32_t>& src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( unpackYUV422P10Kernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), reinterpret_cast<uint16_t*>(yDst.get().raw()), (unsigned)yDst.getPitch() / 2, reinterpret_cast<uint16_t*>(uDst.get().raw()), (unsigned)uDst.getPitch() / 2, 
reinterpret_cast<uint16_t*>(vDst.get().raw()), (unsigned)vDst.getPitch() / 2, src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYUV422P10(GPU::Buffer2D&, GPU::Buffer2D&, GPU::Buffer2D&, const GPU::Surface&, std::size_t, std::size_t, GPU::Stream) { return Status{Origin::GPU, ErrType::ImplementationError, "Unpacking not implemented from Surface"}; } Status unpackGrayscale(GPU::Buffer2D& dst, const GPU::Surface& src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1); hipLaunchKernelGGL(( unpackKernelGrayscale), dim3(dimGrid2D), dim3(dimBlock2D), 0, stream.get(), dst.get().raw(), (unsigned)dst.getPitch(), src.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } // ---------------- Convert other colorspace -> RGBA -------------------------- Status convertRGBToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1); hipLaunchKernelGGL(( convertRGBToRGBAKernel), dim3(dimGrid2D), dim3(dimBlock2D), 0, stream.get(), dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertRGB210ToRGBA(GPU::Surface& dst, GPU::Buffer<const uint32_t> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1); hipLaunchKernelGGL(( convertRGB210ToRGBAKernel), dim3(dimGrid2D), dim3(dimBlock2D), 0, stream.get(), dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertBGRToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1); hipLaunchKernelGGL(( convertBGRToRGBAKernel), dim3(dimGrid2D), dim3(dimBlock2D), 0, stream.get(), dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertBGRUToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1); hipLaunchKernelGGL(( convertBGRUToRGBAKernel), dim3(dimGrid2D), dim3(dimBlock2D), 0, stream.get(), dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertBayerRGGBToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1); hipLaunchKernelGGL(( convertBayerRGGBToRGBAKernel), dim3(dimGrid2D), dim3(dimBlock2D), sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1), stream.get(), 
dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertBayerBGGRToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1); hipLaunchKernelGGL(( convertBayerBGGRToRGBAKernel), dim3(dimGrid2D), dim3(dimBlock2D), sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1), stream.get(), dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertBayerGRBGToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1); hipLaunchKernelGGL(( convertBayerGRBGToRGBAKernel), dim3(dimGrid2D), dim3(dimBlock2D), sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1), stream.get(), dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertBayerGBRGToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1); hipLaunchKernelGGL(( convertBayerGBRGToRGBAKernel), dim3(dimGrid2D), dim3(dimBlock2D), sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1), stream.get(), dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertUYVYToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(16, 16, 1); assert(!(width & 1)); assert(!(height & 1)); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( convertUYVYToRGBAKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertYUV422P10ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { assert(!(width & 1)); const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( convertYUV422P10ToRGBAKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get().surface(), src.as<const uint16_t>().get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertYUY2ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(16, 16, 1); assert(!(width & 1)); assert(!(height & 1)); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( convertYUY2ToRGBAKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status 
convertYV12ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); assert(!(width & 1)); assert(!(height & 1)); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock.y), 1); hipLaunchKernelGGL(( convertYV12ToRGBAKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertNV12ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); assert(!(width & 1)); assert(!(height & 1)); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock.y), 1); hipLaunchKernelGGL(( convertNV12ToRGBAKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertYUV420ToMono(GPU::Buffer<unsigned char> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); assert(!(width & 1)); assert(!(height & 1)); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock.y), 1); hipLaunchKernelGGL(( unpackMonoKernelYUV420P), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertGrayscaleToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); hipLaunchKernelGGL(( convertGrayscaleKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } } // namespace Image } // namespace VideoStitch
fdb4eb8970b9310bd6e45d017a50ebaf79fef997.cu
// Copyright (c) 2012-2017 VideoStitch SAS // Copyright (c) 2018 stitchEm #include "image/unpack.hpp" #include "colorArrayDevice.hpp" #include "backend/cuda/deviceBuffer.hpp" #include "backend/cuda/deviceBuffer2D.hpp" #include "backend/cuda/surface.hpp" #include "backend/cuda/deviceStream.hpp" #include "cuda/util.hpp" #include "unpackKernel.cu" #include <cuda_runtime.h> #include <cassert> const unsigned int CudaBlockSize = 16; namespace VideoStitch { namespace Image { // ---------------- Convert RGBA -> other colorspace -------------------------- Status unpackRGB(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& array, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); unpackKernelRGB<<<dimGrid, dimBlock, 0, s.get()>>>(dst.get().raw(), (unsigned)dst.getPitch(), array.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackRGB(GPU::Buffer2D& dst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); unpackSourceKernelRGB<<<dimGrid, dimBlock, 0, s.get()>>>(dst.get().raw(), (unsigned)dst.getPitch(), surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackRGBA(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& array, std::size_t /*width*/, std::size_t /*height*/, GPU::Stream s) { return CUDA_ERROR(cudaMemcpy2DAsync(dst.get().raw(), (unsigned)dst.getPitch(), array.get(), dst.getWidth(), dst.getWidth(), dst.getHeight(), cudaMemcpyDeviceToDevice, s.get())); } Status unpackRGBA(GPU::Buffer2D& dst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); unpackSourceKernelRGBA<<<dimGrid, dimBlock, 0, s.get()>>>( (uint32_t*)dst.get().raw(), (unsigned)dst.getPitch() / sizeof(uint32_t), // pitch is in bytes surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackF32C1(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& array, std::size_t /*width*/, std::size_t /*height*/, GPU::Stream s) { return CUDA_ERROR(cudaMemcpy2DAsync(dst.get().raw(), (unsigned)dst.getPitch(), array.get(), dst.getWidth(), dst.getWidth(), dst.getHeight(), cudaMemcpyDeviceToDevice, s.get())); } Status unpackF32C1(GPU::Buffer2D& dst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); unpackSourceKernelF32C1<<<dimGrid, dimBlock, 0, s.get()>>>( (float*)dst.get().raw(), (unsigned)dst.getPitch() / sizeof(float), // pitch is in bytes surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackGrayscale16(GPU::Buffer2D& /* dst */, const GPU::Buffer<const uint32_t>& /* input */, size_t /* width*/, size_t /* height */, GPU::Stream /* s */) { // TODO return {Origin::GPU, ErrType::UnsupportedAction, "Color space conversion for Grayscale16 not implemented from buffer"}; } Status unpackGrayscale16(GPU::Buffer2D& dst, const GPU::Surface& surf, size_t width, size_t 
height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); unpackSourceKernelGrayscale16<<<dimGrid, dimBlock, 0, s.get()>>>( (uint16_t*)dst.get().raw(), (unsigned)dst.getPitch() / sizeof(uint16_t), // pitch is in bytes surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackDepth(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Buffer<const uint32_t>& array, std::size_t width, std::size_t height, GPU::Stream s) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); unpackKernelDepth<<<dimGrid, dimBlock, 0, s.get()>>>( yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(), (unsigned)vDst.getPitch(), (float*)array.get().raw(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackDepth(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); unpackSourceKernelDepth<<<dimGrid, dimBlock, 0, s.get()>>>( yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(), (unsigned)vDst.getPitch(), surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Buffer<const uint32_t>& array, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); unpackKernelYV12<<<dimGrid, dimBlock, 0, s.get()>>>( yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(), (unsigned)vDst.getPitch(), array.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); unpackSourceKernelYV12<<<dimGrid, dimBlock, 0, s.get()>>>( yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(), (unsigned)vDst.getPitch(), surf.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackNV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uvDst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); unpackSourceKernelNV12<<<dimGrid, dimBlock, 0, s.get()>>>(yDst.get().raw(), (unsigned)yDst.getPitch(), uvDst.get().raw(), (unsigned)uvDst.getPitch(), surf.get().surface(), (unsigned)width, 
(unsigned)height); return CUDA_STATUS; } Status unpackNV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uvDst, const GPU::Buffer<const uint32_t>& array, std::size_t width, std::size_t height, GPU::Stream s) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1); unpackKernelNV12<<<dimGrid, dimBlock, 0, s.get()>>>(yDst.get().raw(), (unsigned)yDst.getPitch(), uvDst.get().raw(), (unsigned)uvDst.getPitch(), array.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYUY2(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); unpackYUY2Kernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().raw(), (unsigned)dst.getPitch(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYUY2(GPU::Buffer2D&, const GPU::Surface&, std::size_t, std::size_t, GPU::Stream) { return Status{Origin::GPU, ErrType::ImplementationError, "Unpacking not implemented from Surface"}; } Status unpackUYVY(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); unpackUYVYKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().raw(), (unsigned)dst.getPitch(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackUYVY(GPU::Buffer2D&, const GPU::Surface&, std::size_t, std::size_t, GPU::Stream) { return Status{Origin::GPU, ErrType::ImplementationError, "Unpacking not implemented from Surface"}; } Status convertGrayscale(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1); convertKernelGrayscale<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYUV422P10(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Buffer<const uint32_t>& src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); unpackYUV422P10Kernel<<<dimGrid, dimBlock, 0, stream.get()>>>( reinterpret_cast<uint16_t*>(yDst.get().raw()), (unsigned)yDst.getPitch() / 2, reinterpret_cast<uint16_t*>(uDst.get().raw()), (unsigned)uDst.getPitch() / 2, reinterpret_cast<uint16_t*>(vDst.get().raw()), (unsigned)vDst.getPitch() / 2, src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status unpackYUV422P10(GPU::Buffer2D&, GPU::Buffer2D&, GPU::Buffer2D&, const GPU::Surface&, std::size_t, std::size_t, GPU::Stream) { return Status{Origin::GPU, ErrType::ImplementationError, "Unpacking not implemented from Surface"}; } Status unpackGrayscale(GPU::Buffer2D& dst, const GPU::Surface& src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 
dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1); unpackKernelGrayscale<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>( dst.get().raw(), (unsigned)dst.getPitch(), src.get().surface(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } // ---------------- Convert other colorspace -> RGBA -------------------------- Status convertRGBToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1); convertRGBToRGBAKernel<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertRGB210ToRGBA(GPU::Surface& dst, GPU::Buffer<const uint32_t> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1); convertRGB210ToRGBAKernel<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertBGRToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1); convertBGRToRGBAKernel<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertBGRUToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1); convertBGRUToRGBAKernel<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertBayerRGGBToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1); convertBayerRGGBToRGBAKernel<<<dimGrid2D, dimBlock2D, sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1), stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertBayerBGGRToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1); convertBayerBGGRToRGBAKernel<<<dimGrid2D, dimBlock2D, sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1), stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status 
convertBayerGRBGToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1); convertBayerGRBGToRGBAKernel<<<dimGrid2D, dimBlock2D, sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1), stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertBayerGBRGToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { assert(!(width & 1)); assert(!(height & 1)); const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1); convertBayerGBRGToRGBAKernel<<<dimGrid2D, dimBlock2D, sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1), stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertUYVYToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(16, 16, 1); assert(!(width & 1)); assert(!(height & 1)); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); convertUYVYToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertYUV422P10ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { assert(!(width & 1)); const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); convertYUV422P10ToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>( dst.get().surface(), src.as<const uint16_t>().get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertYUY2ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(16, 16, 1); assert(!(width & 1)); assert(!(height & 1)); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); convertYUY2ToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertYV12ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); assert(!(width & 1)); assert(!(height & 1)); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock.y), 1); convertYV12ToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertNV12ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); assert(!(width & 1)); assert(!(height & 1)); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock.y), 
1); convertNV12ToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertYUV420ToMono(GPU::Buffer<unsigned char> dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); assert(!(width & 1)); assert(!(height & 1)); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock.y), 1); unpackMonoKernelYUV420P<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } Status convertGrayscaleToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height, GPU::Stream stream) { const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); convertGrayscaleKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width, (unsigned)height); return CUDA_STATUS; } } // namespace Image } // namespace VideoStitch
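Every conversion entry point in the unpack/convert pair above follows the same launch-geometry pattern: a fixed 16x16 thread block, a grid obtained by rounding the image dimensions up with `Cuda::ceilDiv`, and a grid halved in each direction for the chroma-subsampled and Bayer formats. The snippet below is a minimal, self-contained sketch of that pattern only; it is not VideoStitch code, and `ceilDiv`, `fillKernel`, and `launchFill` are illustrative names introduced here.

// Sketch of the block/grid sizing pattern used by the functions above (assumed names).
#include <cuda_runtime.h>
#include <cstddef>

static inline unsigned ceilDiv(std::size_t v, unsigned d) {
  return (unsigned)((v + d - 1) / d);  // round up so partial tiles still get a block
}

__global__ void fillKernel(unsigned char* dst, unsigned pitch, unsigned width, unsigned height) {
  unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < width && y < height) {  // guard: the rounded-up grid may overshoot the image
    dst[y * pitch + x] = 0;
  }
}

void launchFill(unsigned char* dst, unsigned pitch, std::size_t width, std::size_t height,
                cudaStream_t stream) {
  const dim3 block(16, 16, 1);
  const dim3 grid(ceilDiv(width, block.x), ceilDiv(height, block.y), 1);
  fillKernel<<<grid, block, 0, stream>>>(dst, pitch, (unsigned)width, (unsigned)height);
}

The in-kernel bounds check is what makes the round-up safe; for the 4:2:0 and Bayer paths the same computation is applied to width/2 and height/2, since each thread there handles a 2x2 pixel quad.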
b1fd0a7db079a2380e324b704986100236581fb3.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cu_minus.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            const float *A = NULL;
            hipMalloc(&A, XSIZE*YSIZE);
            const float *B = NULL;
            hipMalloc(&B, XSIZE*YSIZE);
            float *C = NULL;
            hipMalloc(&C, XSIZE*YSIZE);
            const int n = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( cu_minus), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( cu_minus), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( cu_minus), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
b1fd0a7db079a2380e324b704986100236581fb3.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cu_minus.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            const float *A = NULL;
            cudaMalloc(&A, XSIZE*YSIZE);
            const float *B = NULL;
            cudaMalloc(&B, XSIZE*YSIZE);
            float *C = NULL;
            cudaMalloc(&C, XSIZE*YSIZE);
            const int n = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            cu_minus<<<gridBlock,threadBlock>>>(A,B,C,n);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                cu_minus<<<gridBlock,threadBlock>>>(A,B,C,n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                cu_minus<<<gridBlock,threadBlock>>>(A,B,C,n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
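Both versions of the benchmark above warm the kernel up for 10 launches and then time 1000 launches with `steady_clock`. Because kernel launches are asynchronous, the timed loop mostly measures launch overhead unless the device is synchronized before the end timestamp is read. The helper below is a sketch of one way to bracket such a measurement, assuming the caller passes the launch as a lambda; `timeKernelUs` is my addition and not part of the generated benchmark.

// Sketch (assumed helper, not from the benchmark): time kernel execution, not just launches.
#include <cuda_runtime.h>
#include <chrono>

template <typename LaunchFn>
float timeKernelUs(LaunchFn launch, int iterations) {
  using namespace std::chrono;
  launch();                      // one warm-up launch
  cudaDeviceSynchronize();       // make sure the warm-up has finished
  auto start = steady_clock::now();
  for (int i = 0; i < iterations; ++i) launch();
  cudaDeviceSynchronize();       // wait for all timed work before stopping the clock
  auto end = steady_clock::now();
  return duration_cast<duration<float, std::micro>>(end - start).count();
}

With the benchmark above it could be invoked as `timeKernelUs([&] { cu_minus<<<gridBlock, threadBlock>>>(A, B, C, n); }, 1000);`. CUDA events are the usual alternative when per-stream timing is needed.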
62fa696e811c983fbeced1aa23cf8c30513d4cee.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>

#include <chrono>
#include <iostream>
#include <string>
#include <thread>

using ::testing::HasSubstr;

const auto max_assertions_failure_str =
    "Assertion failure " + std::to_string(C10_CUDA_DSA_ASSERTION_COUNT - 1);

/**
 * Device kernel that takes a single integer parameter as argument and
 * will always trigger a device side assertion.
 */
__global__ void cuda_always_fail_assertion_kernel(
    const int a,
    TORCH_DSA_KERNEL_ARGS) {
  CUDA_KERNEL_ASSERT2(a != a);
}

/**
 * TEST: Triggering device side assertion from single block and multiple threads
 * <<<1,128>>>. Once the very first thread asserts, all the other threads will
 * basically be in a bad state and the block id with the failed assertion would be
 * [0,0,0].
 */
void cuda_device_assertions_multiple_writes_from_same_block() {
  const auto stream = c10::hip::getStreamFromPoolMasqueradingAsCUDA();
  TORCH_DSA_KERNEL_LAUNCH(
      cuda_always_fail_assertion_kernel,
      1, /* Blocks */
      128, /* Threads */
      0, /* Shared mem */
      stream, /* Stream */
      1);

  try {
    c10::hip::device_synchronize();
    throw std::runtime_error("Test didn't fail, but should have.");
  } catch (const c10::Error& err) {
    const auto err_str = std::string(err.what());
    ASSERT_THAT(err_str, HasSubstr(max_assertions_failure_str));
    ASSERT_THAT(err_str, HasSubstr("Block ID that failed assertion = [0,0,0]"));
    ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Name of kernel launched that led to failure = cuda_always_fail_assertion_kernel"));
    ASSERT_THAT(
        err_str, HasSubstr("File containing kernel launch = " __FILE__));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Function containing kernel launch = " +
            std::string(__FUNCTION__)));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Stream kernel was launched on = " + std::to_string(stream.id())));
  }
}

TEST(CUDATest, cuda_device_assertions_multiple_writes_from_same_block) {
#ifdef TORCH_USE_CUDA_DSA
  c10::hip::CUDAKernelLaunchRegistry::get_singleton_ref().enabled = true;
  cuda_device_assertions_multiple_writes_from_same_block();
#else
  GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled.";
#endif
}
62fa696e811c983fbeced1aa23cf8c30513d4cee.cu
#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>

#include <chrono>
#include <iostream>
#include <string>
#include <thread>

using ::testing::HasSubstr;

const auto max_assertions_failure_str =
    "Assertion failure " + std::to_string(C10_CUDA_DSA_ASSERTION_COUNT - 1);

/**
 * Device kernel that takes a single integer parameter as argument and
 * will always trigger a device side assertion.
 */
__global__ void cuda_always_fail_assertion_kernel(
    const int a,
    TORCH_DSA_KERNEL_ARGS) {
  CUDA_KERNEL_ASSERT2(a != a);
}

/**
 * TEST: Triggering device side assertion from single block and multiple threads
 * <<<1,128>>>. Once the very first thread asserts, all the other threads will
 * basically be in a bad state and the block id with the failed assertion would be
 * [0,0,0].
 */
void cuda_device_assertions_multiple_writes_from_same_block() {
  const auto stream = c10::cuda::getStreamFromPool();
  TORCH_DSA_KERNEL_LAUNCH(
      cuda_always_fail_assertion_kernel,
      1, /* Blocks */
      128, /* Threads */
      0, /* Shared mem */
      stream, /* Stream */
      1);

  try {
    c10::cuda::device_synchronize();
    throw std::runtime_error("Test didn't fail, but should have.");
  } catch (const c10::Error& err) {
    const auto err_str = std::string(err.what());
    ASSERT_THAT(err_str, HasSubstr(max_assertions_failure_str));
    ASSERT_THAT(err_str, HasSubstr("Block ID that failed assertion = [0,0,0]"));
    ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Name of kernel launched that led to failure = cuda_always_fail_assertion_kernel"));
    ASSERT_THAT(
        err_str, HasSubstr("File containing kernel launch = " __FILE__));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Function containing kernel launch = " +
            std::string(__FUNCTION__)));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Stream kernel was launched on = " + std::to_string(stream.id())));
  }
}

TEST(CUDATest, cuda_device_assertions_multiple_writes_from_same_block) {
#ifdef TORCH_USE_CUDA_DSA
  c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled = true;
  cuda_device_assertions_multiple_writes_from_same_block();
#else
  GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled.";
#endif
}
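The test pair above exercises PyTorch's device-side assertion (DSA) machinery through the `TORCH_DSA_KERNEL_LAUNCH` and `CUDA_KERNEL_ASSERT2` macros; the underlying primitive is CUDA's ordinary device-side `assert()`, which traps the kernel and surfaces the failure at the next synchronization point. The standalone sketch below shows only that primitive; it is not the c10 launch registry or its error formatting, and the kernel and function names here are illustrative.

// Minimal sketch of a plain device-side assertion (not the c10 DSA machinery).
#include <cuda_runtime.h>
#include <cassert>
#include <cstdio>

__global__ void alwaysFailAssertionKernel(int a) {
  assert(a != a);  // always false: every thread that executes this line asserts
}

int main() {
  alwaysFailAssertionKernel<<<1, 128>>>(1);
  // The assertion is reported when the host next synchronizes with the device.
  cudaError_t err = cudaDeviceSynchronize();
  std::printf("cudaDeviceSynchronize returned: %s\n", cudaGetErrorString(err));
  return 0;
}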
ab1c0dc7e84b2bc658f691176fcdb88075bb424e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include "windows.h" #include <time.h> #include "GL/glee.h" #include "consts.h" #include "nanorod.h" #include "global.h" #include "film.h" #include "tracer.h" #include "obj_object.h" #include "texture.h" #include "IL/ilut.h" #include "GL/glut.h" #include "GL/glui.h" #include "gpu_util.h" #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> #include "gpu_util.cu" #include "tracer.cu" #define _MSVC #include "cWorldVol.h" #include "cCameraVol.h" #include "chai3d/src/chai3d.h" /////////////////////////// float *deviceData = NULL; int *idData = NULL; /////////////////////////// // //***Haptic globals*** float *hostData = NULL; int *hostIdData = NULL; void initHaptic(); // function called before exiting the application void closeHaptic(void); // main graphics callback void updateHapticGraphics(void); // main haptics loop void updateHaptics(void); const int MAX_DEVICES = 1; // a world that contains all objects of the virtual environment cWorldVol* world; // a camera that renders the world in a window display cCameraVol* camera; // a light source to illuminate the objects in the virtual scene cLight *light; // a little "chai3d" bitmap logo at the bottom of the screen cBitmap* logo; // width and height of the current window display int displayW; int displayH; // a haptic device handler cHapticDeviceHandler* handler; // a table containing pointers to all haptic devices detected on this computer cGenericHapticDevice* hapticDevices[MAX_DEVICES]; // a table containing pointers to label which display the position of // each haptic device cLabel* labels[MAX_DEVICES]; cGenericObject* rootLabels; // number of haptic devices detected int numHapticDevices; // table containing a list of 3D cursors for each haptic device cShapeSphere* cursors[MAX_DEVICES]; // table containing a list of lines to display velocity cShapeLine* velocityVectors[MAX_DEVICES]; // material properties used to render the color of the cursors cMaterial matCursorButtonON; cMaterial matCursorButtonOFF; // status of the main simulation haptics loop bool simulationRunning; // root resource path string resourceRoot; // damping mode ON/OFF bool useDamping; // force field mode ON/OFF bool useForceField; // has exited haptics simulation thread bool simulationFinished; //Camera tool vector vect3d vCameraToolVect; //Toool position cVector3d posTool; // a virtual tool representing the haptic device in the scene cGeneric3dofPointer* tool; // a spherical object representing the volume so it can have material properties // and we can change them when volume values change cShapeSphere* object0; double stiffnessMax; double forceMax; double dampingMax; int typeOfForce = 0; int shouldDrawAxes = 1; ObjObject *pCap0; //***Haptic Globals end*** //***Haptic Functions*** void initHaptic() { displayW = 0; displayH = 0; numHapticDevices = 0; simulationRunning = false; useDamping = false; useForceField = true; simulationFinished = false; // create a new world. world = new cWorldVol(); // set the background color of the environment // the color is defined by its (R,G,B) components. 
world->setBackgroundColor(0.0, 0.0, 0.0); // create a camera and insert it into the virtual world camera = new cCameraVol(world); world->addChild(camera); // position and oriente the camera camera->set( cVector3d (0.5, 0.0, 0.0), // camera position (eye) cVector3d (0.0, 0.0, 0.0), // lookat position (target) cVector3d (0.0, 0.0, 1.0)); // direction of the "up" vector // set the near and far clipping planes of the camera // anything in front/behind these clipping planes will not be rendered camera->setClippingPlanes(0.01, 10.0); // create a light source and attach it to the camera light = new cLight(world); camera->addChild(light); // attach light to camera light->setEnabled(true); // enable light source light->setPos(cVector3d( 2.0, 0.5, 1.0)); // position the light source light->setDir(cVector3d(-2.0, 0.5, 1.0)); // define the direction of the light beam //----------------------------------------------------------------------- // HAPTIC DEVICES / TOOLS //----------------------------------------------------------------------- // create a haptic device handler handler = new cHapticDeviceHandler(); // read the number of haptic devices currently connected to the computer numHapticDevices = handler->getNumDevices(); // limit the number of devices to MAX_DEVICES numHapticDevices = cMin(numHapticDevices, MAX_DEVICES); // create a node on which we will attach small labels that display the // position of each haptic device rootLabels = new cGenericObject(); camera->m_front_2Dscene.addChild(rootLabels); // create a small label as title cLabel* titleLabel = new cLabel(); rootLabels->addChild(titleLabel); // define its position, color and string message titleLabel->setPos(0, 30, 0); titleLabel->m_fontColor.set(1.0, 1.0, 1.0); titleLabel->m_string = "Haptic Device Pos [mm]:"; // for each available haptic device, create a 3D cursor // and a small line to show velocity int i = 0; while (i < numHapticDevices) { // get a handle to the next haptic device cGenericHapticDevice* newHapticDevice; handler->getDevice(newHapticDevice, i); // open connection to haptic device newHapticDevice->open(); // initialize haptic device newHapticDevice->initialize(); // store the handle in the haptic device table hapticDevices[i] = newHapticDevice; // retrieve information about the current haptic device cHapticDeviceInfo info = newHapticDevice->getSpecifications(); // create a 3D tool and add it to the world tool = new cGeneric3dofPointer(world); world->addChild(tool); // connect the haptic device to the tool tool->setHapticDevice(hapticDevices[i]); // initialize tool by connecting to haptic device tool->start(); // map the physical workspace of the haptic device to a larger virtual workspace. tool->setWorkspaceRadius(1.0); // define a radius for the tool tool->setRadius(0.03); // read the scale factor between the physical workspace of the haptic // device and the virtual workspace defined for the tool double workspaceScaleFactor = tool->getWorkspaceScaleFactor(); // define a maximum stiffness that can be handled by the current // haptic device. The value is scaled to take into account the // workspace scale factor stiffnessMax = info.m_maxForceStiffness / workspaceScaleFactor; forceMax = info.m_maxForce; // define the maximum damping factor that can be handled by the // current haptic device. 
The The value is scaled to take into account the // workspace scale factor dampingMax = info.m_maxLinearDamping / workspaceScaleFactor; ///////////////////////////////////////////////////////////////////////// // OBJECT 0: "VIBRATIONS" //////////////////////////////////////////////////////////////////////// // temp variable cGenericEffect* newEffect; // create a sphere and define its radius object0 = new cShapeSphere(2.0); // add object to world world->addChild(object0); // set the position of the object at the center of the world object0->setPos(0.0, 0.0, 0.0); object0->setUseTexture(false); // create a haptic viscous effect newEffect = new cEffectVibration(object0); object0->addEffect(newEffect); newEffect = new cEffectSurface(object0); object0->addEffect(newEffect); //newEffect = new cEffectViscosity(object0); //object0->addEffect(newEffect); //newEffect = new cEffectMagnet(object0); //object0->addEffect(newEffect); // create a cursor by setting its radius cShapeSphere* newCursor = new cShapeSphere(0.01); // add cursor to the world world->addChild(newCursor); // add cursor to the cursor table cursors[i] = newCursor; // create a small line to illustrate velocity cShapeLine* newLine = new cShapeLine(cVector3d(0,0,0), cVector3d(0,0,0)); velocityVectors[i] = newLine; // add line to the world world->addChild(newLine); // create a string that concatenates the device number and model name. string strID; cStr(strID, i); string strDevice = "#" + strID + " - " +info.m_modelName; // attach a small label next to the cursor to indicate device information cLabel* newLabel = new cLabel(); newCursor->addChild(newLabel); newLabel->m_string = strDevice; newLabel->setPos(0.00, 0.02, 0.00); newLabel->m_fontColor.set(1.0, 1.0, 1.0); // if the device provided orientation sensing (stylus), a reference // frame is displayed if (info.m_sensedRotation == true) { // display a reference frame newCursor->setShowFrame(true); // set the size of the reference frame newCursor->setFrameSize(0.05, 0.05); } // crate a small label to indicate the position of the device cLabel* newPosLabel = new cLabel(); rootLabels->addChild(newPosLabel); newPosLabel->setPos(0, -20 * i, 0); newPosLabel->m_fontColor.set(0.6, 0.6, 0.6); labels[i] = newPosLabel; // increment counter i++; } // simulation in now running simulationRunning = true; // create a thread which starts the main haptics rendering loop cThread* hapticsThread = new cThread(); hapticsThread->set(updateHaptics, CHAI_THREAD_PRIORITY_HAPTICS); } void closeHaptic(void) { // stop the simulation simulationRunning = false; // wait for graphics and haptics loops to terminate while (!simulationFinished) { cSleepMs(100); } // close all haptic devices int i=0; while (i < numHapticDevices) { hapticDevices[i]->close(); i++; } } float getElecCellValue(int x, int y, int z, float *elecData, int *idData) { if( x < 0 || x >= VOL_X || y < 0 || y >= VOL_Y || z < 0 || z >= VOL_Z ) // Hard-code it for now { return 0; } unsigned offset = x + y * VOL_X + z * VOL_X * VOL_Y; return *(elecData + offset); } cVector3d toolCoord2VolCoord(cVector3d toolCoord) { //TODO: now it's rotation dependent, it shouldn't cVector3d result; float hapticWorkSpaceRadius = 1.f; if(numHapticDevices > 0) hapticWorkSpaceRadius = hapticDevices[0]->getSpecifications().m_workspaceRadius; result.x = toolCoord.y / (hapticWorkSpaceRadius * 2.f) * VOL_X; result.y = toolCoord.x / (hapticWorkSpaceRadius * 2.f) * VOL_Y; result.z = toolCoord.z / (hapticWorkSpaceRadius * 2.f) * VOL_Z; /* %%%Before cVector3d result; result.x = 
(toolCoord.y * VOL_X) / 0.4f; result.y = (-toolCoord.x * VOL_Y) / 0.4f; result.z = (toolCoord.z * VOL_Z) / 0.4f; */ return result; } void setCameraToolVector() { if(numHapticDevices > 0) { //Save previous camera before doing anything scene.setPreviousCameraCenter(scene.getCamera()); //Get vector: camera center -> tool hapticDevices[0]->getPosition(posTool); posTool = toolCoord2VolCoord(posTool); vect3d convertedPosTool(posTool.x, posTool.y, posTool.z); //Vector: camera center -> tool vCameraToolVect = convertedPosTool - *scene.getPreviousCameraCenter(); } } //Use the camera tool vector to set the new position of the tool object in GPU so it does not move //when transforms are made to the scene void setToolPositionGPU() { if(numHapticDevices > 0) { //TODO: for now, just rotation //Transform cameraToolVect with the same transformations as the ones for the camera vect3d posNewCameraTool; vect3d vtmp; mat_rot(vCameraToolVect, vtmp); //Add this vector to the camera center point vect3d *posCamera = scene.computeCameraCenter(scene.getCamera()); posNewCameraTool = *posCamera + vtmp; //Generate vector: cameraToolVect -> posNewCameraTool vect3d vOldToolNewTool = posNewCameraTool - (vCameraToolVect + *scene.getPreviousCameraCenter()); //Translate the object in the GPU setObjectCenterGPU(vOldToolNewTool, 1); } } //Just detect the tool's position and pass it to the GPU void moveToolPositionGPU() { if(numHapticDevices > 0) { //Transform cameraToolVect with the same transformations as the ones for the camera static cVector3d previousPosTool(0,0,0); cVector3d translation = posTool - previousPosTool; vect3d convertedTranslation(translation.x, translation.y, translation.z); //Translate the object in the GPU translateObjectGPU(convertedTranslation, 1); previousPosTool = posTool; } } //Just detect the tool's position and translate coordinates to volume coordinates void moveToolPositionCPU() { if(numHapticDevices > 0) { //Transform cameraToolVect with the same transformations as the ones for the camera cVector3d newPosTool; //Get vector: camera center -> tool hapticDevices[0]->getPosition(newPosTool); //Account for the tool's actual imprecisions //%%%newPosTool.mul(5); newPosTool = toolCoord2VolCoord(newPosTool); posTool = newPosTool; } } void updateHapticGraphics(void) { // update content of position label // read position of device an convert into millimeters if(numHapticDevices > 0) { //This is for drawing from the volume's camera moveToolPositionGPU(); //This is for drawing from the haptic's camera cVector3d pos; hapticDevices[0]->getPosition(pos); pos.mul(5); // create a string that concatenates the device number and its position. 
string strID; cStr(strID, 0); string strLabel = "#" + strID + " x: "; cStr(strLabel, pos.x, 2); strLabel = strLabel + " y: "; cStr(strLabel, pos.y, 2); strLabel = strLabel + " z: "; cStr(strLabel, pos.z, 2); labels[0]->m_string = strLabel; } //TODO: need to draw it correctly // camera->renderView(displayW, displayH); // check for any OpenGL errors GLenum err; err = glGetError(); if (err != GL_NO_ERROR) printf("Error: %s\n", gluErrorString(err)); } void ResetForces() { //Vibration object0->m_material.setVibrationFrequency(0); object0->m_material.setVibrationAmplitude(0); //Friction object0->m_material.setStiffness(0); object0->m_material.setStaticFriction(0); object0->m_material.setViscosity(0); } void updateHaptics(void) { // main haptic simulation loop while(simulationRunning) { if(numHapticDevices > 0) { // read position of haptic device cVector3d newPosition; hapticDevices[0]->getPosition(newPosition); // read orientation of haptic device cMatrix3d newRotation; hapticDevices[0]->getRotation(newRotation); // update position and orientation of cursor cursors[0]->setPos(newPosition); cursors[0]->setRot(newRotation); // read linear velocity from device cVector3d linearVelocity; hapticDevices[0]->getLinearVelocity(linearVelocity); // update arrow velocityVectors[0]->m_pointA = newPosition; velocityVectors[0]->m_pointB = cAdd(newPosition, linearVelocity); // read user button status bool buttonStatus; hapticDevices[0]->getUserSwitch(0, buttonStatus); // adjustthe color of the cursor according to the status of // the user switch (ON = TRUE / OFF = FALSE) if (buttonStatus) { cursors[0]->m_material = matCursorButtonON; } else { cursors[0]->m_material = matCursorButtonOFF; } //get value from data at the position of the tool (the converted position) moveToolPositionCPU(); float dataRange = fEnd-fStart; cVector3d newForce (0,0,0); float val = getElecCellValue(posTool.x + VOL_X/2.f, posTool.y + VOL_Y/2.f, posTool.z+VOL_Z/2.f, hostData, hostIdData) / dataRange; //printf("%f\n",val); // set haptic properties according to the voxel inside the volume // NOTE that there are two ways this is being done, first, object0 // has some properties, then some forces will be applied through the // tool variable and also some forces are applied directly to the // haptic device through hapticDevices[0]->setForce() if(typeOfForce == 1) { ResetForces(); //Vibration object0->m_material.setVibrationFrequency(50.f); object0->m_material.setVibrationAmplitude(1.0 * forceMax * val); } //Magnetic Force //object0->m_material.setStiffness(0.1 * stiffnessMax * val); //object0->m_material.setMagnetMaxForce(0.1 * 2000.0 * val); //object0->m_material.setMagnetMaxDistance(0.05); //object0->m_material.setViscosity(1.0 * dampingMax); else if(typeOfForce == 2) { ResetForces(); //Friction object0->m_material.setStiffness(0.1 * stiffnessMax * val); object0->m_material.setDynamicFriction(1.0 * 2000.0 * val); object0->m_material.setViscosity(1.0 * dampingMax); } else if (typeOfForce == 3) { ResetForces(); //Vibration object0->m_material.setVibrationFrequency(50.f); object0->m_material.setVibrationAmplitude(1.0 * forceMax * val); //Friction object0->m_material.setStiffness(0.1 * stiffnessMax * val); object0->m_material.setStaticFriction(1.0 * 2000.0 * val); object0->m_material.setViscosity(1.0 * dampingMax); } /*Question: which one reveals more the high value areas? 
Which one reveals more the structure of the rod?*/ // apply force field if (typeOfForce == 0) { //Compute force double Kp = 2000.0 * val; // [N/m] cVector3d force = cMul(-Kp, newPosition); newForce.add(force); //Damp cHapticDeviceInfo info = hapticDevices[0]->getSpecifications(); double Kv = info.m_maxLinearDamping*val; cVector3d force2 = cMul(-Kv, linearVelocity); newForce.add(force2); } // compute global reference frames for each object world->computeGlobalPositions(true); // 4 position and orientation of tool tool->updatePose(); // compute interaction forces tool->computeInteractionForces(); if(typeOfForce == 0) { // send computed force to haptic device (direct forces) hapticDevices[0]->setForce(newForce); } else { // send forces to device (like vibration) tool->applyForces(); } } } // exit haptics thread simulationFinished = true; } //***Haptic Functions End*** //Shader //shader variables GLuint fragShader; GLuint vertShader; GLuint program; GLint fragCompiled; GLint vertCompiled; const char *vertProgram; const char *fragProgram; void setupShaders() { if (!GL_ARB_vertex_program) { printf("No shaders!"); return; } FILE *file; file = fopen("./blend.frag","r"); if(file==NULL) { MessageBox(NULL,"Couldn't open frag file.","ERROR",MB_OK|MB_ICONEXCLAMATION); exit(0); } char *fragProg; int size=0; fseek(file, 0, SEEK_END); size = ftell(file)+1; fragProg = new char[size]; fseek(file, 0, SEEK_SET); size = fread(fragProg,1,size,file); fragProg[size]='\0'; fclose(file); fragProgram = fragProg; file = fopen("blend.vert","r"); if(file==NULL) { MessageBox(NULL,"Couldn't open vert file.","ERROR",MB_OK|MB_ICONEXCLAMATION); exit(0); } char *vertProg; size=0; fseek(file, 0, SEEK_END); size = ftell(file)+1; vertProg = new char[size]; fseek(file, 0, SEEK_SET); size = fread(vertProg,1,size,file); vertProg[size]='\0'; fclose(file); vertProgram = vertProg; vertShader = glCreateShader(GL_VERTEX_SHADER); fragShader = glCreateShader(GL_FRAGMENT_SHADER); glShaderSource(fragShader, 1, &fragProgram, NULL); glShaderSource(vertShader, 1, &vertProgram, NULL); glCompileShader(vertShader); // getErrors(); // printShaderInfoLog(vertShader); glGetShaderiv(vertShader, GL_COMPILE_STATUS, &vertCompiled); glCompileShader(fragShader); // getErrors(); // printShaderInfoLog(fragShader); glGetShaderiv(fragShader, GL_COMPILE_STATUS, &fragCompiled); program = glCreateProgram(); glAttachShader(program, vertShader); glAttachShader(program, fragShader); glLinkProgram(program); glUseProgram(program); } void drawAxes() { glDisable(GL_TEXTURE_2D); glMatrixMode(GL_PROJECTION); glPushMatrix(); glLoadIdentity(); glOrtho(-100.f, 100.f, -100.f, 100.f, -10000.f, 10000.f); glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); glLineWidth(2); glShadeModel(GL_SMOOTH); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glPushMatrix(); glRotatef(2.f, 1, 1, 0); glBegin(GL_LINES); { glColor4f(1.f, 0, 0, 0.2f); glVertex3f(-100.f, 0,0); glVertex3f(100.f,0,0); glColor4f(0, 1.f, 0, 0.2f); glVertex3f(0, -100.f,0); glVertex3f(0,100.f,0); glColor4f(0, 0, 1.f, 0); glVertex3f(0, 0,-1000.f); glColor4f(0, 0, 1.f, 1.f); glVertex3f(0,0,1000.f); } glEnd(); glPopMatrix(); glLineWidth(1); glDisable(GL_BLEND); glMatrixMode(GL_PROJECTION); glPopMatrix(); glMatrixMode(GL_MODELVIEW); glPopMatrix(); glEnable(GL_TEXTURE_2D); // check error GLenum err = glGetError(); if(err != GL_NO_ERROR) { printf("[GL ERROR] %s - %d : 0x%x\n", __FILE__, __LINE__, err); } } void drawCap() { glDisable(GL_TEXTURE_2D); glMatrixMode(GL_PROJECTION); glPushMatrix(); 
glLoadIdentity(); glOrtho(-1000.f, 1000.f, -1000.f, 1000.f, -10000.f, 10000.f); glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); glLineWidth(2); glShadeModel(GL_SMOOTH); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glPushMatrix(); glBegin(GL_TRIANGLES); { for(int i =0; i < pCap0->getTriCount(); i++) { glVertex3f( pCap0->getTriangle(i)->_vertices->data[0], pCap0->getTriangle(i)->_vertices->data[1], pCap0->getTriangle(i)->_vertices->data[2]); } } glEnd(); glPopMatrix(); glLineWidth(1); glDisable(GL_BLEND); glMatrixMode(GL_PROJECTION); glPopMatrix(); glMatrixMode(GL_MODELVIEW); glPopMatrix(); glEnable(GL_TEXTURE_2D); // check error GLenum err = glGetError(); if(err != GL_NO_ERROR) { printf("[GL ERROR] %s - %d : 0x%x\n", __FILE__, __LINE__, err); } } // static void resize(int w, int h) { //%%% displayW = w; displayH = h; //%%% glViewport(0, 0, w, h); //%%% // update position of labels rootLabels->setPos(10, displayH-70, 0); //%%% } static void destroy() { if(deviceData) { hipFree(deviceData); deviceData = NULL; } if(idData) { hipFree(idData); idData = NULL; } if(deviceTexData) { hipFree(deviceTexData); } nanoGeoDestroy(); nanoPlaneDestroy(); internalCap0Destroy(); internalCap1Destroy(); SliceDestroy(); global_destroy(); exit(EXIT_SUCCESS); } int iWinId; void idle() { glutSetWindow(iWinId); glutPostRedisplay(); } clock_t nTick = 0; GLUI_RadioGroup *pCMGroup = NULL; GLUI_EditText *pImgPath = NULL; // Zooming static int InitViewZ = 6100; const int MaxViewZ = 12000; const int MinViewZ = 100; static int nCurrViewZ = InitViewZ; static int nZoomStep = -10; static int nRotStep = 1; void zoom_cam(Camera *pCam, float deltaStep); void rotate_cam(Camera *pCam, float deltaAngle, vect3d &axis); void capture(); static int volCount = 0; static void display() { nTick = clock(); glClear(GL_COLOR_BUFFER_BIT); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0, 1, 0, 1); //%%% glMatrixMode(GL_MODELVIEW); glLoadIdentity(); gluLookAt( 0, 0, 1, 0, 0, 0, 0, 1, 0); glDisable(GL_DEPTH_TEST); //%%% scene.setTFMode(iTfMode); scene.compute(); scene.render( pCMGroup->get_int_val() == 0 ? 
NULL : pImgPath->get_text()); //%%% //Render Haptic graphcis updateHapticGraphics(); //%%% if(shouldDrawAxes) drawAxes(); glutSwapBuffers(); clock_t nCount = clock() - nTick; printf("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b FPS: %.4f", 1000.f / (nCount * 1.0)); hipError_t err = hipGetLastError(); if(err != hipSuccess) { printf("DUDE: %s \n", hipGetErrorString(err)); } // //if(volCount <= 360) //{ // capture(); // //nCurrViewZ += nZoomStep; // //zoom_cam(scene.getCamera(), -nZoomStep); // rotate_cam(scene.getCamera(), -nRotStep, vect3d(0, 0.3, 1)); //} } /// Rotation static void rotate_cam(Camera *pCam, float deltaAngle, vect3d &axis) { PerpCamera *pPCam = dynamic_cast<PerpCamera *>(pCam); // Rotate deltaAngle *= - PIon180; set_matrix(sinf(deltaAngle), cosf(deltaAngle), axis); vect3d tmp; mat_rot(pPCam->_eyePos, tmp); vecCopy(pPCam->_eyePos, tmp); mat_rot(pPCam->_ctrPos, tmp); vecCopy(pPCam->_ctrPos, tmp); mat_rot(pPCam->_upVec, tmp); vecCopy(pPCam->_upVec, tmp); mat_rot(pPCam->_rightVec, tmp); vecCopy(pPCam->_rightVec, tmp); mat_rot(pPCam->_dir, tmp); vecCopy(pPCam->_dir, tmp); } static void zoom_cam(Camera *pCam, float deltaStep) { PerpCamera *pPCam = dynamic_cast<PerpCamera *>(pCam); vect3d deltaVec; vect3d eye(pPCam->_eyePos[0], pPCam->_eyePos[1], pPCam->_eyePos[2]); vect3d ctr(pPCam->_ctrPos[0], pPCam->_ctrPos[1], pPCam->_ctrPos[2]); vect3d viewVec; points2vec(eye, ctr, viewVec); vecCopy(deltaVec, viewVec); normalize(deltaVec); vecScale(deltaVec, deltaStep, deltaVec); //printf("->%.3f,%.3f,%.3f \n", viewVec.data[0], viewVec.data[0], viewVec.data[0]); point2point(pPCam->_eyePos, deltaVec, pPCam->_eyePos); } static void specialKey(int key, int x, int y) { switch(key) { /// /// Zooming /// case GLUT_KEY_UP: //if( (nCurrViewZ - nZoomStep) >= MinViewZ) { nCurrViewZ -= nZoomStep; zoom_cam(scene.getCamera(), nZoomStep); printf("Zoom-in to %d\n", nCurrViewZ); } break; case GLUT_KEY_DOWN: //if( (nCurrViewZ + nZoomStep) <= MaxViewZ) { nCurrViewZ += nZoomStep; zoom_cam(scene.getCamera(), -nZoomStep); printf("Zoom-out to %d\n", nCurrViewZ); } break; /// /// Rotation /// case GLUT_KEY_RIGHT: printf(" Right Rot: %d\n", nRotStep); rotate_cam(scene.getCamera(), -nRotStep, vect3d(0, 0.3, 1)); break; case GLUT_KEY_LEFT: printf(" Left Rot: %d\n", -nRotStep); rotate_cam(scene.getCamera(), nRotStep, vect3d(0, 0.3, 1)); break; } } void capture() { // Taking & Saving the screenshot if(ilutGLScreen()) { ilEnable(IL_FILE_OVERWRITE); char path[20] = {0}; sprintf(path, "Y:/tony/vol_%d.jpg", volCount ++); if(ilSaveImage(path)) { printf("Screenshot saved successfully as \'%s\'!\n", path); } else { printf("Sorry, DevIL cannot save your screenshot...\n"); } } else { printf(" Sorry man, DevIL screenshot taking failed...\n"); } } static void key(unsigned char key, int x, int y) { PerpCamera *pCam = (PerpCamera*)scene.getCamera(); switch (key) { case 'm': printf("DIR: %.5f. %.5f. %.5f\n", pCam->_dir[0], pCam->_dir[1], pCam->_dir[2]); printf("CTR: %.5f. %.5f. %.5f\n", pCam->_eyePos[0], pCam->_eyePos[1], pCam->_eyePos[2]); printf("UP : %.5f. %.5f. 
%.5f\n", pCam->_upVec[0], pCam->_upVec[1], pCam->_upVec[2]); break; case 'c': case 'C': capture(); break; case 27 : case 'q': destroy(); break; } //glutPostRedisplay(); } void printUsage() { char *strUsage = "{ How to Use } \n\n" " Wheel Up\\Down to Zoom-in\\out \n" " Mouse Drag to Rotate \n" " C: save image \n" " Q: quit \n\n"; printf(strUsage); } /// Radio Group Callback GLUI_RadioGroup *pGroup = NULL; GLUI_Panel *pHPPal = NULL; void file_callback(int pParam) { printf("Image Path: %s\n", pImgPath->get_text()); loadTexture(pImgPath->get_text()); } GLUI_Panel *pClrP = NULL; GLUI_Panel *pClrChooseP = NULL; GLUI_Panel *pFileChooseP = NULL; void color_map_choice_callback(int pParam) { int val = pCMGroup->get_int_val(); switch(val) { case 0: // Values pClrP->enable(); pClrChooseP->enable(); pFileChooseP->disable(); break; case 1: // Picture pClrP->disable(); pClrChooseP->disable(); pFileChooseP->enable(); #ifndef DATA_2D pFileChooseP->disable(); #endif loadTexture(pImgPath->get_text()); break; } mMode = val; } void radio_group_callback(int pParam) { int val = pGroup->get_int_val(); switch(val) { case 0: pHPPal->disable(); printf("\nAverage Mode Selected.\n"); break; case 1: pHPPal->disable(); printf("\nSolid Mode Selected.\n"); break; case 2: // hermite pHPPal->enable(); printf("\nHermite Mode Selected.\n"); break; case 3: pHPPal->disable(); printf("\nFirst Mode Selected.\n"); break; } iTfMode = val; } static int pressX = -1; static int pressY = -1; void myGlutMouse(int button, int button_state, int x, int y) { // Zooming // if(button==GLUT_WHEEL_DOWN) { if( (nCurrViewZ + nZoomStep) <= MaxViewZ) { nCurrViewZ += nZoomStep; zoom_cam(scene.getCamera(), -nZoomStep); printf("Zoom-out to %d\n", nCurrViewZ); } } if(button==GLUT_WHEEL_UP) { if( (nCurrViewZ - nZoomStep) >= MinViewZ) { nCurrViewZ -= nZoomStep; zoom_cam(scene.getCamera(), nZoomStep); printf("Zoom-in to %d\n", nCurrViewZ); } } // Rotating // if (button==GLUT_LEFT_BUTTON && button_state==GLUT_DOWN) { pressX = x; pressY = y; //%%% //generate a vector from camera to tool // setCameraToolVector(); //%%% } if (button==GLUT_LEFT_BUTTON && button_state==GLUT_UP) { int dx = x - pressX; int dy = pressY - y; if( (abs(dx) + abs(dy)) > 3 ) { float fSpeed = 0.2; Camera *pCam = scene.getCamera(); // Drag Vec vect3d dragVec; vect3d tmpHori, tmpVert; vecScale(pCam->_rightVec, dx, tmpHori); vecScale(pCam->_upVec, dy, tmpVert); point2point(dragVec, tmpHori, dragVec); point2point(dragVec, tmpVert, dragVec); float dragLen = sqrt( (float)dx * dx + (float)dy * dy ); vect3d axis(0, 1, 0); cross_product(dragVec, pCam->_dir, axis); normalize(axis); rotate_cam(pCam, fSpeed * dragLen, axis); //%%% //Assigns new tool position // setToolPositionGPU(); //%%% } } } /// #include "data_loader.cu" /// /// /// int main(int argc, char* argv[]) { hipGLSetGLDevice(0); printUsage(); // Window Setup glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(WinWidth, WinHeight); glutInitWindowPosition(WinLeft, WinTop); iWinId = glutCreateWindow(WinTitle); glutReshapeFunc(resize); glutDisplayFunc(display); glutIdleFunc(idle); glutKeyboardFunc(key); glutSpecialFunc(specialKey); // scene.init(); global_init(); atexit(destroy); #if 1 /// /// Load Files and copy data to GPU /// const unsigned x_dim = VOL_X; const unsigned y_dim = VOL_Y; const unsigned z_dim = VOL_Z; hipMalloc(&deviceData, sizeof(float) * 3 * x_dim * y_dim * z_dim); // x, y, z if(deviceData == NULL) { printf("CUDA Memory Allocation failed...\n"); system("pause"); } hipMalloc(&idData, 
sizeof(int) * x_dim * y_dim * z_dim); // x, y, z if(idData == NULL) { printf("CUDA Memory Allocation failed...\n"); system("pause"); } //%%%Initialize host data for haptic use hostData = (float*)malloc(sizeof(float) * 3 * x_dim * y_dim * z_dim); hostIdData = (int*)malloc(sizeof(int) * x_dim * y_dim * z_dim); //%%% #ifndef DATA_2D bool bLoaded = loadData( x_dim, y_dim, z_dim, DATA_PATH, deviceData, idData, hostData, hostIdData); #else bool bLoaded = loadData2D( x_dim, y_dim, z_dim, DATA_PATH, deviceData, hostData); #endif scene.setElecData(deviceData, idData); #endif // GLUI GLUI *glui = GLUI_Master.create_glui( "Param Control", 0, WinWidth + WinLeft, WinTop ); // Kernel Part // { GLUI_Panel *pPal0 = glui->add_panel("Nanorod Param"); // Multi-sample Count GLUI_Spinner *pMS = glui->add_spinner_to_panel(pPal0, "Anti-Alias #", GLUI_SPINNER_INT, &nMultiSampleCount); pMS->set_int_limits(1, 40); pMS->set_speed(1); #ifndef DATA_2D pMS->disable(); #endif // Sampling Dist Radius GLUI_Spinner *pDF = glui->add_spinner_to_panel(pPal0, "Sampling Rad #", GLUI_SPINNER_FLOAT, &fSamplingDeltaFactor); pDF->set_float_limits(0.01, 60); pDF->set_speed(1); #ifndef DATA_2D pDF->disable(); #endif // Show Geo GLUI_Checkbox *pGeoChk = glui->add_checkbox_to_panel(pPal0, "Show Nanorod", &bShowGeo); GLUI_Checkbox *pRodChk = glui->add_checkbox_to_panel(pPal0, "Data in Nanorod", &bOnlyInRod); //%%% Type of force to be rendered GLUI_Listbox *pListForce = glui->add_listbox_to_panel(pPal0, "Type of Force", &typeOfForce); { pListForce->add_item(0, "Force 1"); pListForce->add_item(1, "Force 2"); pListForce->add_item(2, "Force 3"); pListForce->add_item(3, "Force 2 & 3"); } GLUI_Checkbox *drawAxesChk = glui->add_checkbox_to_panel(pPal0, "Draw axes", &shouldDrawAxes); //%%% GLUI_Spinner *pNanoAlpha = glui->add_spinner_to_panel(pPal0, "Nanorod Alpha", GLUI_SPINNER_FLOAT, &fNanoAlpha); pNanoAlpha->set_float_limits(0, 1); GLUI_Checkbox *pSliceChk = glui->add_checkbox_to_panel(pPal0, "Show Slice", &bShowSlice); GLUI_Checkbox *pPlaneChk = glui->add_checkbox_to_panel(pPal0, "Show Plane", &bShowPlane); GLUI_Spinner *pPlaneAlpha = glui->add_spinner_to_panel(pPal0, "Plane Alpha", GLUI_SPINNER_FLOAT, &fPlaneAlpha); pNanoAlpha->set_float_limits(0, 1); GLUI_Panel *pTFPal = glui->add_panel("Transfer Function"); #ifndef DATA_2D pTFPal->disable(); #endif pGroup = glui->add_radiogroup_to_panel(pTFPal, NULL, -1, radio_group_callback); glui->add_radiobutton_to_group( pGroup, "Average" ); glui->add_radiobutton_to_group( pGroup, "Solid" ); glui->add_radiobutton_to_group( pGroup, "Hermite" ); pHPPal = glui->add_panel("Hermite Param"); glui->add_edittext_to_panel(pHPPal, "P0 Val", GLUI_EDITTEXT_FLOAT, &fP0_val); glui->add_edittext_to_panel(pHPPal, "P0 Deriv", GLUI_EDITTEXT_FLOAT, &fP0_der); glui->add_edittext_to_panel(pHPPal, "P1 Val", GLUI_EDITTEXT_FLOAT, &fP1_val); glui->add_edittext_to_panel(pHPPal, "P1 Deriv", GLUI_EDITTEXT_FLOAT, &fP1_der); pHPPal->disable(); } { GLUI *glui2 = GLUI_Master.create_glui( "", 0, WinWidth + WinLeft + 200, WinTop); // GLUI_Panel *pPalP = glui2->add_panel("Clip Plane"); glui2->add_checkbox_to_panel(pPalP, "Enable", &bClipPlaneEnabled); glui2->add_statictext_to_panel(pPalP, "Plane Center"); GLUI_Spinner *pc0 = glui2->add_spinner_to_panel(pPalP, "", GLUI_SPINNER_FLOAT, planeCtr + 0); pc0->set_float_limits(-(VOL_X/2), (VOL_X/2)); GLUI_Spinner *pc1 = glui2->add_spinner_to_panel(pPalP, "", GLUI_SPINNER_FLOAT, planeCtr + 1); pc1->set_float_limits(-(VOL_Y/2), (VOL_Y/2)); GLUI_Spinner *pc2 = glui2->add_spinner_to_panel(pPalP, 
"", GLUI_SPINNER_FLOAT, planeCtr + 2); pc2->set_float_limits(-(VOL_Z/2), (VOL_Z/2)); glui2->add_statictext_to_panel(pPalP, "Plane Normal"); GLUI_Spinner *pn0 = glui2->add_spinner_to_panel(pPalP, "", GLUI_SPINNER_FLOAT, planeNorm + 0); GLUI_Spinner *pn1 = glui2->add_spinner_to_panel(pPalP, "", GLUI_SPINNER_FLOAT, planeNorm + 1); GLUI_Spinner *pn2 = glui2->add_spinner_to_panel(pPalP, "", GLUI_SPINNER_FLOAT, planeNorm + 2); // GLUI_Panel *pSelP = glui2->add_panel("Data Selection"); #ifndef DATA_2D pSelP->disable(); #endif glui2->add_checkbox_to_panel(pSelP, "ID 1", mark + 0); glui2->add_checkbox_to_panel(pSelP, "ID 2", mark + 1); glui2->add_checkbox_to_panel(pSelP, "ID 3", mark + 2); glui2->add_checkbox_to_panel(pSelP, "ID 4", mark + 3); } { GLUI *glui3 = GLUI_Master.create_glui( "", 0, WinWidth + WinLeft + 400, WinTop); GLUI_Panel *pChoiceP = glui3->add_panel("Choice"); #ifndef DATA_2D pChoiceP->disable(); #endif pCMGroup = glui->add_radiogroup_to_panel(pChoiceP, NULL, -1, color_map_choice_callback); glui3->add_radiobutton_to_group( pCMGroup, "Values"); glui3->add_radiobutton_to_group( pCMGroup, "Picture"); pCMGroup->set_int_val(mMode); glui3->add_separator(); pClrP = glui3->add_panel("Color Map Values"); GLUI_Spinner *pcl0 = glui3->add_spinner_to_panel(pClrP, "Val-0", GLUI_SPINNER_FLOAT, knotValues + 0); pcl0->set_float_limits(-20, 20); GLUI_Spinner *pcl1 = glui3->add_spinner_to_panel(pClrP, "Val-1", GLUI_SPINNER_FLOAT, knotValues + 1); pcl1->set_float_limits(-20, 20); GLUI_Spinner *pcl2 = glui3->add_spinner_to_panel(pClrP, "Val-2", GLUI_SPINNER_FLOAT, knotValues + 2); pcl2->set_float_limits(-20, 20); GLUI_Spinner *pcl3 = glui3->add_spinner_to_panel(pClrP, "Val-3", GLUI_SPINNER_FLOAT, knotValues + 3); pcl3->set_float_limits(-20, 20); GLUI_Spinner *pcl4 = glui3->add_spinner_to_panel(pClrP, "Val-4", GLUI_SPINNER_FLOAT, knotValues + 4); pcl4->set_float_limits(-20, 20); // pClrChooseP = glui3->add_panel("Color Map Colors"); #ifndef DATA_2D pClrChooseP->disable(); #endif GLUI_Listbox *pList0 = glui3->add_listbox_to_panel(pClrChooseP, "Color 0", knotColors + 0); { pList0->add_item(0, "White"); pList0->add_item(1, "Black"); pList0->add_item(2, "Red"); pList0->add_item(3, "Orange"); pList0->add_item(4, "Yellow"); pList0->add_item(5, "Green"); pList0->add_item(6, "Cyan"); pList0->add_item(7, "Blue"); pList0->add_item(8, "Purple"); pList0->add_item(9, "Gray"); } pList0->set_int_val(knotColors[0]); GLUI_Listbox *pList1 = glui3->add_listbox_to_panel(pClrChooseP, "Color 1", knotColors + 1); { pList1->add_item(0, "White"); pList1->add_item(1, "Black"); pList1->add_item(2, "Red"); pList1->add_item(3, "Orange"); pList1->add_item(4, "Yellow"); pList1->add_item(5, "Green"); pList1->add_item(6, "Cyan"); pList1->add_item(7, "Blue"); pList1->add_item(8, "Purple"); pList1->add_item(9, "Gray"); } pList1->set_int_val(knotColors[1]); GLUI_Listbox *pList2 = glui3->add_listbox_to_panel(pClrChooseP, "Color 2", knotColors + 2); { pList2->add_item(0, "White"); pList2->add_item(1, "Black"); pList2->add_item(2, "Red"); pList2->add_item(3, "Orange"); pList2->add_item(4, "Yellow"); pList2->add_item(5, "Green"); pList2->add_item(6, "Cyan"); pList2->add_item(7, "Blue"); pList2->add_item(8, "Purple"); pList2->add_item(9, "Gray"); } pList2->set_int_val(knotColors[2]); GLUI_Listbox *pList3 = glui3->add_listbox_to_panel(pClrChooseP, "Color 3", knotColors + 3); { pList3->add_item(0, "White"); pList3->add_item(1, "Black"); pList3->add_item(2, "Red"); pList3->add_item(3, "Orange"); pList3->add_item(4, "Yellow"); pList3->add_item(5, 
"Green"); pList3->add_item(6, "Cyan"); pList3->add_item(7, "Blue"); pList3->add_item(8, "Purple"); pList3->add_item(9, "Gray"); } pList3->set_int_val(knotColors[3]); GLUI_Listbox *pList4 = glui3->add_listbox_to_panel(pClrChooseP, "Color 4", knotColors + 4); { pList4->add_item(0, "White"); pList4->add_item(1, "Black"); pList4->add_item(2, "Red"); pList4->add_item(3, "Orange"); pList4->add_item(4, "Yellow"); pList4->add_item(5, "Green"); pList4->add_item(6, "Cyan"); pList4->add_item(7, "Blue"); pList4->add_item(8, "Purple"); pList4->add_item(9, "Gray"); } pList4->set_int_val(knotColors[4]); glui3->add_separator(); pFileChooseP = glui3->add_panel("File Choose"); pImgPath = glui3->add_edittext_to_panel(pFileChooseP, "Img Path", GLUI_EDITTEXT_TEXT, NULL, -1, file_callback); pImgPath->set_text(CM_IMG); glui3->add_spinner_to_panel(pFileChooseP, "Start", GLUI_SPINNER_FLOAT, &fStart); glui3->add_spinner_to_panel(pFileChooseP, "End", GLUI_SPINNER_FLOAT, &fEnd); #ifndef DATA_2D pFileChooseP->disable(); #endif } GLUI_Master.set_glutIdleFunc(idle); GLUI_Master.set_glutMouseFunc(myGlutMouse); /// /// Setup Scene /// { /// Camera /// CamType eCamType = PERSP; SamplingType eSplType = STRATIFIED; //// Top //vect3d ctr(0, 0, InitViewZ); //vect3d view(0, 0, -1); //vect3d up(0, 1, 0); //// Bottom //vect3d ctr(0, 0, -InitViewZ); //vect3d view(0, 0, 1); //vect3d up(0, 1, 0); //// Side //vect3d ctr(InitViewZ, 0, 0); //vect3d view(-1, 0, 0); //vect3d up(0, 0, 1); //// 90 //vect3d ctr(0, InitViewZ, 0); //vect3d view(0, -1, 0); //vect3d up(0, 0, 1); //// look-down-cap //vect3d ctr(0, InitViewZ, InitViewZ * 0.3 - 20); //vect3d view(0, -1, -0.3); //vect3d up(0, 0.3, 1); //// cut-plane //vect3d ctr(5539.27002, 4021.99023, -929.16742); //vect3d view(-121.07777, -87.91293, 20.30981); //vect3d up(0.53435, 0.43612, 5.07333); //// Poly-Plane // vect3d view(-194.79440, 156.42287, -24.24892); // vect3d ctr(5516.48730, -4429.82227, 686.71802); // vect3d up(-0.36831, 0.33132, 5.09597); // Poly-Piece // vect3d view(15.01775, 179.30638, 89.58064); vect3d ctr(-531.08990, -6341.02490, -3167.94580); vect3d up(-0.18784, -2.27411, 4.58339); //// SQW - view //vect3d view(13.06205, -26.43243, -9.57651); //vect3d ctr(-2612.41113, 5286.48486, 1915.30151); //vect3d up(0.66073, -1.43767, 4.86939); // Set Camera Camera *pCam = NULL; switch(eCamType) { case PERSP: pCam = new PerpCamera(300, ctr, up, view, 500, ViewPlaneRatio); break; case ORTHO: pCam = new OrthoCamera(ctr, up, view, 10, ViewPlaneRatio); break; } pCam->setSampler(eSplType); pCam->setMultiSamplingCount(nMultiSampleCount); scene.setCamera(pCam); scene.setAmbiColor(vect3d(0,0,0)); /// Generate Rays for GPU /// sendConstants2GPU(); /// Volume Cube /// vect3d cubeCtr(0, 0, 0); vect3d vertVec(0, 1, 0); vect3d horiVec(0, 0, 1); Cube *pCube0 = new Cube(x_dim, z_dim, y_dim, cubeCtr, vertVec, horiVec); scene.addObject(pCube0); #ifndef DATA_2D //%%%Add my little cube representing the tool Cube *pCubeTool = new Cube(x_dim / 30.f, z_dim / 46.8f, y_dim / 30.f, cubeCtr, vertVec, horiVec); scene.addObject(pCubeTool); scene.setDataDim(x_dim, y_dim, z_dim); #endif int half_x = x_dim / 2; int half_y = y_dim / 2; int half_z = z_dim / 2; Tracer::setVolBBox( - half_x - 1, half_x + 1, - half_y - 1, half_y + 1, - half_z - 1, half_z + 1); copySceneGeomotry(); #if 1 //c10w20 float factorNano = 0.90;//1 float factorSlice = factorNano; float factorCap0 = 0.63;//0.9 float factorCap1 = 0.5; float factorPlane = factorNano; float offset = 0; //50; //43;//34.5; //float factorNano = 1;//1 //float factorSlice 
= factorNano; //float factorCap0 = factorNano;//0.9 //float factorCap1 = factorNano; //float factorPlane = factorNano; //float offset = 34.5; #else float factorNano = 1; float factorSlice = 1; float factorCap0 = 1; float factorCap1 = 1; float factorPlane = 1; float offset = 0; #endif /// /// Nanorod Geometry /// printf("- Loading Nanorod Geometry ..."); { ObjObject *pObj = new ObjObject(0, 0, 0, 0); pObj->load("nanorod.obj"); pObj->setSmooth(false); vect3d spec(1,1,1); vect3d diff(1,1,1); vect3d ambi(1,1,1); pObj->setMaterial(spec, diff, ambi, 70); vect3d axis(1, 0, 0); float angle = -90; pObj->rotate(angle, axis); pObj->scale(0.82 * factorNano, 0.82 * factorNano, 0.70 * factorNano); pObj->translate(0,0,-3 - offset); printf("Done \n"); printf("- Transfering Nanorod Geometry to GPU..."); copyNanoGeo(pObj, offset); printf("Done \n"); } printf("- Loading Slice Geometry ..."); { ObjObject *pObj = new ObjObject(0, 0, 0, 0); pObj->load("slice.obj"); pObj->setSmooth(false); vect3d spec(1,1,1); vect3d diff(1,1,1); vect3d ambi(0.2,0.2,0.2); pObj->setMaterial(spec, diff, ambi, 70); vect3d axis(1, 0, 0); float angle = -90; pObj->rotate(angle, axis); vect3d axis0(0, 0, 1); float angle0 = -120; pObj->rotate(angle0, axis0); pObj->scale(0.82 * factorSlice, 0.82 * factorSlice, 0.70 * factorSlice); pObj->translate(0,0,-3 - offset); printf("Done \n"); printf("- Transfering Slice Geometry to GPU..."); copySlice(pObj); printf("Done \n"); } /// 1. Internal Cap 0 { ObjObject *pObj = new ObjObject(0, 0, 0, 0); pCap0 = pObj; pObj->load("nanorod.obj"); pObj->setSmooth(false); vect3d spec(0.3,0.3,0.3); vect3d diff(0.3, 0.3, 0.3); vect3d ambi(0.3, 0.3, 0.3); pObj->setMaterial(spec, diff, ambi, 70); vect3d axis(1, 0, 0); float angle = -90; pObj->rotate(angle, axis); pObj->scale(0.82 * factorCap0, 0.82 * factorCap0, 0.70 * factorCap0); pObj->translate(0,0,-3-offset); printf("Done \n"); printf("- Transfering Internal Cap Geometry to GPU..."); copyInternalCap0(pObj, offset); printf("Done \n"); } /// 1. Internal Cap 1 { ObjObject *pObj = new ObjObject(0, 0, 0, 0); pObj->load("nanorod.obj"); pObj->setSmooth(false); vect3d spec(0.3,0.3,0.3); vect3d diff(0.3,0.3,0.3); vect3d ambi(0.3, 0.3, 0.3); pObj->setMaterial(spec, diff, ambi, 70); vect3d axis(1, 0, 0); float angle = -90; pObj->rotate(angle, axis); pObj->scale(0.82 * factorCap1, 0.82 * factorCap1, 0.70 * factorCap1); pObj->translate(0,0,-3-offset); printf("Done \n"); printf("- Transfering Internal Cap Geometry to GPU..."); copyInternalCap1(pObj, offset); printf("Done \n"); } /// Load nanoPlane { ObjObject *pObj = new ObjObject(0, 0, 0, 0); #ifndef DATA_2D pObj->load("nanoPlane.obj"); #else pObj->load("2d_plane.obj"); #endif pObj->setSmooth(false); vect3d spec(0.3,0.3,0.3); vect3d diff(0.3,0.3,0.3); vect3d ambi(1,1,1); pObj->setMaterial(spec, diff, ambi, 70); #ifndef DATA_2D vect3d axis(1, 0, 0); float angle = -90; pObj->rotate(angle, axis); pObj->scale(0.82 * factorPlane * 1.01, 0.82 * factorPlane * 1.01, 0.70 * factorPlane * 1.01); pObj->translate(0,0,-3 - offset); #else pObj->scale(0.96,1,1); pObj->translate(-4.5,0, 3); #endif printf("Done \n"); printf("- Transfering NanoPlane Geometry to GPU..."); copyNanoPlane(pObj, offset); printf("Done \n"); } } color_map_choice_callback(0); #ifndef DATA_2D //%%%Haptics initHaptic(); //%%% setupShaders(); #endif /// Go nTick = clock(); glutMainLoop(); //%%% closeHaptic(); //%%% destroy(); return EXIT_SUCCESS; }
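Note on this pair: the .hip entry above is the hipify output of the .cu entry that follows, and only the runtime-API calls differ between them (hipMalloc/cudaMalloc, hipFree/cudaFree, hipGetLastError/cudaGetLastError, hipGetErrorString/cudaGetErrorString, hipError_t/cudaError_t, plus the GL-interop device call). A hypothetical, minimal shim — not part of either file, with USE_HIP as an assumed build flag — sketching those call pairs:

// Hypothetical portability sketch (not from the original sources): the runtime calls
// that differ between the .hip and .cu versions of this program; all GLUT/GLUI/CHAI3D
// code is identical in both files. USE_HIP is an assumed, user-chosen build flag.
#include <cstdio>
#ifdef USE_HIP
  #include <hip/hip_runtime.h>
  #define gpuMalloc         hipMalloc
  #define gpuFree           hipFree
  #define gpuGetLastError   hipGetLastError
  #define gpuGetErrorString hipGetErrorString
  #define gpuSuccess        hipSuccess
  typedef hipError_t gpuError_t;
#else
  #include <cuda_runtime.h>
  #define gpuMalloc         cudaMalloc
  #define gpuFree           cudaFree
  #define gpuGetLastError   cudaGetLastError
  #define gpuGetErrorString cudaGetErrorString
  #define gpuSuccess        cudaSuccess
  typedef cudaError_t gpuError_t;
#endif

int main() {
    float *deviceData = nullptr;
    // mirrors the sizeof(float) * 3 * x_dim * y_dim * z_dim allocation in main()
    gpuMalloc((void**)&deviceData, sizeof(float) * 3 * 64 * 64 * 64);
    gpuError_t err = gpuGetLastError();
    if (err != gpuSuccess) {
        printf("%s\n", gpuGetErrorString(err));
    }
    gpuFree(deviceData);
    return 0;
}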
ab1c0dc7e84b2bc658f691176fcdb88075bb424e.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include "windows.h" #include <time.h> #include "GL/glee.h" #include "consts.h" #include "nanorod.h" #include "global.h" #include "film.h" #include "tracer.h" #include "obj_object.h" #include "texture.h" #include "IL/ilut.h" #include "GL/glut.h" #include "GL/glui.h" #include "gpu_util.h" #include <cuda_runtime.h> #include <cuda_gl_interop.h> #include "gpu_util.cu" #include "tracer.cu" #define _MSVC #include "cWorldVol.h" #include "cCameraVol.h" #include "chai3d/src/chai3d.h" /////////////////////////// float *deviceData = NULL; int *idData = NULL; /////////////////////////// // //***Haptic globals*** float *hostData = NULL; int *hostIdData = NULL; void initHaptic(); // function called before exiting the application void closeHaptic(void); // main graphics callback void updateHapticGraphics(void); // main haptics loop void updateHaptics(void); const int MAX_DEVICES = 1; // a world that contains all objects of the virtual environment cWorldVol* world; // a camera that renders the world in a window display cCameraVol* camera; // a light source to illuminate the objects in the virtual scene cLight *light; // a little "chai3d" bitmap logo at the bottom of the screen cBitmap* logo; // width and height of the current window display int displayW; int displayH; // a haptic device handler cHapticDeviceHandler* handler; // a table containing pointers to all haptic devices detected on this computer cGenericHapticDevice* hapticDevices[MAX_DEVICES]; // a table containing pointers to label which display the position of // each haptic device cLabel* labels[MAX_DEVICES]; cGenericObject* rootLabels; // number of haptic devices detected int numHapticDevices; // table containing a list of 3D cursors for each haptic device cShapeSphere* cursors[MAX_DEVICES]; // table containing a list of lines to display velocity cShapeLine* velocityVectors[MAX_DEVICES]; // material properties used to render the color of the cursors cMaterial matCursorButtonON; cMaterial matCursorButtonOFF; // status of the main simulation haptics loop bool simulationRunning; // root resource path string resourceRoot; // damping mode ON/OFF bool useDamping; // force field mode ON/OFF bool useForceField; // has exited haptics simulation thread bool simulationFinished; //Camera tool vector vect3d vCameraToolVect; //Toool position cVector3d posTool; // a virtual tool representing the haptic device in the scene cGeneric3dofPointer* tool; // a spherical object representing the volume so it can have material properties // and we can change them when volume values change cShapeSphere* object0; double stiffnessMax; double forceMax; double dampingMax; int typeOfForce = 0; int shouldDrawAxes = 1; ObjObject *pCap0; //***Haptic Globals end*** //***Haptic Functions*** void initHaptic() { displayW = 0; displayH = 0; numHapticDevices = 0; simulationRunning = false; useDamping = false; useForceField = true; simulationFinished = false; // create a new world. world = new cWorldVol(); // set the background color of the environment // the color is defined by its (R,G,B) components. 
world->setBackgroundColor(0.0, 0.0, 0.0); // create a camera and insert it into the virtual world camera = new cCameraVol(world); world->addChild(camera); // position and oriente the camera camera->set( cVector3d (0.5, 0.0, 0.0), // camera position (eye) cVector3d (0.0, 0.0, 0.0), // lookat position (target) cVector3d (0.0, 0.0, 1.0)); // direction of the "up" vector // set the near and far clipping planes of the camera // anything in front/behind these clipping planes will not be rendered camera->setClippingPlanes(0.01, 10.0); // create a light source and attach it to the camera light = new cLight(world); camera->addChild(light); // attach light to camera light->setEnabled(true); // enable light source light->setPos(cVector3d( 2.0, 0.5, 1.0)); // position the light source light->setDir(cVector3d(-2.0, 0.5, 1.0)); // define the direction of the light beam //----------------------------------------------------------------------- // HAPTIC DEVICES / TOOLS //----------------------------------------------------------------------- // create a haptic device handler handler = new cHapticDeviceHandler(); // read the number of haptic devices currently connected to the computer numHapticDevices = handler->getNumDevices(); // limit the number of devices to MAX_DEVICES numHapticDevices = cMin(numHapticDevices, MAX_DEVICES); // create a node on which we will attach small labels that display the // position of each haptic device rootLabels = new cGenericObject(); camera->m_front_2Dscene.addChild(rootLabels); // create a small label as title cLabel* titleLabel = new cLabel(); rootLabels->addChild(titleLabel); // define its position, color and string message titleLabel->setPos(0, 30, 0); titleLabel->m_fontColor.set(1.0, 1.0, 1.0); titleLabel->m_string = "Haptic Device Pos [mm]:"; // for each available haptic device, create a 3D cursor // and a small line to show velocity int i = 0; while (i < numHapticDevices) { // get a handle to the next haptic device cGenericHapticDevice* newHapticDevice; handler->getDevice(newHapticDevice, i); // open connection to haptic device newHapticDevice->open(); // initialize haptic device newHapticDevice->initialize(); // store the handle in the haptic device table hapticDevices[i] = newHapticDevice; // retrieve information about the current haptic device cHapticDeviceInfo info = newHapticDevice->getSpecifications(); // create a 3D tool and add it to the world tool = new cGeneric3dofPointer(world); world->addChild(tool); // connect the haptic device to the tool tool->setHapticDevice(hapticDevices[i]); // initialize tool by connecting to haptic device tool->start(); // map the physical workspace of the haptic device to a larger virtual workspace. tool->setWorkspaceRadius(1.0); // define a radius for the tool tool->setRadius(0.03); // read the scale factor between the physical workspace of the haptic // device and the virtual workspace defined for the tool double workspaceScaleFactor = tool->getWorkspaceScaleFactor(); // define a maximum stiffness that can be handled by the current // haptic device. The value is scaled to take into account the // workspace scale factor stiffnessMax = info.m_maxForceStiffness / workspaceScaleFactor; forceMax = info.m_maxForce; // define the maximum damping factor that can be handled by the // current haptic device. 
The The value is scaled to take into account the // workspace scale factor dampingMax = info.m_maxLinearDamping / workspaceScaleFactor; ///////////////////////////////////////////////////////////////////////// // OBJECT 0: "VIBRATIONS" //////////////////////////////////////////////////////////////////////// // temp variable cGenericEffect* newEffect; // create a sphere and define its radius object0 = new cShapeSphere(2.0); // add object to world world->addChild(object0); // set the position of the object at the center of the world object0->setPos(0.0, 0.0, 0.0); object0->setUseTexture(false); // create a haptic viscous effect newEffect = new cEffectVibration(object0); object0->addEffect(newEffect); newEffect = new cEffectSurface(object0); object0->addEffect(newEffect); //newEffect = new cEffectViscosity(object0); //object0->addEffect(newEffect); //newEffect = new cEffectMagnet(object0); //object0->addEffect(newEffect); // create a cursor by setting its radius cShapeSphere* newCursor = new cShapeSphere(0.01); // add cursor to the world world->addChild(newCursor); // add cursor to the cursor table cursors[i] = newCursor; // create a small line to illustrate velocity cShapeLine* newLine = new cShapeLine(cVector3d(0,0,0), cVector3d(0,0,0)); velocityVectors[i] = newLine; // add line to the world world->addChild(newLine); // create a string that concatenates the device number and model name. string strID; cStr(strID, i); string strDevice = "#" + strID + " - " +info.m_modelName; // attach a small label next to the cursor to indicate device information cLabel* newLabel = new cLabel(); newCursor->addChild(newLabel); newLabel->m_string = strDevice; newLabel->setPos(0.00, 0.02, 0.00); newLabel->m_fontColor.set(1.0, 1.0, 1.0); // if the device provided orientation sensing (stylus), a reference // frame is displayed if (info.m_sensedRotation == true) { // display a reference frame newCursor->setShowFrame(true); // set the size of the reference frame newCursor->setFrameSize(0.05, 0.05); } // crate a small label to indicate the position of the device cLabel* newPosLabel = new cLabel(); rootLabels->addChild(newPosLabel); newPosLabel->setPos(0, -20 * i, 0); newPosLabel->m_fontColor.set(0.6, 0.6, 0.6); labels[i] = newPosLabel; // increment counter i++; } // simulation in now running simulationRunning = true; // create a thread which starts the main haptics rendering loop cThread* hapticsThread = new cThread(); hapticsThread->set(updateHaptics, CHAI_THREAD_PRIORITY_HAPTICS); } void closeHaptic(void) { // stop the simulation simulationRunning = false; // wait for graphics and haptics loops to terminate while (!simulationFinished) { cSleepMs(100); } // close all haptic devices int i=0; while (i < numHapticDevices) { hapticDevices[i]->close(); i++; } } float getElecCellValue(int x, int y, int z, float *elecData, int *idData) { if( x < 0 || x >= VOL_X || y < 0 || y >= VOL_Y || z < 0 || z >= VOL_Z ) // Hard-code it for now { return 0; } unsigned offset = x + y * VOL_X + z * VOL_X * VOL_Y; return *(elecData + offset); } cVector3d toolCoord2VolCoord(cVector3d toolCoord) { //TODO: now it's rotation dependent, it shouldn't cVector3d result; float hapticWorkSpaceRadius = 1.f; if(numHapticDevices > 0) hapticWorkSpaceRadius = hapticDevices[0]->getSpecifications().m_workspaceRadius; result.x = toolCoord.y / (hapticWorkSpaceRadius * 2.f) * VOL_X; result.y = toolCoord.x / (hapticWorkSpaceRadius * 2.f) * VOL_Y; result.z = toolCoord.z / (hapticWorkSpaceRadius * 2.f) * VOL_Z; /* %%%Before cVector3d result; result.x = 
(toolCoord.y * VOL_X) / 0.4f; result.y = (-toolCoord.x * VOL_Y) / 0.4f; result.z = (toolCoord.z * VOL_Z) / 0.4f; */ return result; } void setCameraToolVector() { if(numHapticDevices > 0) { //Save previous camera before doing anything scene.setPreviousCameraCenter(scene.getCamera()); //Get vector: camera center -> tool hapticDevices[0]->getPosition(posTool); posTool = toolCoord2VolCoord(posTool); vect3d convertedPosTool(posTool.x, posTool.y, posTool.z); //Vector: camera center -> tool vCameraToolVect = convertedPosTool - *scene.getPreviousCameraCenter(); } } //Use the camera tool vector to set the new position of the tool object in GPU so it does not move //when transforms are made to the scene void setToolPositionGPU() { if(numHapticDevices > 0) { //TODO: for now, just rotation //Transform cameraToolVect with the same transformations as the ones for the camera vect3d posNewCameraTool; vect3d vtmp; mat_rot(vCameraToolVect, vtmp); //Add this vector to the camera center point vect3d *posCamera = scene.computeCameraCenter(scene.getCamera()); posNewCameraTool = *posCamera + vtmp; //Generate vector: cameraToolVect -> posNewCameraTool vect3d vOldToolNewTool = posNewCameraTool - (vCameraToolVect + *scene.getPreviousCameraCenter()); //Translate the object in the GPU setObjectCenterGPU(vOldToolNewTool, 1); } } //Just detect the tool's position and pass it to the GPU void moveToolPositionGPU() { if(numHapticDevices > 0) { //Transform cameraToolVect with the same transformations as the ones for the camera static cVector3d previousPosTool(0,0,0); cVector3d translation = posTool - previousPosTool; vect3d convertedTranslation(translation.x, translation.y, translation.z); //Translate the object in the GPU translateObjectGPU(convertedTranslation, 1); previousPosTool = posTool; } } //Just detect the tool's position and translate coordinates to volume coordinates void moveToolPositionCPU() { if(numHapticDevices > 0) { //Transform cameraToolVect with the same transformations as the ones for the camera cVector3d newPosTool; //Get vector: camera center -> tool hapticDevices[0]->getPosition(newPosTool); //Account for the tool's actual imprecisions //%%%newPosTool.mul(5); newPosTool = toolCoord2VolCoord(newPosTool); posTool = newPosTool; } } void updateHapticGraphics(void) { // update content of position label // read position of device an convert into millimeters if(numHapticDevices > 0) { //This is for drawing from the volume's camera moveToolPositionGPU(); //This is for drawing from the haptic's camera cVector3d pos; hapticDevices[0]->getPosition(pos); pos.mul(5); // create a string that concatenates the device number and its position. 
string strID; cStr(strID, 0); string strLabel = "#" + strID + " x: "; cStr(strLabel, pos.x, 2); strLabel = strLabel + " y: "; cStr(strLabel, pos.y, 2); strLabel = strLabel + " z: "; cStr(strLabel, pos.z, 2); labels[0]->m_string = strLabel; } //TODO: need to draw it correctly // camera->renderView(displayW, displayH); // check for any OpenGL errors GLenum err; err = glGetError(); if (err != GL_NO_ERROR) printf("Error: %s\n", gluErrorString(err)); } void ResetForces() { //Vibration object0->m_material.setVibrationFrequency(0); object0->m_material.setVibrationAmplitude(0); //Friction object0->m_material.setStiffness(0); object0->m_material.setStaticFriction(0); object0->m_material.setViscosity(0); } void updateHaptics(void) { // main haptic simulation loop while(simulationRunning) { if(numHapticDevices > 0) { // read position of haptic device cVector3d newPosition; hapticDevices[0]->getPosition(newPosition); // read orientation of haptic device cMatrix3d newRotation; hapticDevices[0]->getRotation(newRotation); // update position and orientation of cursor cursors[0]->setPos(newPosition); cursors[0]->setRot(newRotation); // read linear velocity from device cVector3d linearVelocity; hapticDevices[0]->getLinearVelocity(linearVelocity); // update arrow velocityVectors[0]->m_pointA = newPosition; velocityVectors[0]->m_pointB = cAdd(newPosition, linearVelocity); // read user button status bool buttonStatus; hapticDevices[0]->getUserSwitch(0, buttonStatus); // adjustthe color of the cursor according to the status of // the user switch (ON = TRUE / OFF = FALSE) if (buttonStatus) { cursors[0]->m_material = matCursorButtonON; } else { cursors[0]->m_material = matCursorButtonOFF; } //get value from data at the position of the tool (the converted position) moveToolPositionCPU(); float dataRange = fEnd-fStart; cVector3d newForce (0,0,0); float val = getElecCellValue(posTool.x + VOL_X/2.f, posTool.y + VOL_Y/2.f, posTool.z+VOL_Z/2.f, hostData, hostIdData) / dataRange; //printf("%f\n",val); // set haptic properties according to the voxel inside the volume // NOTE that there are two ways this is being done, first, object0 // has some properties, then some forces will be applied through the // tool variable and also some forces are applied directly to the // haptic device through hapticDevices[0]->setForce() if(typeOfForce == 1) { ResetForces(); //Vibration object0->m_material.setVibrationFrequency(50.f); object0->m_material.setVibrationAmplitude(1.0 * forceMax * val); } //Magnetic Force //object0->m_material.setStiffness(0.1 * stiffnessMax * val); //object0->m_material.setMagnetMaxForce(0.1 * 2000.0 * val); //object0->m_material.setMagnetMaxDistance(0.05); //object0->m_material.setViscosity(1.0 * dampingMax); else if(typeOfForce == 2) { ResetForces(); //Friction object0->m_material.setStiffness(0.1 * stiffnessMax * val); object0->m_material.setDynamicFriction(1.0 * 2000.0 * val); object0->m_material.setViscosity(1.0 * dampingMax); } else if (typeOfForce == 3) { ResetForces(); //Vibration object0->m_material.setVibrationFrequency(50.f); object0->m_material.setVibrationAmplitude(1.0 * forceMax * val); //Friction object0->m_material.setStiffness(0.1 * stiffnessMax * val); object0->m_material.setStaticFriction(1.0 * 2000.0 * val); object0->m_material.setViscosity(1.0 * dampingMax); } /*Question: which one reveals more the high value areas? 
Which one reveals more the structure of the rod?*/ // apply force field if (typeOfForce == 0) { //Compute force double Kp = 2000.0 * val; // [N/m] cVector3d force = cMul(-Kp, newPosition); newForce.add(force); //Damp cHapticDeviceInfo info = hapticDevices[0]->getSpecifications(); double Kv = info.m_maxLinearDamping*val; cVector3d force2 = cMul(-Kv, linearVelocity); newForce.add(force2); } // compute global reference frames for each object world->computeGlobalPositions(true); // 4 position and orientation of tool tool->updatePose(); // compute interaction forces tool->computeInteractionForces(); if(typeOfForce == 0) { // send computed force to haptic device (direct forces) hapticDevices[0]->setForce(newForce); } else { // send forces to device (like vibration) tool->applyForces(); } } } // exit haptics thread simulationFinished = true; } //***Haptic Functions End*** //Shader //shader variables GLuint fragShader; GLuint vertShader; GLuint program; GLint fragCompiled; GLint vertCompiled; const char *vertProgram; const char *fragProgram; void setupShaders() { if (!GL_ARB_vertex_program) { printf("No shaders!"); return; } FILE *file; file = fopen("./blend.frag","r"); if(file==NULL) { MessageBox(NULL,"Couldn't open frag file.","ERROR",MB_OK|MB_ICONEXCLAMATION); exit(0); } char *fragProg; int size=0; fseek(file, 0, SEEK_END); size = ftell(file)+1; fragProg = new char[size]; fseek(file, 0, SEEK_SET); size = fread(fragProg,1,size,file); fragProg[size]='\0'; fclose(file); fragProgram = fragProg; file = fopen("blend.vert","r"); if(file==NULL) { MessageBox(NULL,"Couldn't open vert file.","ERROR",MB_OK|MB_ICONEXCLAMATION); exit(0); } char *vertProg; size=0; fseek(file, 0, SEEK_END); size = ftell(file)+1; vertProg = new char[size]; fseek(file, 0, SEEK_SET); size = fread(vertProg,1,size,file); vertProg[size]='\0'; fclose(file); vertProgram = vertProg; vertShader = glCreateShader(GL_VERTEX_SHADER); fragShader = glCreateShader(GL_FRAGMENT_SHADER); glShaderSource(fragShader, 1, &fragProgram, NULL); glShaderSource(vertShader, 1, &vertProgram, NULL); glCompileShader(vertShader); // getErrors(); // printShaderInfoLog(vertShader); glGetShaderiv(vertShader, GL_COMPILE_STATUS, &vertCompiled); glCompileShader(fragShader); // getErrors(); // printShaderInfoLog(fragShader); glGetShaderiv(fragShader, GL_COMPILE_STATUS, &fragCompiled); program = glCreateProgram(); glAttachShader(program, vertShader); glAttachShader(program, fragShader); glLinkProgram(program); glUseProgram(program); } void drawAxes() { glDisable(GL_TEXTURE_2D); glMatrixMode(GL_PROJECTION); glPushMatrix(); glLoadIdentity(); glOrtho(-100.f, 100.f, -100.f, 100.f, -10000.f, 10000.f); glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); glLineWidth(2); glShadeModel(GL_SMOOTH); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glPushMatrix(); glRotatef(2.f, 1, 1, 0); glBegin(GL_LINES); { glColor4f(1.f, 0, 0, 0.2f); glVertex3f(-100.f, 0,0); glVertex3f(100.f,0,0); glColor4f(0, 1.f, 0, 0.2f); glVertex3f(0, -100.f,0); glVertex3f(0,100.f,0); glColor4f(0, 0, 1.f, 0); glVertex3f(0, 0,-1000.f); glColor4f(0, 0, 1.f, 1.f); glVertex3f(0,0,1000.f); } glEnd(); glPopMatrix(); glLineWidth(1); glDisable(GL_BLEND); glMatrixMode(GL_PROJECTION); glPopMatrix(); glMatrixMode(GL_MODELVIEW); glPopMatrix(); glEnable(GL_TEXTURE_2D); // check error GLenum err = glGetError(); if(err != GL_NO_ERROR) { printf("[GL ERROR] %s - %d : 0x%x\n", __FILE__, __LINE__, err); } } void drawCap() { glDisable(GL_TEXTURE_2D); glMatrixMode(GL_PROJECTION); glPushMatrix(); 
glLoadIdentity(); glOrtho(-1000.f, 1000.f, -1000.f, 1000.f, -10000.f, 10000.f); glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); glLineWidth(2); glShadeModel(GL_SMOOTH); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glPushMatrix(); glBegin(GL_TRIANGLES); { for(int i =0; i < pCap0->getTriCount(); i++) { glVertex3f( pCap0->getTriangle(i)->_vertices->data[0], pCap0->getTriangle(i)->_vertices->data[1], pCap0->getTriangle(i)->_vertices->data[2]); } } glEnd(); glPopMatrix(); glLineWidth(1); glDisable(GL_BLEND); glMatrixMode(GL_PROJECTION); glPopMatrix(); glMatrixMode(GL_MODELVIEW); glPopMatrix(); glEnable(GL_TEXTURE_2D); // check error GLenum err = glGetError(); if(err != GL_NO_ERROR) { printf("[GL ERROR] %s - %d : 0x%x\n", __FILE__, __LINE__, err); } } // static void resize(int w, int h) { //%%% displayW = w; displayH = h; //%%% glViewport(0, 0, w, h); //%%% // update position of labels rootLabels->setPos(10, displayH-70, 0); //%%% } static void destroy() { if(deviceData) { cudaFree(deviceData); deviceData = NULL; } if(idData) { cudaFree(idData); idData = NULL; } if(deviceTexData) { cudaFree(deviceTexData); } nanoGeoDestroy(); nanoPlaneDestroy(); internalCap0Destroy(); internalCap1Destroy(); SliceDestroy(); global_destroy(); exit(EXIT_SUCCESS); } int iWinId; void idle() { glutSetWindow(iWinId); glutPostRedisplay(); } clock_t nTick = 0; GLUI_RadioGroup *pCMGroup = NULL; GLUI_EditText *pImgPath = NULL; // Zooming static int InitViewZ = 6100; const int MaxViewZ = 12000; const int MinViewZ = 100; static int nCurrViewZ = InitViewZ; static int nZoomStep = -10; static int nRotStep = 1; void zoom_cam(Camera *pCam, float deltaStep); void rotate_cam(Camera *pCam, float deltaAngle, vect3d &axis); void capture(); static int volCount = 0; static void display() { nTick = clock(); glClear(GL_COLOR_BUFFER_BIT); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0, 1, 0, 1); //%%% glMatrixMode(GL_MODELVIEW); glLoadIdentity(); gluLookAt( 0, 0, 1, 0, 0, 0, 0, 1, 0); glDisable(GL_DEPTH_TEST); //%%% scene.setTFMode(iTfMode); scene.compute(); scene.render( pCMGroup->get_int_val() == 0 ? 
NULL : pImgPath->get_text()); //%%% //Render Haptic graphcis updateHapticGraphics(); //%%% if(shouldDrawAxes) drawAxes(); glutSwapBuffers(); clock_t nCount = clock() - nTick; printf("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b FPS: %.4f", 1000.f / (nCount * 1.0)); cudaError_t err = cudaGetLastError(); if(err != cudaSuccess) { printf("DUDE: %s \n", cudaGetErrorString(err)); } // //if(volCount <= 360) //{ // capture(); // //nCurrViewZ += nZoomStep; // //zoom_cam(scene.getCamera(), -nZoomStep); // rotate_cam(scene.getCamera(), -nRotStep, vect3d(0, 0.3, 1)); //} } /// Rotation static void rotate_cam(Camera *pCam, float deltaAngle, vect3d &axis) { PerpCamera *pPCam = dynamic_cast<PerpCamera *>(pCam); // Rotate deltaAngle *= - PIon180; set_matrix(sinf(deltaAngle), cosf(deltaAngle), axis); vect3d tmp; mat_rot(pPCam->_eyePos, tmp); vecCopy(pPCam->_eyePos, tmp); mat_rot(pPCam->_ctrPos, tmp); vecCopy(pPCam->_ctrPos, tmp); mat_rot(pPCam->_upVec, tmp); vecCopy(pPCam->_upVec, tmp); mat_rot(pPCam->_rightVec, tmp); vecCopy(pPCam->_rightVec, tmp); mat_rot(pPCam->_dir, tmp); vecCopy(pPCam->_dir, tmp); } static void zoom_cam(Camera *pCam, float deltaStep) { PerpCamera *pPCam = dynamic_cast<PerpCamera *>(pCam); vect3d deltaVec; vect3d eye(pPCam->_eyePos[0], pPCam->_eyePos[1], pPCam->_eyePos[2]); vect3d ctr(pPCam->_ctrPos[0], pPCam->_ctrPos[1], pPCam->_ctrPos[2]); vect3d viewVec; points2vec(eye, ctr, viewVec); vecCopy(deltaVec, viewVec); normalize(deltaVec); vecScale(deltaVec, deltaStep, deltaVec); //printf("->%.3f,%.3f,%.3f \n", viewVec.data[0], viewVec.data[0], viewVec.data[0]); point2point(pPCam->_eyePos, deltaVec, pPCam->_eyePos); } static void specialKey(int key, int x, int y) { switch(key) { /// /// Zooming /// case GLUT_KEY_UP: //if( (nCurrViewZ - nZoomStep) >= MinViewZ) { nCurrViewZ -= nZoomStep; zoom_cam(scene.getCamera(), nZoomStep); printf("Zoom-in to %d\n", nCurrViewZ); } break; case GLUT_KEY_DOWN: //if( (nCurrViewZ + nZoomStep) <= MaxViewZ) { nCurrViewZ += nZoomStep; zoom_cam(scene.getCamera(), -nZoomStep); printf("Zoom-out to %d\n", nCurrViewZ); } break; /// /// Rotation /// case GLUT_KEY_RIGHT: printf(" Right Rot: %d\n", nRotStep); rotate_cam(scene.getCamera(), -nRotStep, vect3d(0, 0.3, 1)); break; case GLUT_KEY_LEFT: printf(" Left Rot: %d\n", -nRotStep); rotate_cam(scene.getCamera(), nRotStep, vect3d(0, 0.3, 1)); break; } } void capture() { // Taking & Saving the screenshot if(ilutGLScreen()) { ilEnable(IL_FILE_OVERWRITE); char path[20] = {0}; sprintf(path, "Y:/tony/vol_%d.jpg", volCount ++); if(ilSaveImage(path)) { printf("Screenshot saved successfully as \'%s\'!\n", path); } else { printf("Sorry, DevIL cannot save your screenshot...\n"); } } else { printf(" Sorry man, DevIL screenshot taking failed...\n"); } } static void key(unsigned char key, int x, int y) { PerpCamera *pCam = (PerpCamera*)scene.getCamera(); switch (key) { case 'm': printf("DIR: %.5f. %.5f. %.5f\n", pCam->_dir[0], pCam->_dir[1], pCam->_dir[2]); printf("CTR: %.5f. %.5f. %.5f\n", pCam->_eyePos[0], pCam->_eyePos[1], pCam->_eyePos[2]); printf("UP : %.5f. %.5f. 
%.5f\n", pCam->_upVec[0], pCam->_upVec[1], pCam->_upVec[2]); break; case 'c': case 'C': capture(); break; case 27 : case 'q': destroy(); break; } //glutPostRedisplay(); } void printUsage() { char *strUsage = "{ How to Use } \n\n" " Wheel Up\\Down to Zoom-in\\out \n" " Mouse Drag to Rotate \n" " C: save image \n" " Q: quit \n\n"; printf(strUsage); } /// Radio Group Callback GLUI_RadioGroup *pGroup = NULL; GLUI_Panel *pHPPal = NULL; void file_callback(int pParam) { printf("Image Path: %s\n", pImgPath->get_text()); loadTexture(pImgPath->get_text()); } GLUI_Panel *pClrP = NULL; GLUI_Panel *pClrChooseP = NULL; GLUI_Panel *pFileChooseP = NULL; void color_map_choice_callback(int pParam) { int val = pCMGroup->get_int_val(); switch(val) { case 0: // Values pClrP->enable(); pClrChooseP->enable(); pFileChooseP->disable(); break; case 1: // Picture pClrP->disable(); pClrChooseP->disable(); pFileChooseP->enable(); #ifndef DATA_2D pFileChooseP->disable(); #endif loadTexture(pImgPath->get_text()); break; } mMode = val; } void radio_group_callback(int pParam) { int val = pGroup->get_int_val(); switch(val) { case 0: pHPPal->disable(); printf("\nAverage Mode Selected.\n"); break; case 1: pHPPal->disable(); printf("\nSolid Mode Selected.\n"); break; case 2: // hermite pHPPal->enable(); printf("\nHermite Mode Selected.\n"); break; case 3: pHPPal->disable(); printf("\nFirst Mode Selected.\n"); break; } iTfMode = val; } static int pressX = -1; static int pressY = -1; void myGlutMouse(int button, int button_state, int x, int y) { // Zooming // if(button==GLUT_WHEEL_DOWN) { if( (nCurrViewZ + nZoomStep) <= MaxViewZ) { nCurrViewZ += nZoomStep; zoom_cam(scene.getCamera(), -nZoomStep); printf("Zoom-out to %d\n", nCurrViewZ); } } if(button==GLUT_WHEEL_UP) { if( (nCurrViewZ - nZoomStep) >= MinViewZ) { nCurrViewZ -= nZoomStep; zoom_cam(scene.getCamera(), nZoomStep); printf("Zoom-in to %d\n", nCurrViewZ); } } // Rotating // if (button==GLUT_LEFT_BUTTON && button_state==GLUT_DOWN) { pressX = x; pressY = y; //%%% //generate a vector from camera to tool // setCameraToolVector(); //%%% } if (button==GLUT_LEFT_BUTTON && button_state==GLUT_UP) { int dx = x - pressX; int dy = pressY - y; if( (abs(dx) + abs(dy)) > 3 ) { float fSpeed = 0.2; Camera *pCam = scene.getCamera(); // Drag Vec vect3d dragVec; vect3d tmpHori, tmpVert; vecScale(pCam->_rightVec, dx, tmpHori); vecScale(pCam->_upVec, dy, tmpVert); point2point(dragVec, tmpHori, dragVec); point2point(dragVec, tmpVert, dragVec); float dragLen = sqrt( (float)dx * dx + (float)dy * dy ); vect3d axis(0, 1, 0); cross_product(dragVec, pCam->_dir, axis); normalize(axis); rotate_cam(pCam, fSpeed * dragLen, axis); //%%% //Assigns new tool position // setToolPositionGPU(); //%%% } } } /// #include "data_loader.cu" /// /// /// int main(int argc, char* argv[]) { cudaGLSetGLDevice(0); printUsage(); // Window Setup glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(WinWidth, WinHeight); glutInitWindowPosition(WinLeft, WinTop); iWinId = glutCreateWindow(WinTitle); glutReshapeFunc(resize); glutDisplayFunc(display); glutIdleFunc(idle); glutKeyboardFunc(key); glutSpecialFunc(specialKey); // scene.init(); global_init(); atexit(destroy); #if 1 /// /// Load Files and copy data to GPU /// const unsigned x_dim = VOL_X; const unsigned y_dim = VOL_Y; const unsigned z_dim = VOL_Z; cudaMalloc(&deviceData, sizeof(float) * 3 * x_dim * y_dim * z_dim); // x, y, z if(deviceData == NULL) { printf("CUDA Memory Allocation failed...\n"); system("pause"); } cudaMalloc(&idData, 
sizeof(int) * x_dim * y_dim * z_dim); // x, y, z if(idData == NULL) { printf("CUDA Memory Allocation failed...\n"); system("pause"); } //%%%Initialize host data for haptic use hostData = (float*)malloc(sizeof(float) * 3 * x_dim * y_dim * z_dim); hostIdData = (int*)malloc(sizeof(int) * x_dim * y_dim * z_dim); //%%% #ifndef DATA_2D bool bLoaded = loadData( x_dim, y_dim, z_dim, DATA_PATH, deviceData, idData, hostData, hostIdData); #else bool bLoaded = loadData2D( x_dim, y_dim, z_dim, DATA_PATH, deviceData, hostData); #endif scene.setElecData(deviceData, idData); #endif // GLUI GLUI *glui = GLUI_Master.create_glui( "Param Control", 0, WinWidth + WinLeft, WinTop ); // Kernel Part // { GLUI_Panel *pPal0 = glui->add_panel("Nanorod Param"); // Multi-sample Count GLUI_Spinner *pMS = glui->add_spinner_to_panel(pPal0, "Anti-Alias #", GLUI_SPINNER_INT, &nMultiSampleCount); pMS->set_int_limits(1, 40); pMS->set_speed(1); #ifndef DATA_2D pMS->disable(); #endif // Sampling Dist Radius GLUI_Spinner *pDF = glui->add_spinner_to_panel(pPal0, "Sampling Rad #", GLUI_SPINNER_FLOAT, &fSamplingDeltaFactor); pDF->set_float_limits(0.01, 60); pDF->set_speed(1); #ifndef DATA_2D pDF->disable(); #endif // Show Geo GLUI_Checkbox *pGeoChk = glui->add_checkbox_to_panel(pPal0, "Show Nanorod", &bShowGeo); GLUI_Checkbox *pRodChk = glui->add_checkbox_to_panel(pPal0, "Data in Nanorod", &bOnlyInRod); //%%% Type of force to be rendered GLUI_Listbox *pListForce = glui->add_listbox_to_panel(pPal0, "Type of Force", &typeOfForce); { pListForce->add_item(0, "Force 1"); pListForce->add_item(1, "Force 2"); pListForce->add_item(2, "Force 3"); pListForce->add_item(3, "Force 2 & 3"); } GLUI_Checkbox *drawAxesChk = glui->add_checkbox_to_panel(pPal0, "Draw axes", &shouldDrawAxes); //%%% GLUI_Spinner *pNanoAlpha = glui->add_spinner_to_panel(pPal0, "Nanorod Alpha", GLUI_SPINNER_FLOAT, &fNanoAlpha); pNanoAlpha->set_float_limits(0, 1); GLUI_Checkbox *pSliceChk = glui->add_checkbox_to_panel(pPal0, "Show Slice", &bShowSlice); GLUI_Checkbox *pPlaneChk = glui->add_checkbox_to_panel(pPal0, "Show Plane", &bShowPlane); GLUI_Spinner *pPlaneAlpha = glui->add_spinner_to_panel(pPal0, "Plane Alpha", GLUI_SPINNER_FLOAT, &fPlaneAlpha); pNanoAlpha->set_float_limits(0, 1); GLUI_Panel *pTFPal = glui->add_panel("Transfer Function"); #ifndef DATA_2D pTFPal->disable(); #endif pGroup = glui->add_radiogroup_to_panel(pTFPal, NULL, -1, radio_group_callback); glui->add_radiobutton_to_group( pGroup, "Average" ); glui->add_radiobutton_to_group( pGroup, "Solid" ); glui->add_radiobutton_to_group( pGroup, "Hermite" ); pHPPal = glui->add_panel("Hermite Param"); glui->add_edittext_to_panel(pHPPal, "P0 Val", GLUI_EDITTEXT_FLOAT, &fP0_val); glui->add_edittext_to_panel(pHPPal, "P0 Deriv", GLUI_EDITTEXT_FLOAT, &fP0_der); glui->add_edittext_to_panel(pHPPal, "P1 Val", GLUI_EDITTEXT_FLOAT, &fP1_val); glui->add_edittext_to_panel(pHPPal, "P1 Deriv", GLUI_EDITTEXT_FLOAT, &fP1_der); pHPPal->disable(); } { GLUI *glui2 = GLUI_Master.create_glui( "", 0, WinWidth + WinLeft + 200, WinTop); // GLUI_Panel *pPalP = glui2->add_panel("Clip Plane"); glui2->add_checkbox_to_panel(pPalP, "Enable", &bClipPlaneEnabled); glui2->add_statictext_to_panel(pPalP, "Plane Center"); GLUI_Spinner *pc0 = glui2->add_spinner_to_panel(pPalP, "", GLUI_SPINNER_FLOAT, planeCtr + 0); pc0->set_float_limits(-(VOL_X/2), (VOL_X/2)); GLUI_Spinner *pc1 = glui2->add_spinner_to_panel(pPalP, "", GLUI_SPINNER_FLOAT, planeCtr + 1); pc1->set_float_limits(-(VOL_Y/2), (VOL_Y/2)); GLUI_Spinner *pc2 = glui2->add_spinner_to_panel(pPalP, 
"", GLUI_SPINNER_FLOAT, planeCtr + 2); pc2->set_float_limits(-(VOL_Z/2), (VOL_Z/2)); glui2->add_statictext_to_panel(pPalP, "Plane Normal"); GLUI_Spinner *pn0 = glui2->add_spinner_to_panel(pPalP, "", GLUI_SPINNER_FLOAT, planeNorm + 0); GLUI_Spinner *pn1 = glui2->add_spinner_to_panel(pPalP, "", GLUI_SPINNER_FLOAT, planeNorm + 1); GLUI_Spinner *pn2 = glui2->add_spinner_to_panel(pPalP, "", GLUI_SPINNER_FLOAT, planeNorm + 2); // GLUI_Panel *pSelP = glui2->add_panel("Data Selection"); #ifndef DATA_2D pSelP->disable(); #endif glui2->add_checkbox_to_panel(pSelP, "ID 1", mark + 0); glui2->add_checkbox_to_panel(pSelP, "ID 2", mark + 1); glui2->add_checkbox_to_panel(pSelP, "ID 3", mark + 2); glui2->add_checkbox_to_panel(pSelP, "ID 4", mark + 3); } { GLUI *glui3 = GLUI_Master.create_glui( "", 0, WinWidth + WinLeft + 400, WinTop); GLUI_Panel *pChoiceP = glui3->add_panel("Choice"); #ifndef DATA_2D pChoiceP->disable(); #endif pCMGroup = glui->add_radiogroup_to_panel(pChoiceP, NULL, -1, color_map_choice_callback); glui3->add_radiobutton_to_group( pCMGroup, "Values"); glui3->add_radiobutton_to_group( pCMGroup, "Picture"); pCMGroup->set_int_val(mMode); glui3->add_separator(); pClrP = glui3->add_panel("Color Map Values"); GLUI_Spinner *pcl0 = glui3->add_spinner_to_panel(pClrP, "Val-0", GLUI_SPINNER_FLOAT, knotValues + 0); pcl0->set_float_limits(-20, 20); GLUI_Spinner *pcl1 = glui3->add_spinner_to_panel(pClrP, "Val-1", GLUI_SPINNER_FLOAT, knotValues + 1); pcl1->set_float_limits(-20, 20); GLUI_Spinner *pcl2 = glui3->add_spinner_to_panel(pClrP, "Val-2", GLUI_SPINNER_FLOAT, knotValues + 2); pcl2->set_float_limits(-20, 20); GLUI_Spinner *pcl3 = glui3->add_spinner_to_panel(pClrP, "Val-3", GLUI_SPINNER_FLOAT, knotValues + 3); pcl3->set_float_limits(-20, 20); GLUI_Spinner *pcl4 = glui3->add_spinner_to_panel(pClrP, "Val-4", GLUI_SPINNER_FLOAT, knotValues + 4); pcl4->set_float_limits(-20, 20); // pClrChooseP = glui3->add_panel("Color Map Colors"); #ifndef DATA_2D pClrChooseP->disable(); #endif GLUI_Listbox *pList0 = glui3->add_listbox_to_panel(pClrChooseP, "Color 0", knotColors + 0); { pList0->add_item(0, "White"); pList0->add_item(1, "Black"); pList0->add_item(2, "Red"); pList0->add_item(3, "Orange"); pList0->add_item(4, "Yellow"); pList0->add_item(5, "Green"); pList0->add_item(6, "Cyan"); pList0->add_item(7, "Blue"); pList0->add_item(8, "Purple"); pList0->add_item(9, "Gray"); } pList0->set_int_val(knotColors[0]); GLUI_Listbox *pList1 = glui3->add_listbox_to_panel(pClrChooseP, "Color 1", knotColors + 1); { pList1->add_item(0, "White"); pList1->add_item(1, "Black"); pList1->add_item(2, "Red"); pList1->add_item(3, "Orange"); pList1->add_item(4, "Yellow"); pList1->add_item(5, "Green"); pList1->add_item(6, "Cyan"); pList1->add_item(7, "Blue"); pList1->add_item(8, "Purple"); pList1->add_item(9, "Gray"); } pList1->set_int_val(knotColors[1]); GLUI_Listbox *pList2 = glui3->add_listbox_to_panel(pClrChooseP, "Color 2", knotColors + 2); { pList2->add_item(0, "White"); pList2->add_item(1, "Black"); pList2->add_item(2, "Red"); pList2->add_item(3, "Orange"); pList2->add_item(4, "Yellow"); pList2->add_item(5, "Green"); pList2->add_item(6, "Cyan"); pList2->add_item(7, "Blue"); pList2->add_item(8, "Purple"); pList2->add_item(9, "Gray"); } pList2->set_int_val(knotColors[2]); GLUI_Listbox *pList3 = glui3->add_listbox_to_panel(pClrChooseP, "Color 3", knotColors + 3); { pList3->add_item(0, "White"); pList3->add_item(1, "Black"); pList3->add_item(2, "Red"); pList3->add_item(3, "Orange"); pList3->add_item(4, "Yellow"); pList3->add_item(5, 
"Green"); pList3->add_item(6, "Cyan"); pList3->add_item(7, "Blue"); pList3->add_item(8, "Purple"); pList3->add_item(9, "Gray"); } pList3->set_int_val(knotColors[3]); GLUI_Listbox *pList4 = glui3->add_listbox_to_panel(pClrChooseP, "Color 4", knotColors + 4); { pList4->add_item(0, "White"); pList4->add_item(1, "Black"); pList4->add_item(2, "Red"); pList4->add_item(3, "Orange"); pList4->add_item(4, "Yellow"); pList4->add_item(5, "Green"); pList4->add_item(6, "Cyan"); pList4->add_item(7, "Blue"); pList4->add_item(8, "Purple"); pList4->add_item(9, "Gray"); } pList4->set_int_val(knotColors[4]); glui3->add_separator(); pFileChooseP = glui3->add_panel("File Choose"); pImgPath = glui3->add_edittext_to_panel(pFileChooseP, "Img Path", GLUI_EDITTEXT_TEXT, NULL, -1, file_callback); pImgPath->set_text(CM_IMG); glui3->add_spinner_to_panel(pFileChooseP, "Start", GLUI_SPINNER_FLOAT, &fStart); glui3->add_spinner_to_panel(pFileChooseP, "End", GLUI_SPINNER_FLOAT, &fEnd); #ifndef DATA_2D pFileChooseP->disable(); #endif } GLUI_Master.set_glutIdleFunc(idle); GLUI_Master.set_glutMouseFunc(myGlutMouse); /// /// Setup Scene /// { /// Camera /// CamType eCamType = PERSP; SamplingType eSplType = STRATIFIED; //// Top //vect3d ctr(0, 0, InitViewZ); //vect3d view(0, 0, -1); //vect3d up(0, 1, 0); //// Bottom //vect3d ctr(0, 0, -InitViewZ); //vect3d view(0, 0, 1); //vect3d up(0, 1, 0); //// Side //vect3d ctr(InitViewZ, 0, 0); //vect3d view(-1, 0, 0); //vect3d up(0, 0, 1); //// 90 //vect3d ctr(0, InitViewZ, 0); //vect3d view(0, -1, 0); //vect3d up(0, 0, 1); //// look-down-cap //vect3d ctr(0, InitViewZ, InitViewZ * 0.3 - 20); //vect3d view(0, -1, -0.3); //vect3d up(0, 0.3, 1); //// cut-plane //vect3d ctr(5539.27002, 4021.99023, -929.16742); //vect3d view(-121.07777, -87.91293, 20.30981); //vect3d up(0.53435, 0.43612, 5.07333); //// Poly-Plane // vect3d view(-194.79440, 156.42287, -24.24892); // vect3d ctr(5516.48730, -4429.82227, 686.71802); // vect3d up(-0.36831, 0.33132, 5.09597); // Poly-Piece // vect3d view(15.01775, 179.30638, 89.58064); vect3d ctr(-531.08990, -6341.02490, -3167.94580); vect3d up(-0.18784, -2.27411, 4.58339); //// SQW - view //vect3d view(13.06205, -26.43243, -9.57651); //vect3d ctr(-2612.41113, 5286.48486, 1915.30151); //vect3d up(0.66073, -1.43767, 4.86939); // Set Camera Camera *pCam = NULL; switch(eCamType) { case PERSP: pCam = new PerpCamera(300, ctr, up, view, 500, ViewPlaneRatio); break; case ORTHO: pCam = new OrthoCamera(ctr, up, view, 10, ViewPlaneRatio); break; } pCam->setSampler(eSplType); pCam->setMultiSamplingCount(nMultiSampleCount); scene.setCamera(pCam); scene.setAmbiColor(vect3d(0,0,0)); /// Generate Rays for GPU /// sendConstants2GPU(); /// Volume Cube /// vect3d cubeCtr(0, 0, 0); vect3d vertVec(0, 1, 0); vect3d horiVec(0, 0, 1); Cube *pCube0 = new Cube(x_dim, z_dim, y_dim, cubeCtr, vertVec, horiVec); scene.addObject(pCube0); #ifndef DATA_2D //%%%Add my little cube representing the tool Cube *pCubeTool = new Cube(x_dim / 30.f, z_dim / 46.8f, y_dim / 30.f, cubeCtr, vertVec, horiVec); scene.addObject(pCubeTool); scene.setDataDim(x_dim, y_dim, z_dim); #endif int half_x = x_dim / 2; int half_y = y_dim / 2; int half_z = z_dim / 2; Tracer::setVolBBox( - half_x - 1, half_x + 1, - half_y - 1, half_y + 1, - half_z - 1, half_z + 1); copySceneGeomotry(); #if 1 //c10w20 float factorNano = 0.90;//1 float factorSlice = factorNano; float factorCap0 = 0.63;//0.9 float factorCap1 = 0.5; float factorPlane = factorNano; float offset = 0; //50; //43;//34.5; //float factorNano = 1;//1 //float factorSlice 
= factorNano; //float factorCap0 = factorNano;//0.9 //float factorCap1 = factorNano; //float factorPlane = factorNano; //float offset = 34.5; #else float factorNano = 1; float factorSlice = 1; float factorCap0 = 1; float factorCap1 = 1; float factorPlane = 1; float offset = 0; #endif /// /// Nanorod Geometry /// printf("- Loading Nanorod Geometry ..."); { ObjObject *pObj = new ObjObject(0, 0, 0, 0); pObj->load("nanorod.obj"); pObj->setSmooth(false); vect3d spec(1,1,1); vect3d diff(1,1,1); vect3d ambi(1,1,1); pObj->setMaterial(spec, diff, ambi, 70); vect3d axis(1, 0, 0); float angle = -90; pObj->rotate(angle, axis); pObj->scale(0.82 * factorNano, 0.82 * factorNano, 0.70 * factorNano); pObj->translate(0,0,-3 - offset); printf("Done \n"); printf("- Transfering Nanorod Geometry to GPU..."); copyNanoGeo(pObj, offset); printf("Done \n"); } printf("- Loading Slice Geometry ..."); { ObjObject *pObj = new ObjObject(0, 0, 0, 0); pObj->load("slice.obj"); pObj->setSmooth(false); vect3d spec(1,1,1); vect3d diff(1,1,1); vect3d ambi(0.2,0.2,0.2); pObj->setMaterial(spec, diff, ambi, 70); vect3d axis(1, 0, 0); float angle = -90; pObj->rotate(angle, axis); vect3d axis0(0, 0, 1); float angle0 = -120; pObj->rotate(angle0, axis0); pObj->scale(0.82 * factorSlice, 0.82 * factorSlice, 0.70 * factorSlice); pObj->translate(0,0,-3 - offset); printf("Done \n"); printf("- Transfering Slice Geometry to GPU..."); copySlice(pObj); printf("Done \n"); } /// 1. Internal Cap 0 { ObjObject *pObj = new ObjObject(0, 0, 0, 0); pCap0 = pObj; pObj->load("nanorod.obj"); pObj->setSmooth(false); vect3d spec(0.3,0.3,0.3); vect3d diff(0.3, 0.3, 0.3); vect3d ambi(0.3, 0.3, 0.3); pObj->setMaterial(spec, diff, ambi, 70); vect3d axis(1, 0, 0); float angle = -90; pObj->rotate(angle, axis); pObj->scale(0.82 * factorCap0, 0.82 * factorCap0, 0.70 * factorCap0); pObj->translate(0,0,-3-offset); printf("Done \n"); printf("- Transfering Internal Cap Geometry to GPU..."); copyInternalCap0(pObj, offset); printf("Done \n"); } /// 1. Internal Cap 1 { ObjObject *pObj = new ObjObject(0, 0, 0, 0); pObj->load("nanorod.obj"); pObj->setSmooth(false); vect3d spec(0.3,0.3,0.3); vect3d diff(0.3,0.3,0.3); vect3d ambi(0.3, 0.3, 0.3); pObj->setMaterial(spec, diff, ambi, 70); vect3d axis(1, 0, 0); float angle = -90; pObj->rotate(angle, axis); pObj->scale(0.82 * factorCap1, 0.82 * factorCap1, 0.70 * factorCap1); pObj->translate(0,0,-3-offset); printf("Done \n"); printf("- Transfering Internal Cap Geometry to GPU..."); copyInternalCap1(pObj, offset); printf("Done \n"); } /// Load nanoPlane { ObjObject *pObj = new ObjObject(0, 0, 0, 0); #ifndef DATA_2D pObj->load("nanoPlane.obj"); #else pObj->load("2d_plane.obj"); #endif pObj->setSmooth(false); vect3d spec(0.3,0.3,0.3); vect3d diff(0.3,0.3,0.3); vect3d ambi(1,1,1); pObj->setMaterial(spec, diff, ambi, 70); #ifndef DATA_2D vect3d axis(1, 0, 0); float angle = -90; pObj->rotate(angle, axis); pObj->scale(0.82 * factorPlane * 1.01, 0.82 * factorPlane * 1.01, 0.70 * factorPlane * 1.01); pObj->translate(0,0,-3 - offset); #else pObj->scale(0.96,1,1); pObj->translate(-4.5,0, 3); #endif printf("Done \n"); printf("- Transfering NanoPlane Geometry to GPU..."); copyNanoPlane(pObj, offset); printf("Done \n"); } } color_map_choice_callback(0); #ifndef DATA_2D //%%%Haptics initHaptic(); //%%% setupShaders(); #endif /// Go nTick = clock(); glutMainLoop(); //%%% closeHaptic(); //%%% destroy(); return EXIT_SUCCESS; }
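For reference, the force-field branch (typeOfForce == 0) in updateHaptics() above combines a Hooke spring pulling the device back toward the origin with a velocity damper, both scaled by the voxel value sampled at the tool position. A standalone sketch of that computation, with a plain struct standing in for chai3d's cVector3d and the device's m_maxLinearDamping passed in as a parameter (both stand-ins are assumptions of this sketch):

// Minimal sketch of the typeOfForce == 0 force computation above.
#include <cstdio>

struct Vec3 { double x, y, z; };

static Vec3 scale(const Vec3 &v, double s) { return {v.x * s, v.y * s, v.z * s}; }
static Vec3 add(const Vec3 &a, const Vec3 &b) { return {a.x + b.x, a.y + b.y, a.z + b.z}; }

// val: normalized voxel value sampled at the tool position (0..1 after dividing by fEnd-fStart)
// maxLinearDamping: the device limit queried from its specifications in the original code
Vec3 forceField(const Vec3 &pos, const Vec3 &vel, double val, double maxLinearDamping) {
    double Kp = 2000.0 * val;            // spring stiffness scaled by the data value [N/m]
    Vec3 spring = scale(pos, -Kp);       // pull back toward the workspace origin
    double Kv = maxLinearDamping * val;  // damping scaled the same way
    Vec3 damper = scale(vel, -Kv);       // oppose the current linear velocity
    return add(spring, damper);
}

int main() {
    Vec3 f = forceField({0.01, 0.0, 0.02}, {0.1, 0.0, 0.0}, 0.5, 30.0);
    printf("F = (%.3f, %.3f, %.3f) N\n", f.x, f.y, f.z);
    return 0;
}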
aaf90dcfca6a638a3249416dc08b3ad7028c2270.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void dwt_per_X(float *d_ip, int rows, int cols, int cA_cols, int filt_len, int Halo_steps, float *d_cL, float *d_cH) {
    extern __shared__ float s_Data[];

    //Offset to the left halo edge
    const int baseX = ((blockIdx.x * 2 * X_RESULT_STEPS) - Halo_steps) * X_BLOCKDIM_X + threadIdx.x;
    const int baseX1 = (blockIdx.x * X_RESULT_STEPS) * X_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * X_BLOCKDIM_Y + threadIdx.y;

    if (baseY < rows) {
        d_ip += baseY * cols + baseX;
        d_cL += baseY * cA_cols + baseX1;
        d_cH += baseY * cA_cols + baseX1;

        //Loading data to shared memory
        if (cols % 2 == 1) {
            //Load Left Halo
#pragma unroll
            for (int i = 0; i < Halo_steps; i++) {
                if (baseX + i * X_BLOCKDIM_X == -1)
                    s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = d_ip[cols - 1];
                else
                    s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = (baseX + i * X_BLOCKDIM_X >= 0) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X + cols + 1];
            }

            // main data and Load right halo
#pragma unroll
            for (int i = Halo_steps; i < Halo_steps + 2 * X_RESULT_STEPS + Halo_steps; i++) {
                if (baseX + i * X_BLOCKDIM_X == cols)
                    s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = d_ip[i * X_BLOCKDIM_X - 1];
                else
                    s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = ((baseX + i * X_BLOCKDIM_X) < cols) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X - cols - 1];
            }

            //Compute and store results
            __syncthreads();
        }
        else {
#pragma unroll
            for (int i = 0; i < Halo_steps; i++) {
                s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = (baseX + i * X_BLOCKDIM_X >= 0) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X + cols];
            }

            // main data and Load right halo
#pragma unroll
            for (int i = Halo_steps; i < Halo_steps + 2 * X_RESULT_STEPS + Halo_steps; i++) {
                s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = ((baseX + i * X_BLOCKDIM_X) < cols) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X - cols];
            }

            //Compute and store results
            __syncthreads();
        }

#pragma unroll
        for (int i = 0; i < X_RESULT_STEPS; i++) {
            if ((baseX1 + i * X_BLOCKDIM_X < cA_cols)) {
                float sum_cL = 0, sum_cH = 0;
                int l2 = filt_len / 2;
                for (int l = 0; l < filt_len; ++l) {
                    sum_cL += c_lpd[l] * s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x * 2 + Halo_steps*X_BLOCKDIM_X + 2 * i * X_BLOCKDIM_X + l2 - l]; //l2-l is to select the right center pixels with odd and even sized filters
                    sum_cH += c_hpd[l] * s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x * 2 + Halo_steps*X_BLOCKDIM_X + 2 * i * X_BLOCKDIM_X + l2 - l];
                }
                d_cL[i * X_BLOCKDIM_X] = sum_cL;
                d_cH[i * X_BLOCKDIM_X] = sum_cH;
            }
        }
    }
}
aaf90dcfca6a638a3249416dc08b3ad7028c2270.cu
#include "includes.h" __global__ void dwt_per_X(float *d_ip, int rows, int cols, int cA_cols, int filt_len, int Halo_steps, float *d_cL, float *d_cH) { extern __shared__ float s_Data[]; //Offset to the left halo edge const int baseX = ((blockIdx.x * 2 * X_RESULT_STEPS) - Halo_steps) * X_BLOCKDIM_X + threadIdx.x; const int baseX1 = (blockIdx.x * X_RESULT_STEPS) * X_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * X_BLOCKDIM_Y + threadIdx.y; if (baseY < rows) { d_ip += baseY * cols + baseX; d_cL += baseY * cA_cols + baseX1; d_cH += baseY * cA_cols + baseX1; //Loading data to shared memory if (cols % 2 == 1) { //Load Left Halo #pragma unroll for (int i = 0; i < Halo_steps; i++) { if (baseX + i * X_BLOCKDIM_X == -1) s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = d_ip[cols - 1]; else s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = (baseX + i * X_BLOCKDIM_X >= 0) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X + cols + 1]; } // main data and Load right halo #pragma unroll for (int i = Halo_steps; i < Halo_steps + 2 * X_RESULT_STEPS + Halo_steps; i++) { if (baseX + i * X_BLOCKDIM_X == cols) s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = d_ip[i * X_BLOCKDIM_X - 1]; else s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = ((baseX + i * X_BLOCKDIM_X) < cols) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X - cols - 1]; } //Compute and store results __syncthreads(); } else { #pragma unroll for (int i = 0; i < Halo_steps; i++) { s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = (baseX + i * X_BLOCKDIM_X >= 0) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X + cols]; } // main data and Load right halo #pragma unroll for (int i = Halo_steps; i < Halo_steps + 2 * X_RESULT_STEPS + Halo_steps; i++) { s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = ((baseX + i * X_BLOCKDIM_X) < cols) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X - cols]; } //Compute and store results __syncthreads(); } #pragma unroll for (int i = 0; i < X_RESULT_STEPS; i++) { if ((baseX1 + i * X_BLOCKDIM_X < cA_cols)) { float sum_cL = 0, sum_cH = 0; int l2 = filt_len / 2; for (int l = 0; l < filt_len; ++l) { sum_cL += c_lpd[l] * s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x * 2 + Halo_steps*X_BLOCKDIM_X + 2 * i * X_BLOCKDIM_X + l2 - l]; //l2-l is to select the right center pixels with odd and even sized filters sum_cH += c_hpd[l] * s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x * 2 + Halo_steps*X_BLOCKDIM_X + 2 * i * X_BLOCKDIM_X + l2 - l]; } d_cL[i * X_BLOCKDIM_X] = sum_cL; d_cH[i * X_BLOCKDIM_X] = sum_cH; } } } }
96ee82780bd969233f4af08c43b528a0d546dc80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // counting Hamilton cycle, CUDA acceleration #include<stdio.h> #include<stdlib.h> #define MAX_BLOCK_SIZE 256 #define MAX_ARRAY_SIZE (1024*8) #define WORK_LIMIT 10000000 typedef unsigned long long u64; // any 2 <= mod <= 2^31 should work __host__ __device__ unsigned mod_sum(unsigned a, unsigned b, unsigned mod) { unsigned c = a+b; return c >= mod ? c-mod : c; } __host__ __device__ u64 mod_sum64(u64 a, u64 b, u64 mod) { u64 c = a+b; return c >= mod ? c-mod : c; } template<int k> __launch_bounds__(MAX_BLOCK_SIZE) __global__ void ha2(int n, int work, unsigned *part, int *adj, unsigned long long *ret, unsigned long long mod) { __shared__ unsigned long long qc[1024]; // transition count __shared__ unsigned long long ai[64]; // adjacency matrix as bitset //const int k = blockDim.x; const int tid = threadIdx.x; const int bid = threadIdx.y + blockIdx.x * blockDim.y; const int sha = threadIdx.y * k; const int gridSize = blockDim.y * gridDim.x; unsigned long long s = part[bid]; unsigned long long mask = (1ull<<k) - 1; unsigned long long total = 0; // fetch adjacency matrix for (int i = tid+sha; i < n; i += blockDim.y * k) { unsigned long long aa = 0; for (int j = 0; j < n; j++) { aa = aa | static_cast<unsigned long long>(adj[i * n + j]) << j; } ai[i] = aa; } __syncthreads(); for (int runs = 0; runs < work; runs += gridSize) { unsigned at; { unsigned long long row = s; for (int i = 0; i < tid; i++) { row = row & (row-1); } at = __ffsll(row)-1; } // making row "long long" would make program 3x slow, so I use 2 unsigned int unsigned row = 0, row2 = 0; { // build transition table unsigned long long me = ai[at]; for (int i = n-2; i >= 0; i--) { if (s>>i & 1) { row2 = row2 << 1 | row >> 31; row = row + row + (me>>i & 1); } } // initial state qc[tid+sha] = (me >> (n-1)) & 1; __syncthreads(); } // calculate each transition, uses GPU SIMD feature for (int t = 1; t < n-1; t++) { unsigned long long sum = 0; unsigned rr = row; for (int i = 0; i < min(k, 32); i++) { //sum = mod_sum(sum, qc[i+sha] * (row>>i & 1), mod); //sum = mod_sum64(sum, qc[i+sha] * (rr & 1), mod); //sum = mod_sum64(sum, qc[i+sha] * dd[i], mod); sum = mod_sum64(sum, qc[i+sha] & 0LL-(rr & 1), mod); rr >>= 1; } if (k > 32) { rr = row2; for (int i = 0; i < k-32; i++) { sum = mod_sum64(sum, qc[i+32+sha] & 0ULL-(rr & 1), mod); rr >>= 1; } } __syncthreads(); qc[tid+sha] = sum; __syncthreads(); } // last transition { if (!(ai[n-1] >> at & 1)) qc[tid+sha] = 0; __syncthreads(); unsigned long long count = 0; for (int i = 0; i < k; i++) { count = mod_sum64(count, qc[i+sha], mod); } //if (tid==0) printf("[%d:%d],", s, count); if (runs + bid < work) { total = mod_sum64(count, total, mod); } } // get next work unsigned bit = s & (-s); s += bit; s |= mask >> __popcll(s); __syncthreads(); } if (tid == 0) { // output total for this block ret[bid] = total; } } // k=1 is useless actually template<> __global__ void ha2<1>(int n, int work, unsigned *part, int *adj, unsigned long long *ret, unsigned long long mod) { const int bid = threadIdx.y + blockIdx.x * blockDim.y; ret[bid] = 0; } int n; int adj[64*64]; unsigned part[MAX_ARRAY_SIZE]; unsigned long long ret[MAX_ARRAY_SIZE]; long long nCr[65][65]; u64 getComb(long long idx, int n, int r) { u64 ans = 0; n -= 1; while (r > 0) { if (idx < nCr[n][r]) n -= 1; else { ans |= u64(1)<<(n); idx -= nCr[n][r]; n -= 1; r -= 1; } } return ans; } void ha4(int gridSize, int blockSize, int k, int n, int work, unsigned *part, int *adj, 
unsigned long long *ret, unsigned long long mod) { dim3 bsz(k, blockSize); switch (k) { #define HA4_k(k) case k:hipLaunchKernelGGL(( ha2<k>), dim3(gridSize), dim3(bsz), 0, 0, n, work, part, adj, ret, mod); break; HA4_k(1)HA4_k(2)HA4_k(3)HA4_k(4)HA4_k(5) HA4_k(6)HA4_k(7)HA4_k(8)HA4_k(9)HA4_k(10) HA4_k(11)HA4_k(12)HA4_k(13)HA4_k(14)HA4_k(15) HA4_k(16)HA4_k(17)HA4_k(18)HA4_k(19)HA4_k(20) HA4_k(21)HA4_k(22)HA4_k(23)HA4_k(24)HA4_k(25) HA4_k(26)HA4_k(27)HA4_k(28)HA4_k(29)HA4_k(30) HA4_k(31)HA4_k(32)HA4_k(33)HA4_k(34) #undef HA4_k } hipError_t status = hipGetLastError(); if (status != hipSuccess) { fprintf(stderr, "%s\n", hipGetErrorString(status)); } } int main() { int *gpu_adj; unsigned *gpu_part; unsigned long long *gpu_ret; scanf("%d", &n); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (i != j) adj[i*n+j] = rand()>>5&1; } } for (int i = 0; i < n; i++) { char op; for (int j = 0; j < n; j++) { if (scanf(" %c", &op) == 1 && i != j) { adj[i*n+j] = op == '1'; } } } for (int i = 0; i <= 64; i++) { nCr[i][0] = nCr[i][i] = 1; for (int j = 1; j < i; j++) nCr[i][j] = nCr[i-1][j-1] + nCr[i-1][j]; } hipMalloc(&gpu_part, sizeof part); hipMalloc(&gpu_adj, sizeof adj); hipMalloc(&gpu_ret, sizeof ret); hipMemcpy(gpu_adj, adj, sizeof adj, hipMemcpyHostToDevice); unsigned long long ans = 0; unsigned long long mod = 0; for (int k = 1; k <= n-1; k++) { long long works = nCr[n-1][k]; while (works > 0) { long long wo = works; if (works > WORK_LIMIT) wo = WORK_LIMIT; works -= wo; // split too big work int blockSize = wo; if (blockSize > MAX_BLOCK_SIZE / k) blockSize = MAX_BLOCK_SIZE / k; int gridSize = wo / blockSize; if (blockSize * gridSize > MAX_ARRAY_SIZE) gridSize = MAX_ARRAY_SIZE / blockSize; int totSize = blockSize * gridSize; fprintf(stderr, "block size = (%d,%d,1) grid size = (%d,1,1) work = %lld\n", k, blockSize, gridSize, wo); //for (int j = 0; j < wo; j++) printf("%d,", getComb(j, n-1, k)); for (int j = 0; j < totSize; j++) { int step = wo / totSize * j; if (j < wo % totSize) step += j; else step += wo % totSize; part[j] = getComb(works + step, n-1, k); } hipMemcpy(gpu_part, part, sizeof(int) * totSize, hipMemcpyHostToDevice); ha4(gridSize, blockSize, k, n, wo, gpu_part, gpu_adj, gpu_ret, mod); hipDeviceSynchronize(); hipMemcpy(ret, gpu_ret, sizeof(long long) * totSize, hipMemcpyDeviceToHost); unsigned long long sum = 0; for (int j = 0; j < totSize; j++) { sum = mod_sum64(sum, ret[j], 0); } if ((n-k)%2 == 1) ans = mod_sum64(ans, sum, mod); else if (sum != 0) ans = mod_sum64(ans, mod-sum, mod); } } printf("ans = %llu\n", ans); hipFree(gpu_ret); hipFree(gpu_adj); hipFree(gpu_part); return 0; }
96ee82780bd969233f4af08c43b528a0d546dc80.cu
// counting Hamilton cycle, CUDA acceleration #include<stdio.h> #include<stdlib.h> #define MAX_BLOCK_SIZE 256 #define MAX_ARRAY_SIZE (1024*8) #define WORK_LIMIT 10000000 typedef unsigned long long u64; // any 2 <= mod <= 2^31 should work __host__ __device__ unsigned mod_sum(unsigned a, unsigned b, unsigned mod) { unsigned c = a+b; return c >= mod ? c-mod : c; } __host__ __device__ u64 mod_sum64(u64 a, u64 b, u64 mod) { u64 c = a+b; return c >= mod ? c-mod : c; } template<int k> __launch_bounds__(MAX_BLOCK_SIZE) __global__ void ha2(int n, int work, unsigned *part, int *adj, unsigned long long *ret, unsigned long long mod) { __shared__ unsigned long long qc[1024]; // transition count __shared__ unsigned long long ai[64]; // adjacency matrix as bitset //const int k = blockDim.x; const int tid = threadIdx.x; const int bid = threadIdx.y + blockIdx.x * blockDim.y; const int sha = threadIdx.y * k; const int gridSize = blockDim.y * gridDim.x; unsigned long long s = part[bid]; unsigned long long mask = (1ull<<k) - 1; unsigned long long total = 0; // fetch adjacency matrix for (int i = tid+sha; i < n; i += blockDim.y * k) { unsigned long long aa = 0; for (int j = 0; j < n; j++) { aa = aa | static_cast<unsigned long long>(adj[i * n + j]) << j; } ai[i] = aa; } __syncthreads(); for (int runs = 0; runs < work; runs += gridSize) { unsigned at; { unsigned long long row = s; for (int i = 0; i < tid; i++) { row = row & (row-1); } at = __ffsll(row)-1; } // making row "long long" would make program 3x slow, so I use 2 unsigned int unsigned row = 0, row2 = 0; { // build transition table unsigned long long me = ai[at]; for (int i = n-2; i >= 0; i--) { if (s>>i & 1) { row2 = row2 << 1 | row >> 31; row = row + row + (me>>i & 1); } } // initial state qc[tid+sha] = (me >> (n-1)) & 1; __syncthreads(); } // calculate each transition, uses GPU SIMD feature for (int t = 1; t < n-1; t++) { unsigned long long sum = 0; unsigned rr = row; for (int i = 0; i < min(k, 32); i++) { //sum = mod_sum(sum, qc[i+sha] * (row>>i & 1), mod); //sum = mod_sum64(sum, qc[i+sha] * (rr & 1), mod); //sum = mod_sum64(sum, qc[i+sha] * dd[i], mod); sum = mod_sum64(sum, qc[i+sha] & 0LL-(rr & 1), mod); rr >>= 1; } if (k > 32) { rr = row2; for (int i = 0; i < k-32; i++) { sum = mod_sum64(sum, qc[i+32+sha] & 0ULL-(rr & 1), mod); rr >>= 1; } } __syncthreads(); qc[tid+sha] = sum; __syncthreads(); } // last transition { if (!(ai[n-1] >> at & 1)) qc[tid+sha] = 0; __syncthreads(); unsigned long long count = 0; for (int i = 0; i < k; i++) { count = mod_sum64(count, qc[i+sha], mod); } //if (tid==0) printf("[%d:%d],", s, count); if (runs + bid < work) { total = mod_sum64(count, total, mod); } } // get next work unsigned bit = s & (-s); s += bit; s |= mask >> __popcll(s); __syncthreads(); } if (tid == 0) { // output total for this block ret[bid] = total; } } // k=1 is useless actually template<> __global__ void ha2<1>(int n, int work, unsigned *part, int *adj, unsigned long long *ret, unsigned long long mod) { const int bid = threadIdx.y + blockIdx.x * blockDim.y; ret[bid] = 0; } int n; int adj[64*64]; unsigned part[MAX_ARRAY_SIZE]; unsigned long long ret[MAX_ARRAY_SIZE]; long long nCr[65][65]; u64 getComb(long long idx, int n, int r) { u64 ans = 0; n -= 1; while (r > 0) { if (idx < nCr[n][r]) n -= 1; else { ans |= u64(1)<<(n); idx -= nCr[n][r]; n -= 1; r -= 1; } } return ans; } void ha4(int gridSize, int blockSize, int k, int n, int work, unsigned *part, int *adj, unsigned long long *ret, unsigned long long mod) { dim3 bsz(k, blockSize); switch (k) { 
#define HA4_k(k) case k: ha2<k><<<gridSize, bsz>>>(n, work, part, adj, ret, mod); break; HA4_k(1)HA4_k(2)HA4_k(3)HA4_k(4)HA4_k(5) HA4_k(6)HA4_k(7)HA4_k(8)HA4_k(9)HA4_k(10) HA4_k(11)HA4_k(12)HA4_k(13)HA4_k(14)HA4_k(15) HA4_k(16)HA4_k(17)HA4_k(18)HA4_k(19)HA4_k(20) HA4_k(21)HA4_k(22)HA4_k(23)HA4_k(24)HA4_k(25) HA4_k(26)HA4_k(27)HA4_k(28)HA4_k(29)HA4_k(30) HA4_k(31)HA4_k(32)HA4_k(33)HA4_k(34) #undef HA4_k } cudaError_t status = cudaGetLastError(); if (status != cudaSuccess) { fprintf(stderr, "%s\n", cudaGetErrorString(status)); } } int main() { int *gpu_adj; unsigned *gpu_part; unsigned long long *gpu_ret; scanf("%d", &n); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (i != j) adj[i*n+j] = rand()>>5&1; } } for (int i = 0; i < n; i++) { char op; for (int j = 0; j < n; j++) { if (scanf(" %c", &op) == 1 && i != j) { adj[i*n+j] = op == '1'; } } } for (int i = 0; i <= 64; i++) { nCr[i][0] = nCr[i][i] = 1; for (int j = 1; j < i; j++) nCr[i][j] = nCr[i-1][j-1] + nCr[i-1][j]; } cudaMalloc(&gpu_part, sizeof part); cudaMalloc(&gpu_adj, sizeof adj); cudaMalloc(&gpu_ret, sizeof ret); cudaMemcpy(gpu_adj, adj, sizeof adj, cudaMemcpyHostToDevice); unsigned long long ans = 0; unsigned long long mod = 0; for (int k = 1; k <= n-1; k++) { long long works = nCr[n-1][k]; while (works > 0) { long long wo = works; if (works > WORK_LIMIT) wo = WORK_LIMIT; works -= wo; // split too big work int blockSize = wo; if (blockSize > MAX_BLOCK_SIZE / k) blockSize = MAX_BLOCK_SIZE / k; int gridSize = wo / blockSize; if (blockSize * gridSize > MAX_ARRAY_SIZE) gridSize = MAX_ARRAY_SIZE / blockSize; int totSize = blockSize * gridSize; fprintf(stderr, "block size = (%d,%d,1) grid size = (%d,1,1) work = %lld\n", k, blockSize, gridSize, wo); //for (int j = 0; j < wo; j++) printf("%d,", getComb(j, n-1, k)); for (int j = 0; j < totSize; j++) { int step = wo / totSize * j; if (j < wo % totSize) step += j; else step += wo % totSize; part[j] = getComb(works + step, n-1, k); } cudaMemcpy(gpu_part, part, sizeof(int) * totSize, cudaMemcpyHostToDevice); ha4(gridSize, blockSize, k, n, wo, gpu_part, gpu_adj, gpu_ret, mod); cudaDeviceSynchronize(); cudaMemcpy(ret, gpu_ret, sizeof(long long) * totSize, cudaMemcpyDeviceToHost); unsigned long long sum = 0; for (int j = 0; j < totSize; j++) { sum = mod_sum64(sum, ret[j], 0); } if ((n-k)%2 == 1) ans = mod_sum64(ans, sum, mod); else if (sum != 0) ans = mod_sum64(ans, mod-sum, mod); } } printf("ans = %llu\n", ans); cudaFree(gpu_ret); cudaFree(gpu_adj); cudaFree(gpu_part); return 0; }
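This pair shows the main mechanical rewrite hipify performs on host code: the triple-chevron launch inside the HA4_k macro becomes hipLaunchKernelGGL with the shared-memory size and stream spelled out, and the cuda* runtime calls (cudaMalloc, cudaMemcpy, cudaDeviceSynchronize, cudaGetLastError, cudaFree) become the hip* calls with the same arguments. The launch translation in isolation, taken from the macro above:

// CUDA form, from the HA4_k macro in the .cu file:
//     ha2<k><<<gridSize, bsz>>>(n, work, part, adj, ret, mod);
// HIP form emitted in the .hip file:
//     hipLaunchKernelGGL((ha2<k>), dim3(gridSize), dim3(bsz),
//                        0 /* dynamic shared memory */, 0 /* stream */,
//                        n, work, part, adj, ret, mod);
// The shared-memory and stream arguments that <<<...>>> lets you omit are always
// written out explicitly, and the kernel name is parenthesized so that template
// argument lists containing commas cannot break the macro expansion.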
1dde5819ec9eb11ad53f815ad921399bcbd7f6d6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************** |> SANCHES YUCRA YHON YERSON *****************************/ #include <iostream> #include <time.h> #include <stdio.h> #include "opencv2/opencv.hpp" using namespace cv; using namespace std; #define BLUR_SIZE 32 __global__ void rgb2grayincuda(uchar3 *const d_in, unsigned char *const d_out, uint imgheight, uint imgwidth) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx < imgwidth && idy < imgheight) { uchar3 rgb = d_in[idy * imgwidth + idx]; d_out[idy * imgwidth + idx] = 0.299f * rgb.x + 0.587f * rgb.y + 0.114f * rgb.z; } } __global__ void blurKernel(uchar3 *const d_in, uchar3 *const d_out, uint imgheight, uint imgwidth) { const unsigned int Col = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int Row = blockIdx.y * blockDim.y + threadIdx.y; if (Col < imgheight && Row < imgwidth) { int pixValx = 0; int pixValy = 0; int pixValz = 0; int pixels = 0; for (int blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE + 1; ++blurRow) { for (int blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE + 1; ++blurCol) { int curRow = Row + blurRow; int curCol = Col + blurCol; if (curRow > -1 && curRow < imgheight && curCol > -1 && curCol < imgwidth) { pixValx += d_in[curRow * imgwidth + curCol].x; pixValy += d_in[curRow * imgwidth + curCol].y; pixValz += d_in[curRow * imgwidth + curCol].z; pixels++; } } } d_out[Row * imgwidth + Col].x = (unsigned char)(pixValx / pixels); d_out[Row * imgwidth + Col].y = (unsigned char)(pixValy / pixels); d_out[Row * imgwidth + Col].z = (unsigned char)(pixValz / pixels); } } int main(void) { Mat srcImage = imread("./Lenna.png"); const uint imgheight = srcImage.rows; const uint imgwidth = srcImage.cols; Mat grayImage(imgheight, imgwidth, CV_8UC1, Scalar(0)); uchar3 *d_in; unsigned char *d_out; hipMalloc((void **)&d_in, imgheight * imgwidth * sizeof(uchar3)); hipMalloc((void **)&d_out, imgheight * imgwidth * sizeof(unsigned char)); hipMemcpy(d_in, srcImage.data, imgheight * imgwidth * sizeof(uchar3), hipMemcpyHostToDevice); dim3 threadsPerBlock(32, 32); dim3 blocksPerGrid((imgwidth + threadsPerBlock.x - 1) / threadsPerBlock.x, (imgheight + threadsPerBlock.y - 1) / threadsPerBlock.y); hipLaunchKernelGGL(( rgb2grayincuda), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_in, d_out, imgheight, imgwidth); hipMemcpy(grayImage.data, d_out, imgheight * imgwidth * sizeof(unsigned char), hipMemcpyDeviceToHost); hipFree(d_out); imwrite("greyImage.jpg", grayImage); // blur Mat blurImage(imgheight, imgwidth, CV_8UC3); uchar3 *d_out2; hipMalloc((void **)&d_out2, imgheight * imgwidth * sizeof(uchar3)); hipLaunchKernelGGL(( blurKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_in, d_out2, imgheight, imgwidth); hipMemcpy(blurImage.data, d_out2, imgheight * imgwidth * sizeof(uchar3), hipMemcpyDeviceToHost); hipFree(d_in); hipFree(d_out2); imwrite("blurImage32.jpg", blurImage); return 0; }
1dde5819ec9eb11ad53f815ad921399bcbd7f6d6.cu
/***************************** |> SANCHES YUCRA YHON YERSON *****************************/ #include <iostream> #include <time.h> #include <stdio.h> #include "opencv2/opencv.hpp" using namespace cv; using namespace std; #define BLUR_SIZE 32 __global__ void rgb2grayincuda(uchar3 *const d_in, unsigned char *const d_out, uint imgheight, uint imgwidth) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx < imgwidth && idy < imgheight) { uchar3 rgb = d_in[idy * imgwidth + idx]; d_out[idy * imgwidth + idx] = 0.299f * rgb.x + 0.587f * rgb.y + 0.114f * rgb.z; } } __global__ void blurKernel(uchar3 *const d_in, uchar3 *const d_out, uint imgheight, uint imgwidth) { const unsigned int Col = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int Row = blockIdx.y * blockDim.y + threadIdx.y; if (Col < imgheight && Row < imgwidth) { int pixValx = 0; int pixValy = 0; int pixValz = 0; int pixels = 0; for (int blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE + 1; ++blurRow) { for (int blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE + 1; ++blurCol) { int curRow = Row + blurRow; int curCol = Col + blurCol; if (curRow > -1 && curRow < imgheight && curCol > -1 && curCol < imgwidth) { pixValx += d_in[curRow * imgwidth + curCol].x; pixValy += d_in[curRow * imgwidth + curCol].y; pixValz += d_in[curRow * imgwidth + curCol].z; pixels++; } } } d_out[Row * imgwidth + Col].x = (unsigned char)(pixValx / pixels); d_out[Row * imgwidth + Col].y = (unsigned char)(pixValy / pixels); d_out[Row * imgwidth + Col].z = (unsigned char)(pixValz / pixels); } } int main(void) { Mat srcImage = imread("./Lenna.png"); const uint imgheight = srcImage.rows; const uint imgwidth = srcImage.cols; Mat grayImage(imgheight, imgwidth, CV_8UC1, Scalar(0)); uchar3 *d_in; unsigned char *d_out; cudaMalloc((void **)&d_in, imgheight * imgwidth * sizeof(uchar3)); cudaMalloc((void **)&d_out, imgheight * imgwidth * sizeof(unsigned char)); cudaMemcpy(d_in, srcImage.data, imgheight * imgwidth * sizeof(uchar3), cudaMemcpyHostToDevice); dim3 threadsPerBlock(32, 32); dim3 blocksPerGrid((imgwidth + threadsPerBlock.x - 1) / threadsPerBlock.x, (imgheight + threadsPerBlock.y - 1) / threadsPerBlock.y); rgb2grayincuda<<<blocksPerGrid, threadsPerBlock>>>(d_in, d_out, imgheight, imgwidth); cudaMemcpy(grayImage.data, d_out, imgheight * imgwidth * sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaFree(d_out); imwrite("greyImage.jpg", grayImage); // blur Mat blurImage(imgheight, imgwidth, CV_8UC3); uchar3 *d_out2; cudaMalloc((void **)&d_out2, imgheight * imgwidth * sizeof(uchar3)); blurKernel<<<blocksPerGrid, threadsPerBlock>>>(d_in, d_out2, imgheight, imgwidth); cudaMemcpy(blurImage.data, d_out2, imgheight * imgwidth * sizeof(uchar3), cudaMemcpyDeviceToHost); cudaFree(d_in); cudaFree(d_out2); imwrite("blurImage32.jpg", blurImage); return 0; }
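Apart from the cuda*-to-hip* renames in main(), the two files above are identical, including a quirk in blurKernel worth flagging: Col comes from the x (width) dimension and Row from the y (height) dimension, yet the outer guard tests Col against imgheight and Row against imgwidth, while the neighborhood check and the d_out[Row * imgwidth + Col] indexing use the axes consistently. The kernel is only safe because the test image (Lenna.png) is square; with imgheight != imgwidth it can skip columns or write out of bounds. A hypothetical one-line fix, mirroring the guard already used in rgb2grayincuda:

// As written (correct only when imgheight == imgwidth):
//     if (Col < imgheight && Row < imgwidth) { ... }
// Axis-consistent guard, matching rgb2grayincuda above:
//     if (Col < imgwidth && Row < imgheight) { ... }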
77cd3afc93a51a24d98de32de7b7427d9173c737.hip
// !!! This is a file automatically generated by hipify!!! /*----------------------------------------------------------- ** gaussian.cu -- The program is to solve a linear system Ax = b ** by using Gaussian Elimination. The algorithm on page 101 ** ("Foundations of Parallel Programming") is used. ** The sequential version is gaussian.c. This parallel ** implementation converts three independent for() loops ** into three Fans. Use the data file ge_3.dat to verify ** the correction of the output. ** ** Written by Andreas Kura, 02/15/95 ** Modified by Chong-wei Xu, 04/20/95 ** Modified by Chris Gregg for CUDA, 07/20/2009 **----------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "hip/hip_runtime.h" #include <string.h> #include <math.h> #define STR_SIZE 256 #ifdef RD_WG_SIZE_0_0 #define MAXBLOCKSIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define MAXBLOCKSIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define MAXBLOCKSIZE RD_WG_SIZE #else #define MAXBLOCKSIZE 512 #endif //2D defines. Go from specific to general #ifdef RD_WG_SIZE_1_0 #define BLOCK_SIZE_XY RD_WG_SIZE_1_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_XY RD_WG_SIZE_1 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_XY RD_WG_SIZE #else #define BLOCK_SIZE_XY 4 #endif typedef double DT; int Size; float *a, *b, *finalVec; float *m; FILE *fp; void InitProblemOnce(char *filename); void InitPerRun(); void ForwardSub(DT* f); void BackSub(); __global__ void Fan1(float *m, float *a, int Size, int t); __global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t); void InitMat(float *ary, int nrow, int ncol); void InitAry(float *ary, int ary_size); void PrintMat(float *ary, int nrow, int ncolumn); void PrintAry(float *ary, int ary_size); void PrintDeviceProperties(); void checkCUDAError(const char *msg); void print(float *ary, int ary_size); unsigned int totalKernelTime = 0; // create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06 void create_matrix(float *m, int size){ int i,j; float lamda = -0.01; float coe[2*size-1]; float coe_i =0.0; for (i=0; i < size; i++) { coe_i = 10*exp(lamda*i); j=size-1+i; coe[j]=coe_i; j=size-1-i; coe[j]=coe_i; } for (i=0; i < size; i++) { for (j=0; j < size; j++) { m[i*size+j]=coe[size-1-i+j]; } } } // print the result to a file void print(float *ary, int ary_size){ FILE* fp; fp=fopen("out.txt","w"); char str[STR_SIZE]; if(!fp) { printf("Error writing!"); return; } //sprintf(str,"%d",NI); //fputs(str,fp); int i; for (i = 0 ; i < ary_size ; ++i) { sprintf(str,"%f\n",ary[i]); fputs(str,fp); } fclose(fp); } int main(int argc, char *argv[]) { DT* f; f = (DT*)malloc(2*sizeof(DT)); printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY); int verbose = 1; int i, j; char flag; if (argc < 2) { printf("Usage: gaussian -f filename / -s size [-q]\n\n"); printf("-q (quiet) suppresses printing the matrix and result values.\n"); printf("-f (filename) path of input file\n"); printf("-s (size) size of matrix. Create matrix and rhs in this program \n"); printf("The first line of the file contains the dimension of the matrix, n."); printf("The second line of the file is a newline.\n"); printf("The next n lines contain n tab separated values for the matrix."); printf("The next line of the file is a newline.\n"); printf("The next line of the file is a 1xn vector with tab separated values.\n"); printf("The next line of the file is a newline. 
(optional)\n"); printf("The final line of the file is the pre-computed solution. (optional)\n"); printf("Example: matrix4.txt:\n"); printf("4\n"); printf("\n"); printf("-0.6 -0.5 0.7 0.3\n"); printf("-0.3 -0.9 0.3 0.7\n"); printf("-0.4 -0.5 -0.3 -0.8\n"); printf("0.0 -0.1 0.2 0.9\n"); printf("\n"); printf("-0.85 -0.68 0.24 -0.53\n"); printf("\n"); printf("0.7 0.0 -0.4 -0.5\n"); exit(0); } //PrintDeviceProperties(); //char filename[100]; //sprintf(filename,"matrices/matrix%d.txt",size); for(i=1;i<argc;i++) { if (argv[i][0]=='-') {// flag flag = argv[i][1]; switch (flag) { case 's': // platform i++; Size = atoi(argv[i]); printf("Create matrix internally in parse, size = %d \n", Size); a = (float *) malloc(Size * Size * sizeof(float)); create_matrix(a, Size); b = (float *) malloc(Size * sizeof(float)); for (j =0; j< Size; j++) b[j]=1.0; m = (float *) malloc(Size * Size * sizeof(float)); break; case 'f': // platform i++; printf("Read file from %s \n", argv[i]); InitProblemOnce(argv[i]); break; case 'q': // quiet verbose = 0; break; } } } //InitProblemOnce(filename); InitPerRun(); //begin timing struct timeval time_start; gettimeofday(&time_start, NULL); // run kernels ForwardSub(f); //end timing struct timeval time_end; gettimeofday(&time_end, NULL); unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec); if (verbose) { //printf("Matrix m is: \n"); //PrintMat(m, Size, Size); //printf("Matrix a is: \n"); //PrintMat(a, Size, Size); printf("Array b is: \n"); //PrintAry(b, Size); } BackSub(); if (verbose) { printf("The final solution is: \n"); //PrintAry(finalVec,Size); print(finalVec,Size); } printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6); printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6); /*printf("%d,%d\n",size,time_total); fprintf(stderr,"%d,%d\n",size,time_total);*/ printf("%x %x\n",*(int *)&(f[0]),*(int *)&(f[1])); free(m); free(a); free(b); } /*------------------------------------------------------ ** PrintDeviceProperties **----------------------------------------------------- */ void PrintDeviceProperties(){ hipDeviceProp_t deviceProp; int nDevCount = 0; hipGetDeviceCount( &nDevCount ); printf( "Total Device found: %d", nDevCount ); for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx ) { memset( &deviceProp, 0, sizeof(deviceProp)); if( hipSuccess == hipGetDeviceProperties(&deviceProp, nDeviceIdx)) { printf( "\nDevice Name \t\t - %s ", deviceProp.name ); printf( "\n**************************************"); printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 ); printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 ); printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock ); printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize ); printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch ); printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock ); printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] ); printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] ); printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem ); printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor ); printf( "\nClock rate \t\t\t\t - %d 
KHz", deviceProp.clockRate ); printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment ); printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" ); printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount ); } else printf( "\n%s", hipGetErrorString(hipGetLastError())); } } /*------------------------------------------------------ ** InitProblemOnce -- Initialize all of matrices and ** vectors by opening a data file specified by the user. ** ** We used dynamic array *a, *b, and *m to allocate ** the memory storages. **------------------------------------------------------ */ void InitProblemOnce(char *filename) { //char *filename = argv[1]; //printf("Enter the data file name: "); //scanf("%s", filename); //printf("The file name is: %s\n", filename); fp = fopen(filename, "r"); fscanf(fp, "%d", &Size); a = (float *) malloc(Size * Size * sizeof(float)); InitMat(a, Size, Size); //printf("The input matrix a is:\n"); //PrintMat(a, Size, Size); b = (float *) malloc(Size * sizeof(float)); InitAry(b, Size); //printf("The input array b is:\n"); //PrintAry(b, Size); m = (float *) malloc(Size * Size * sizeof(float)); } /*------------------------------------------------------ ** InitPerRun() -- Initialize the contents of the ** multipier matrix **m **------------------------------------------------------ */ void InitPerRun() { int i; for (i=0; i<Size*Size; i++) *(m+i) = 0.0; } /*------------------------------------------------------- ** Fan1() -- Calculate multiplier matrix ** Pay attention to the index. Index i give the range ** which starts from 0 to range-1. The real values of ** the index should be adjust and related with the value ** of t which is defined on the ForwardSub(). **------------------------------------------------------- */ __global__ void Fan1(float *m_cuda, float *a_cuda, int Size, int t,DT* f) { //if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) printf("."); //printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t); if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return; *(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / *(a_cuda+Size*t+t); } /*------------------------------------------------------- ** Fan2() -- Modify the matrix A into LUD **------------------------------------------------------- */ __global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t,DT* f) { if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return; if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return; int xidx = blockIdx.x * blockDim.x + threadIdx.x; int yidx = blockIdx.y * blockDim.y + threadIdx.y; //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y); a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)]; //a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t]; if(yidx == 0){ //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y); //printf("xidx:%d,yidx:%d\n",xidx,yidx); b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t]; } } /*------------------------------------------------------ ** ForwardSub() -- Forward substitution of Gaussian ** elimination. 
**------------------------------------------------------ */ void ForwardSub(DT* f) { int t; float *m_cuda,*a_cuda,*b_cuda; DT *F_gpu; hipMalloc((void **)&F_gpu, sizeof(DT) *2); // allocate memory on GPU hipMalloc((void **) &m_cuda, Size * Size * sizeof(float)); hipMalloc((void **) &a_cuda, Size * Size * sizeof(float)); hipMalloc((void **) &b_cuda, Size * sizeof(float)); // copy memory to GPU hipMemcpy(m_cuda, m, Size * Size * sizeof(float),hipMemcpyHostToDevice ); hipMemcpy(a_cuda, a, Size * Size * sizeof(float),hipMemcpyHostToDevice ); hipMemcpy(b_cuda, b, Size * sizeof(float),hipMemcpyHostToDevice ); hipMemcpy(F_gpu, f, sizeof(DT) *2, hipMemcpyHostToDevice); int block_size,grid_size; block_size = MAXBLOCKSIZE; grid_size = (Size/block_size) + (!(Size%block_size)? 0:1); //printf("1d grid size: %d\n",grid_size); dim3 dimBlock(block_size); dim3 dimGrid(grid_size); //dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) ); int blockSize2d, gridSize2d; blockSize2d = BLOCK_SIZE_XY; gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1)); dim3 dimBlockXY(blockSize2d,blockSize2d); dim3 dimGridXY(gridSize2d,gridSize2d); // begin timing kernels struct timeval time_start; gettimeofday(&time_start, NULL); for (t=0; t<(Size-1); t++) { hipLaunchKernelGGL(( Fan1), dim3(dimGrid),dim3(dimBlock), 0, 0, m_cuda,a_cuda,Size,t,F_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Fan2), dim3(dimGridXY),dim3(dimBlockXY), 0, 0, m_cuda,a_cuda,b_cuda,Size,Size-t,t,F_gpu); hipDeviceSynchronize(); checkCUDAError("Fan2"); } // end timing kernels struct timeval time_end; gettimeofday(&time_end, NULL); totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec); // copy memory back to CPU hipMemcpy(m, m_cuda, Size * Size * sizeof(float),hipMemcpyDeviceToHost ); hipMemcpy(a, a_cuda, Size * Size * sizeof(float),hipMemcpyDeviceToHost ); hipMemcpy(b, b_cuda, Size * sizeof(float),hipMemcpyDeviceToHost ); hipMemcpy(f, F_gpu, sizeof(DT) *2, hipMemcpyDeviceToHost); hipFree(m_cuda); hipFree(a_cuda); hipFree(b_cuda); } /*------------------------------------------------------ ** BackSub() -- Backward substitution **------------------------------------------------------ */ void BackSub() { // create a new vector to hold the final answer finalVec = (float *) malloc(Size * sizeof(float)); // solve "bottom up" int i,j; for(i=0;i<Size;i++){ finalVec[Size-i-1]=b[Size-i-1]; for(j=0;j<i;j++) { finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1]; } finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1)); } } void InitMat(float *ary, int nrow, int ncol) { int i, j; for (i=0; i<nrow; i++) { for (j=0; j<ncol; j++) { fscanf(fp, "%f", ary+Size*i+j); } } } /*------------------------------------------------------ ** PrintMat() -- Print the contents of the matrix **------------------------------------------------------ */ void PrintMat(float *ary, int nrow, int ncol) { int i, j; for (i=0; i<nrow; i++) { for (j=0; j<ncol; j++) { printf("%8.2f ", *(ary+Size*i+j)); } printf("\n"); } printf("\n"); } /*------------------------------------------------------ ** InitAry() -- Initialize the array (vector) by reading ** data from the data file **------------------------------------------------------ */ void InitAry(float *ary, int ary_size) { int i; for (i=0; i<ary_size; i++) { fscanf(fp, "%f", &ary[i]); } } /*------------------------------------------------------ ** PrintAry() -- Print the contents of the array (vector) 
**------------------------------------------------------ */ void PrintAry(float *ary, int ary_size) { int i; for (i=0; i<ary_size; i++) { printf("%.2f ", ary[i]); } printf("\n\n"); } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } }
77cd3afc93a51a24d98de32de7b7427d9173c737.cu
/*----------------------------------------------------------- ** gaussian.cu -- The program is to solve a linear system Ax = b ** by using Gaussian Elimination. The algorithm on page 101 ** ("Foundations of Parallel Programming") is used. ** The sequential version is gaussian.c. This parallel ** implementation converts three independent for() loops ** into three Fans. Use the data file ge_3.dat to verify ** the correction of the output. ** ** Written by Andreas Kura, 02/15/95 ** Modified by Chong-wei Xu, 04/20/95 ** Modified by Chris Gregg for CUDA, 07/20/2009 **----------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "cuda.h" #include <string.h> #include <math.h> #define STR_SIZE 256 #ifdef RD_WG_SIZE_0_0 #define MAXBLOCKSIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define MAXBLOCKSIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define MAXBLOCKSIZE RD_WG_SIZE #else #define MAXBLOCKSIZE 512 #endif //2D defines. Go from specific to general #ifdef RD_WG_SIZE_1_0 #define BLOCK_SIZE_XY RD_WG_SIZE_1_0 #elif defined(RD_WG_SIZE_1) #define BLOCK_SIZE_XY RD_WG_SIZE_1 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE_XY RD_WG_SIZE #else #define BLOCK_SIZE_XY 4 #endif typedef double DT; int Size; float *a, *b, *finalVec; float *m; FILE *fp; void InitProblemOnce(char *filename); void InitPerRun(); void ForwardSub(DT* f); void BackSub(); __global__ void Fan1(float *m, float *a, int Size, int t); __global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t); void InitMat(float *ary, int nrow, int ncol); void InitAry(float *ary, int ary_size); void PrintMat(float *ary, int nrow, int ncolumn); void PrintAry(float *ary, int ary_size); void PrintDeviceProperties(); void checkCUDAError(const char *msg); void print(float *ary, int ary_size); unsigned int totalKernelTime = 0; // create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06 void create_matrix(float *m, int size){ int i,j; float lamda = -0.01; float coe[2*size-1]; float coe_i =0.0; for (i=0; i < size; i++) { coe_i = 10*exp(lamda*i); j=size-1+i; coe[j]=coe_i; j=size-1-i; coe[j]=coe_i; } for (i=0; i < size; i++) { for (j=0; j < size; j++) { m[i*size+j]=coe[size-1-i+j]; } } } // print the result to a file void print(float *ary, int ary_size){ FILE* fp; fp=fopen("out.txt","w"); char str[STR_SIZE]; if(!fp) { printf("Error writing!"); return; } //sprintf(str,"%d",NI); //fputs(str,fp); int i; for (i = 0 ; i < ary_size ; ++i) { sprintf(str,"%f\n",ary[i]); fputs(str,fp); } fclose(fp); } int main(int argc, char *argv[]) { DT* f; f = (DT*)malloc(2*sizeof(DT)); printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY); int verbose = 1; int i, j; char flag; if (argc < 2) { printf("Usage: gaussian -f filename / -s size [-q]\n\n"); printf("-q (quiet) suppresses printing the matrix and result values.\n"); printf("-f (filename) path of input file\n"); printf("-s (size) size of matrix. Create matrix and rhs in this program \n"); printf("The first line of the file contains the dimension of the matrix, n."); printf("The second line of the file is a newline.\n"); printf("The next n lines contain n tab separated values for the matrix."); printf("The next line of the file is a newline.\n"); printf("The next line of the file is a 1xn vector with tab separated values.\n"); printf("The next line of the file is a newline. (optional)\n"); printf("The final line of the file is the pre-computed solution. 
(optional)\n"); printf("Example: matrix4.txt:\n"); printf("4\n"); printf("\n"); printf("-0.6 -0.5 0.7 0.3\n"); printf("-0.3 -0.9 0.3 0.7\n"); printf("-0.4 -0.5 -0.3 -0.8\n"); printf("0.0 -0.1 0.2 0.9\n"); printf("\n"); printf("-0.85 -0.68 0.24 -0.53\n"); printf("\n"); printf("0.7 0.0 -0.4 -0.5\n"); exit(0); } //PrintDeviceProperties(); //char filename[100]; //sprintf(filename,"matrices/matrix%d.txt",size); for(i=1;i<argc;i++) { if (argv[i][0]=='-') {// flag flag = argv[i][1]; switch (flag) { case 's': // platform i++; Size = atoi(argv[i]); printf("Create matrix internally in parse, size = %d \n", Size); a = (float *) malloc(Size * Size * sizeof(float)); create_matrix(a, Size); b = (float *) malloc(Size * sizeof(float)); for (j =0; j< Size; j++) b[j]=1.0; m = (float *) malloc(Size * Size * sizeof(float)); break; case 'f': // platform i++; printf("Read file from %s \n", argv[i]); InitProblemOnce(argv[i]); break; case 'q': // quiet verbose = 0; break; } } } //InitProblemOnce(filename); InitPerRun(); //begin timing struct timeval time_start; gettimeofday(&time_start, NULL); // run kernels ForwardSub(f); //end timing struct timeval time_end; gettimeofday(&time_end, NULL); unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec); if (verbose) { //printf("Matrix m is: \n"); //PrintMat(m, Size, Size); //printf("Matrix a is: \n"); //PrintMat(a, Size, Size); printf("Array b is: \n"); //PrintAry(b, Size); } BackSub(); if (verbose) { printf("The final solution is: \n"); //PrintAry(finalVec,Size); print(finalVec,Size); } printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6); printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6); /*printf("%d,%d\n",size,time_total); fprintf(stderr,"%d,%d\n",size,time_total);*/ printf("%x %x\n",*(int *)&(f[0]),*(int *)&(f[1])); free(m); free(a); free(b); } /*------------------------------------------------------ ** PrintDeviceProperties **----------------------------------------------------- */ void PrintDeviceProperties(){ cudaDeviceProp deviceProp; int nDevCount = 0; cudaGetDeviceCount( &nDevCount ); printf( "Total Device found: %d", nDevCount ); for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx ) { memset( &deviceProp, 0, sizeof(deviceProp)); if( cudaSuccess == cudaGetDeviceProperties(&deviceProp, nDeviceIdx)) { printf( "\nDevice Name \t\t - %s ", deviceProp.name ); printf( "\n**************************************"); printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 ); printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 ); printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock ); printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize ); printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch ); printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock ); printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] ); printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] ); printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem ); printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor ); printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate ); printf( "\nTexture Alignment \t\t\t - %zu bytes", 
deviceProp.textureAlignment ); printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" ); printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount ); } else printf( "\n%s", cudaGetErrorString(cudaGetLastError())); } } /*------------------------------------------------------ ** InitProblemOnce -- Initialize all of matrices and ** vectors by opening a data file specified by the user. ** ** We used dynamic array *a, *b, and *m to allocate ** the memory storages. **------------------------------------------------------ */ void InitProblemOnce(char *filename) { //char *filename = argv[1]; //printf("Enter the data file name: "); //scanf("%s", filename); //printf("The file name is: %s\n", filename); fp = fopen(filename, "r"); fscanf(fp, "%d", &Size); a = (float *) malloc(Size * Size * sizeof(float)); InitMat(a, Size, Size); //printf("The input matrix a is:\n"); //PrintMat(a, Size, Size); b = (float *) malloc(Size * sizeof(float)); InitAry(b, Size); //printf("The input array b is:\n"); //PrintAry(b, Size); m = (float *) malloc(Size * Size * sizeof(float)); } /*------------------------------------------------------ ** InitPerRun() -- Initialize the contents of the ** multipier matrix **m **------------------------------------------------------ */ void InitPerRun() { int i; for (i=0; i<Size*Size; i++) *(m+i) = 0.0; } /*------------------------------------------------------- ** Fan1() -- Calculate multiplier matrix ** Pay attention to the index. Index i give the range ** which starts from 0 to range-1. The real values of ** the index should be adjust and related with the value ** of t which is defined on the ForwardSub(). **------------------------------------------------------- */ __global__ void Fan1(float *m_cuda, float *a_cuda, int Size, int t,DT* f) { //if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) printf("."); //printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t); if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return; *(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / *(a_cuda+Size*t+t); } /*------------------------------------------------------- ** Fan2() -- Modify the matrix A into LUD **------------------------------------------------------- */ __global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t,DT* f) { if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return; if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return; int xidx = blockIdx.x * blockDim.x + threadIdx.x; int yidx = blockIdx.y * blockDim.y + threadIdx.y; //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y); a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)]; //a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t]; if(yidx == 0){ //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y); //printf("xidx:%d,yidx:%d\n",xidx,yidx); b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t]; } } /*------------------------------------------------------ ** ForwardSub() -- Forward substitution of Gaussian ** elimination. 
**------------------------------------------------------ */ void ForwardSub(DT* f) { int t; float *m_cuda,*a_cuda,*b_cuda; DT *F_gpu; cudaMalloc((void **)&F_gpu, sizeof(DT) *2); // allocate memory on GPU cudaMalloc((void **) &m_cuda, Size * Size * sizeof(float)); cudaMalloc((void **) &a_cuda, Size * Size * sizeof(float)); cudaMalloc((void **) &b_cuda, Size * sizeof(float)); // copy memory to GPU cudaMemcpy(m_cuda, m, Size * Size * sizeof(float),cudaMemcpyHostToDevice ); cudaMemcpy(a_cuda, a, Size * Size * sizeof(float),cudaMemcpyHostToDevice ); cudaMemcpy(b_cuda, b, Size * sizeof(float),cudaMemcpyHostToDevice ); cudaMemcpy(F_gpu, f, sizeof(DT) *2, cudaMemcpyHostToDevice); int block_size,grid_size; block_size = MAXBLOCKSIZE; grid_size = (Size/block_size) + (!(Size%block_size)? 0:1); //printf("1d grid size: %d\n",grid_size); dim3 dimBlock(block_size); dim3 dimGrid(grid_size); //dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) ); int blockSize2d, gridSize2d; blockSize2d = BLOCK_SIZE_XY; gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1)); dim3 dimBlockXY(blockSize2d,blockSize2d); dim3 dimGridXY(gridSize2d,gridSize2d); // begin timing kernels struct timeval time_start; gettimeofday(&time_start, NULL); for (t=0; t<(Size-1); t++) { Fan1<<<dimGrid,dimBlock>>>(m_cuda,a_cuda,Size,t,F_gpu); cudaThreadSynchronize(); Fan2<<<dimGridXY,dimBlockXY>>>(m_cuda,a_cuda,b_cuda,Size,Size-t,t,F_gpu); cudaThreadSynchronize(); checkCUDAError("Fan2"); } // end timing kernels struct timeval time_end; gettimeofday(&time_end, NULL); totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec); // copy memory back to CPU cudaMemcpy(m, m_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost ); cudaMemcpy(a, a_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost ); cudaMemcpy(b, b_cuda, Size * sizeof(float),cudaMemcpyDeviceToHost ); cudaMemcpy(f, F_gpu, sizeof(DT) *2, cudaMemcpyDeviceToHost); cudaFree(m_cuda); cudaFree(a_cuda); cudaFree(b_cuda); } /*------------------------------------------------------ ** BackSub() -- Backward substitution **------------------------------------------------------ */ void BackSub() { // create a new vector to hold the final answer finalVec = (float *) malloc(Size * sizeof(float)); // solve "bottom up" int i,j; for(i=0;i<Size;i++){ finalVec[Size-i-1]=b[Size-i-1]; for(j=0;j<i;j++) { finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1]; } finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1)); } } void InitMat(float *ary, int nrow, int ncol) { int i, j; for (i=0; i<nrow; i++) { for (j=0; j<ncol; j++) { fscanf(fp, "%f", ary+Size*i+j); } } } /*------------------------------------------------------ ** PrintMat() -- Print the contents of the matrix **------------------------------------------------------ */ void PrintMat(float *ary, int nrow, int ncol) { int i, j; for (i=0; i<nrow; i++) { for (j=0; j<ncol; j++) { printf("%8.2f ", *(ary+Size*i+j)); } printf("\n"); } printf("\n"); } /*------------------------------------------------------ ** InitAry() -- Initialize the array (vector) by reading ** data from the data file **------------------------------------------------------ */ void InitAry(float *ary, int ary_size) { int i; for (i=0; i<ary_size; i++) { fscanf(fp, "%f", &ary[i]); } } /*------------------------------------------------------ ** PrintAry() -- Print the contents of the array (vector) **------------------------------------------------------ */ void PrintAry(float *ary, int 
ary_size) { int i; for (i=0; i<ary_size; i++) { printf("%.2f ", ary[i]); } printf("\n\n"); } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } }
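Besides the usual renames (cudaMalloc/cudaMemcpy/cudaFree to hipMalloc/hipMemcpy/hipFree, cudaDeviceProp and cudaGetDeviceProperties to hipDeviceProp_t and hipGetDeviceProperties), this pair shows that the deprecated cudaThreadSynchronize() is mapped to hipDeviceSynchronize(); on the CUDA side the modern equivalent is cudaDeviceSynchronize(). A minimal, hypothetical sketch of the launch / synchronize / check step that ForwardSub() repeats for Fan1 and Fan2, written with the non-deprecated CUDA call:

#include <stdio.h>

__global__ void step(float *x) { x[0] += 1.0f; }   // stand-in for Fan1/Fan2

void run_step(float *d_x)
{
    step<<<1, 1>>>(d_x);
    cudaDeviceSynchronize();                // modern replacement for cudaThreadSynchronize()
    cudaError_t err = cudaGetLastError();   // same pattern as checkCUDAError("Fan2") above
    if (err != cudaSuccess)
        fprintf(stderr, "Cuda error: %s.\n", cudaGetErrorString(err));
}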
71a6338a38ae9efcf3b5ea827531fb92711c08ab.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2019 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "taso/ops.h" #include "taso/cuda_helper.h" using namespace taso; void Matmul::map(void) { // create descriptors checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); helperSetTensorDescriptor(outputs[0], outputTensor); if (activation != AC_MODE_NONE) { cudnnActivationMode_t mode; switch (activation) { case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_TANH: mode = CUDNN_ACTIVATION_TANH; break; default: assert(false); } checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode, CUDNN_NOT_PROPAGATE_NAN, 0.0)); } // allocate tensors size_t outputSize = sizeof(DATATYPE) * outputs[0].volume(); checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize)); } void Matmul::unmap(void) { checkCUDNN(cudnnDestroyTensorDescriptor(outputTensor)); if (activation != AC_MODE_NONE) { checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc)); } checkCUDA(hipFree(outputs[0].data_ptr)); } void Matmul::forward(bool block) { const float alpha = 1.0f; const float beta = 0.0f; int numDim = outputs[0].numDim; int m = inputs[0].dim[numDim-2]; int n = inputs[1].dim[numDim-1]; int k = inputs[0].dim[numDim-1]; hipblasOperation_t transA, transB; int lda, ldb, ldc; if (inputs[0].stride[numDim-2] == 1) { transA = HIPBLAS_OP_N; lda = inputs[0].stride[numDim-1]; } else { assert(inputs[0].stride[numDim-1] == 1); transA = HIPBLAS_OP_T; lda = inputs[0].stride[numDim-2]; } if (inputs[1].stride[numDim-2] == 1) { transB = HIPBLAS_OP_N; ldb = inputs[1].stride[numDim-1]; } else { assert(inputs[1].stride[numDim-1] == 1); transB = HIPBLAS_OP_T; ldb = inputs[1].stride[numDim-2]; } ldc = outputs[0].stride[numDim-1]; if (numDim == 2) { // Normal 2D Matmul checkCUDA(hipblasSgemm(model->blas, transA, transB, m, n, k, &alpha, (float*)inputs[0].data_ptr, lda, (float*)inputs[1].data_ptr, ldb, &beta, (float*)outputs[0].data_ptr, ldc)); } else { // Batched Matmul int strideA = inputs[0].stride[numDim-3]; int strideB = inputs[1].stride[numDim-3]; int strideC = outputs[0].stride[numDim-3]; int batch = 1; for (int i = 0; i < numDim-2; i++) batch *= outputs[0].dim[i]; checkCUDA(hipblasSgemmStridedBatched(model->blas, transA, transB, m, n, k, &alpha, (float*)inputs[0].data_ptr, lda, strideA, (float*)inputs[1].data_ptr, ldb, strideB, &beta, (float*)outputs[0].data_ptr, ldc, strideC, batch)); } if (activation != AC_MODE_NONE) checkCUDNN(cudnnActivationForward(model->dnn, actiDesc, &alpha, outputTensor, outputs[0].data_ptr, &beta, outputTensor, outputs[0].data_ptr)); if (block) checkCUDA(hipDeviceSynchronize()); } void Matmul::set_layout(void) { // CuBLAS uses column-major. 
int numDim = outputs[0].numDim; outputs[0].stride[numDim-2] = 1; outputs[0].stride[numDim-1] = outputs[0].dim[numDim-2]; int size = outputs[0].dim[numDim-2] * outputs[0].dim[numDim-1]; for (int i = numDim-3; i >= 0; i--) { outputs[0].stride[i] = size; size *= outputs[0].dim[i]; } assert(size == outputs[0].volume()); } void Model::measure_matmul_cost(Matmul* mm) { const float alpha = 1.0f; const float beta = 0.0f; int numDim = mm->outputs[0].numDim; int m = mm->inputs[0].dim[numDim-2]; int n = mm->inputs[1].dim[numDim-1]; int k = mm->inputs[0].dim[numDim-1]; hipblasOperation_t transA, transB; int lda, ldb, ldc; if (mm->inputs[0].stride[numDim-2] == 1 && mm->inputs[0].stride[numDim-1] >= m) { transA = HIPBLAS_OP_N; lda = mm->inputs[0].stride[numDim-1]; } else { assert(mm->inputs[0].stride[numDim-1] == 1 && mm->inputs[0].stride[numDim-2] >= k); transA = HIPBLAS_OP_T; lda = mm->inputs[0].stride[numDim-2]; } if (mm->inputs[1].stride[numDim-2] == 1 && mm->inputs[1].stride[numDim-1] >= k) { transB = HIPBLAS_OP_N; ldb = mm->inputs[1].stride[numDim-1]; } else { assert(mm->inputs[1].stride[numDim-1] == 1 && mm->inputs[1].stride[numDim-2] >= n); transB = HIPBLAS_OP_T; ldb = mm->inputs[1].stride[numDim-2]; } ldc = mm->outputs[0].stride[numDim-1]; if (mm->activation != AC_MODE_NONE) { cudnnActivationMode_t mode; switch (mm->activation) { case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_TANH: mode = CUDNN_ACTIVATION_TANH; break; default: assert(false); } checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode, CUDNN_NOT_PROPAGATE_NAN, 0.0)); } helperSetTensorDescriptor(mm->outputs[0], outputTensor); checkCUDA(hipDeviceSynchronize()); for (int i = 0; i < WARMUP_TIMES + REPEAT_TIMES; i++) { if (i == WARMUP_TIMES) checkCUDA(hipEventRecord(startEvent)); if (numDim == 2) { // Normal 2D Matmul checkCUDA(hipblasSgemm(blas, transA, transB, m, n, k, &alpha, inputPtr, lda, filterPtr, ldb, &beta, outputPtr, ldc)); } else { // Batched Matmul int strideA = mm->inputs[0].stride[numDim-3]; int strideB = mm->inputs[1].stride[numDim-3]; int strideC = mm->outputs[0].stride[numDim-3]; int batch = 1; for (int i = 0; i < numDim-2; i++) batch *= mm->outputs[0].dim[i]; checkCUDA(hipblasSgemmStridedBatched(blas, transA, transB, m, n, k, &alpha, inputPtr, lda, strideA, filterPtr, ldb, strideB, &beta, outputPtr, ldc, strideC, batch)); } if (mm->activation != AC_MODE_NONE) checkCUDNN(cudnnActivationForward(dnn, actiDesc, &alpha, outputTensor, outputPtr, &beta, outputTensor, outputPtr)); } checkCUDA(hipEventRecord(endEvent)); checkCUDA(hipEventSynchronize(endEvent)); float milliseconds; hipEventElapsedTime(&milliseconds, startEvent, endEvent); mm->runtime = milliseconds / REPEAT_TIMES; if (print_cost) printf(" measure[Matmul]: %s %s acti(%d) cost(%.4lf)\n", mm->inputs[0].to_string("input").c_str(), mm->inputs[1].to_string("weight").c_str(), mm->activation, mm->runtime); }
71a6338a38ae9efcf3b5ea827531fb92711c08ab.cu
/* Copyright 2019 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "taso/ops.h" #include "taso/cuda_helper.h" using namespace taso; void Matmul::map(void) { // create descriptors checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); helperSetTensorDescriptor(outputs[0], outputTensor); if (activation != AC_MODE_NONE) { cudnnActivationMode_t mode; switch (activation) { case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_TANH: mode = CUDNN_ACTIVATION_TANH; break; default: assert(false); } checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode, CUDNN_NOT_PROPAGATE_NAN, 0.0)); } // allocate tensors size_t outputSize = sizeof(DATATYPE) * outputs[0].volume(); checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize)); } void Matmul::unmap(void) { checkCUDNN(cudnnDestroyTensorDescriptor(outputTensor)); if (activation != AC_MODE_NONE) { checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc)); } checkCUDA(cudaFree(outputs[0].data_ptr)); } void Matmul::forward(bool block) { const float alpha = 1.0f; const float beta = 0.0f; int numDim = outputs[0].numDim; int m = inputs[0].dim[numDim-2]; int n = inputs[1].dim[numDim-1]; int k = inputs[0].dim[numDim-1]; cublasOperation_t transA, transB; int lda, ldb, ldc; if (inputs[0].stride[numDim-2] == 1) { transA = CUBLAS_OP_N; lda = inputs[0].stride[numDim-1]; } else { assert(inputs[0].stride[numDim-1] == 1); transA = CUBLAS_OP_T; lda = inputs[0].stride[numDim-2]; } if (inputs[1].stride[numDim-2] == 1) { transB = CUBLAS_OP_N; ldb = inputs[1].stride[numDim-1]; } else { assert(inputs[1].stride[numDim-1] == 1); transB = CUBLAS_OP_T; ldb = inputs[1].stride[numDim-2]; } ldc = outputs[0].stride[numDim-1]; if (numDim == 2) { // Normal 2D Matmul checkCUDA(cublasSgemm(model->blas, transA, transB, m, n, k, &alpha, (float*)inputs[0].data_ptr, lda, (float*)inputs[1].data_ptr, ldb, &beta, (float*)outputs[0].data_ptr, ldc)); } else { // Batched Matmul int strideA = inputs[0].stride[numDim-3]; int strideB = inputs[1].stride[numDim-3]; int strideC = outputs[0].stride[numDim-3]; int batch = 1; for (int i = 0; i < numDim-2; i++) batch *= outputs[0].dim[i]; checkCUDA(cublasSgemmStridedBatched(model->blas, transA, transB, m, n, k, &alpha, (float*)inputs[0].data_ptr, lda, strideA, (float*)inputs[1].data_ptr, ldb, strideB, &beta, (float*)outputs[0].data_ptr, ldc, strideC, batch)); } if (activation != AC_MODE_NONE) checkCUDNN(cudnnActivationForward(model->dnn, actiDesc, &alpha, outputTensor, outputs[0].data_ptr, &beta, outputTensor, outputs[0].data_ptr)); if (block) checkCUDA(cudaDeviceSynchronize()); } void Matmul::set_layout(void) { // CuBLAS uses column-major. 
int numDim = outputs[0].numDim; outputs[0].stride[numDim-2] = 1; outputs[0].stride[numDim-1] = outputs[0].dim[numDim-2]; int size = outputs[0].dim[numDim-2] * outputs[0].dim[numDim-1]; for (int i = numDim-3; i >= 0; i--) { outputs[0].stride[i] = size; size *= outputs[0].dim[i]; } assert(size == outputs[0].volume()); } void Model::measure_matmul_cost(Matmul* mm) { const float alpha = 1.0f; const float beta = 0.0f; int numDim = mm->outputs[0].numDim; int m = mm->inputs[0].dim[numDim-2]; int n = mm->inputs[1].dim[numDim-1]; int k = mm->inputs[0].dim[numDim-1]; cublasOperation_t transA, transB; int lda, ldb, ldc; if (mm->inputs[0].stride[numDim-2] == 1 && mm->inputs[0].stride[numDim-1] >= m) { transA = CUBLAS_OP_N; lda = mm->inputs[0].stride[numDim-1]; } else { assert(mm->inputs[0].stride[numDim-1] == 1 && mm->inputs[0].stride[numDim-2] >= k); transA = CUBLAS_OP_T; lda = mm->inputs[0].stride[numDim-2]; } if (mm->inputs[1].stride[numDim-2] == 1 && mm->inputs[1].stride[numDim-1] >= k) { transB = CUBLAS_OP_N; ldb = mm->inputs[1].stride[numDim-1]; } else { assert(mm->inputs[1].stride[numDim-1] == 1 && mm->inputs[1].stride[numDim-2] >= n); transB = CUBLAS_OP_T; ldb = mm->inputs[1].stride[numDim-2]; } ldc = mm->outputs[0].stride[numDim-1]; if (mm->activation != AC_MODE_NONE) { cudnnActivationMode_t mode; switch (mm->activation) { case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_TANH: mode = CUDNN_ACTIVATION_TANH; break; default: assert(false); } checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode, CUDNN_NOT_PROPAGATE_NAN, 0.0)); } helperSetTensorDescriptor(mm->outputs[0], outputTensor); checkCUDA(cudaDeviceSynchronize()); for (int i = 0; i < WARMUP_TIMES + REPEAT_TIMES; i++) { if (i == WARMUP_TIMES) checkCUDA(cudaEventRecord(startEvent)); if (numDim == 2) { // Normal 2D Matmul checkCUDA(cublasSgemm(blas, transA, transB, m, n, k, &alpha, inputPtr, lda, filterPtr, ldb, &beta, outputPtr, ldc)); } else { // Batched Matmul int strideA = mm->inputs[0].stride[numDim-3]; int strideB = mm->inputs[1].stride[numDim-3]; int strideC = mm->outputs[0].stride[numDim-3]; int batch = 1; for (int i = 0; i < numDim-2; i++) batch *= mm->outputs[0].dim[i]; checkCUDA(cublasSgemmStridedBatched(blas, transA, transB, m, n, k, &alpha, inputPtr, lda, strideA, filterPtr, ldb, strideB, &beta, outputPtr, ldc, strideC, batch)); } if (mm->activation != AC_MODE_NONE) checkCUDNN(cudnnActivationForward(dnn, actiDesc, &alpha, outputTensor, outputPtr, &beta, outputTensor, outputPtr)); } checkCUDA(cudaEventRecord(endEvent)); checkCUDA(cudaEventSynchronize(endEvent)); float milliseconds; cudaEventElapsedTime(&milliseconds, startEvent, endEvent); mm->runtime = milliseconds / REPEAT_TIMES; if (print_cost) printf(" measure[Matmul]: %s %s acti(%d) cost(%.4lf)\n", mm->inputs[0].to_string("input").c_str(), mm->inputs[1].to_string("weight").c_str(), mm->activation, mm->runtime); }
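The transA/transB/lda selection that both versions of this file perform exists because cuBLAS, and hipBLAS after conversion, assume column-major storage: an operand stored column-major (row-index stride of 1) is passed as-is with CUBLAS_OP_N, while one stored row-major (column-index stride of 1) is passed as its transpose, with the other stride as the leading dimension. The sketch below is a generic illustration of that idea for plain row-major matrices, not TASO's exact layout convention; the function name and the operand-swapping idiom are mine.

// Generic row-major GEMM on top of column-major cuBLAS: compute C = A * B
// (all row-major) as the column-major product C^T = B^T * A^T.
#include <cublas_v2.h>
#include <cuda_runtime.h>

// d_A is m x k, d_B is k x n, d_C is m x n, all row-major device buffers.
void rowMajorSgemm(cublasHandle_t handle, int m, int n, int k,
                   const float* d_A, const float* d_B, float* d_C) {
  const float alpha = 1.0f, beta = 0.0f;
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
              n, m, k,
              &alpha,
              d_B, n,   // B seen column-major is B^T (n x k), leading dimension n
              d_A, k,   // A seen column-major is A^T (k x m), leading dimension k
              &beta,
              d_C, n);  // C buffer holds C^T (n x m) column-major, leading dimension n
}

The batched branch follows the same reasoning: cublasSgemmStridedBatched (hipblasSgemmStridedBatched after hipify) adds one per-matrix stride per operand, so the leading-dimension logic is reused unchanged across the batch.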
789775934ff4712d493309fab6c68916969378e2.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define BLOCK_SIZE 32 #define WA 64 #define HA 64 #define HC 3 #define WC 3 #define PAD 1 #define WB (WA+2*PAD - WC + 1) #define HB (HA+2*PAD - HC + 1) #define CHANNEL_SIZE 3 __device__ void flat_conv(float* Input, float* Kernel, float* Output,int* image_size, int* kernel_size, int* pad,int* out_w) { //__shared__ float kernel_part[kernel_size[2]][kernel_size[3]][kernel_size[1]]; //__shared__ float kernel_part[3][3][3]; extern __shared__ float kernel_part[]; int col_idx = blockIdx.x - pad[0] + threadIdx.x; int row_idx = blockIdx.y - pad[0] + threadIdx.y; int img_flat_size = image_size[1]*image_size[2]; int kernel_flat_size = kernel_size[2]*kernel_size[3]; if( image_size[2]>col_idx && col_idx >=0 && image_size[1]>row_idx && row_idx >=0) { kernel_part[(threadIdx.y * kernel_size[3]+threadIdx.x)*kernel_size[1]+threadIdx.z] = Input[(col_idx * image_size[2] +row_idx) + img_flat_size*threadIdx.z] * Kernel[threadIdx.y*kernel_size[3] + threadIdx.x + kernel_flat_size*threadIdx.z]; } else { kernel_part[(threadIdx.y * kernel_size[3]+threadIdx.x)*kernel_size[1]+threadIdx.z] = 0; } //__syncthreads; atomicAdd(&(Output[blockIdx.x * out_w[0] +blockIdx.y]), kernel_part[(threadIdx.y * kernel_size[3]+threadIdx.x)*kernel_size[1]+threadIdx.z]); } __global__ void conv(float* Input, float* Kernel, float* Output,int* image_size, int* kernel_size,int* pad) { int out_w = image_size[2]+2*pad[0] - kernel_size[3] + 1; int out_h = image_size[1]+2*pad[0] - kernel_size[2] + 1; int flat_kernel_size = kernel_size[3]*kernel_size[2]*kernel_size[1]; int flat_img_size = out_w*out_h; flat_conv(Input, Kernel + flat_kernel_size*blockIdx.z , Output + flat_img_size*blockIdx.z, image_size, kernel_size, pad,&out_w); } void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = (rand() / (float)RAND_MAX); } __host__ int main(void) { // float h_a[3][64][64] ={0.0}; // h_a[0][0][0] = 2.1; // h_a[1][0][0] = 2.1; // h_a[2][0][0] = 2.1; // float h_b[2][3][3][3] ={1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0, // 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0, // 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0, // 2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0, // 2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0, // 2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0}; // float h_c[2][64][64] ={0.0}; int kernel_size[4] ={2,3,3,3}; //O I H W; int image_size[3] = {3,64,64}; // O H W; float* h_a; float *h_b; float* h_c; int h_a_size = sizeof(float)*3*64*64; int h_b_size = sizeof(float)*3*64*64; int h_c_size = sizeof(float)*3*64*64; h_a = (float*)malloc(h_a_size); h_b = (float*)malloc(h_b_size); h_c = (float*)malloc(h_c_size); randomInit(h_a,3*64*64); randomInit(h_b,2*3*3*3); randomInit(h_c,2*64*64); int pad = 1; float *cimg; float *coimg; float *ckernel; int * cimg_size; int * ckernel_size; int * cpad; hipMalloc((void***)&cimg,h_a_size); hipMalloc((void***)&ckernel,h_b_size); hipMalloc((void***)&coimg,h_c_size); hipMalloc(&cimg_size,sizeof(image_size)); hipMalloc(&ckernel_size,sizeof(kernel_size)); hipMalloc(&cpad,sizeof(int)); hipMemcpy(cimg,h_a,h_a_size,hipMemcpyHostToDevice); hipMemcpy(ckernel,h_b,h_b_size,hipMemcpyHostToDevice); hipMemcpy(cimg_size,image_size,sizeof(image_size),hipMemcpyHostToDevice); hipMemcpy(ckernel_size,kernel_size,sizeof(kernel_size),hipMemcpyHostToDevice); hipMemcpy(cpad,&pad,sizeof(int),hipMemcpyHostToDevice); dim3 threads(kernel_size[3], kernel_size[2], kernel_size[1]); dim3 
grid(image_size[2],image_size[1],kernel_size[0]); clock_t start = clock(); int flat_kernel_size = kernel_size[3]* kernel_size[2]* kernel_size[1]*sizeof(float); hipLaunchKernelGGL(( conv) , dim3(grid),dim3(threads),flat_kernel_size, 0, cimg,ckernel,coimg,cimg_size,ckernel_size,cpad); //Convolution <<< grid,threads>>>(cimg,ckernel,coimg,cimg_size,ckernel_size); clock_t end = clock(); hipMemcpy(h_c,coimg,h_c_size,hipMemcpyDeviceToHost); int cnt = 0; // for(int i = 0;i < 2; i++) // { // for(int j =0; j < WB;j ++) // { // for(int k =0; k < WB;k ++) // { // printf("%.0f ",h_c[cnt]); // cnt +=1; // } // printf("\n"); // } // printf("\n"); // } hipFree(cimg); hipFree(ckernel); hipFree(coimg); hipFree(cimg_size); hipFree(ckernel_size); hipFree(cpad); printf("%f",(float)(end - start)/CLOCKS_PER_SEC); }
789775934ff4712d493309fab6c68916969378e2.cu
#include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define BLOCK_SIZE 32 #define WA 64 #define HA 64 #define HC 3 #define WC 3 #define PAD 1 #define WB (WA+2*PAD - WC + 1) #define HB (HA+2*PAD - HC + 1) #define CHANNEL_SIZE 3 __device__ void flat_conv(float* Input, float* Kernel, float* Output,int* image_size, int* kernel_size, int* pad,int* out_w) { //__shared__ float kernel_part[kernel_size[2]][kernel_size[3]][kernel_size[1]]; //__shared__ float kernel_part[3][3][3]; extern __shared__ float kernel_part[]; int col_idx = blockIdx.x - pad[0] + threadIdx.x; int row_idx = blockIdx.y - pad[0] + threadIdx.y; int img_flat_size = image_size[1]*image_size[2]; int kernel_flat_size = kernel_size[2]*kernel_size[3]; if( image_size[2]>col_idx && col_idx >=0 && image_size[1]>row_idx && row_idx >=0) { kernel_part[(threadIdx.y * kernel_size[3]+threadIdx.x)*kernel_size[1]+threadIdx.z] = Input[(col_idx * image_size[2] +row_idx) + img_flat_size*threadIdx.z] * Kernel[threadIdx.y*kernel_size[3] + threadIdx.x + kernel_flat_size*threadIdx.z]; } else { kernel_part[(threadIdx.y * kernel_size[3]+threadIdx.x)*kernel_size[1]+threadIdx.z] = 0; } //__syncthreads; atomicAdd(&(Output[blockIdx.x * out_w[0] +blockIdx.y]), kernel_part[(threadIdx.y * kernel_size[3]+threadIdx.x)*kernel_size[1]+threadIdx.z]); } __global__ void conv(float* Input, float* Kernel, float* Output,int* image_size, int* kernel_size,int* pad) { int out_w = image_size[2]+2*pad[0] - kernel_size[3] + 1; int out_h = image_size[1]+2*pad[0] - kernel_size[2] + 1; int flat_kernel_size = kernel_size[3]*kernel_size[2]*kernel_size[1]; int flat_img_size = out_w*out_h; flat_conv(Input, Kernel + flat_kernel_size*blockIdx.z , Output + flat_img_size*blockIdx.z, image_size, kernel_size, pad,&out_w); } void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = (rand() / (float)RAND_MAX); } __host__ int main(void) { // float h_a[3][64][64] ={0.0}; // h_a[0][0][0] = 2.1; // h_a[1][0][0] = 2.1; // h_a[2][0][0] = 2.1; // float h_b[2][3][3][3] ={1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0, // 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0, // 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0, // 2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0, // 2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0, // 2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0}; // float h_c[2][64][64] ={0.0}; int kernel_size[4] ={2,3,3,3}; //O I H W; int image_size[3] = {3,64,64}; // O H W; float* h_a; float *h_b; float* h_c; int h_a_size = sizeof(float)*3*64*64; int h_b_size = sizeof(float)*3*64*64; int h_c_size = sizeof(float)*3*64*64; h_a = (float*)malloc(h_a_size); h_b = (float*)malloc(h_b_size); h_c = (float*)malloc(h_c_size); randomInit(h_a,3*64*64); randomInit(h_b,2*3*3*3); randomInit(h_c,2*64*64); int pad = 1; float *cimg; float *coimg; float *ckernel; int * cimg_size; int * ckernel_size; int * cpad; cudaMalloc((void***)&cimg,h_a_size); cudaMalloc((void***)&ckernel,h_b_size); cudaMalloc((void***)&coimg,h_c_size); cudaMalloc(&cimg_size,sizeof(image_size)); cudaMalloc(&ckernel_size,sizeof(kernel_size)); cudaMalloc(&cpad,sizeof(int)); cudaMemcpy(cimg,h_a,h_a_size,cudaMemcpyHostToDevice); cudaMemcpy(ckernel,h_b,h_b_size,cudaMemcpyHostToDevice); cudaMemcpy(cimg_size,image_size,sizeof(image_size),cudaMemcpyHostToDevice); cudaMemcpy(ckernel_size,kernel_size,sizeof(kernel_size),cudaMemcpyHostToDevice); cudaMemcpy(cpad,&pad,sizeof(int),cudaMemcpyHostToDevice); dim3 threads(kernel_size[3], kernel_size[2], kernel_size[1]); dim3 grid(image_size[2],image_size[1],kernel_size[0]); clock_t start = clock(); int 
flat_kernel_size = kernel_size[3]* kernel_size[2]* kernel_size[1]*sizeof(float); conv <<< grid,threads,flat_kernel_size>>>(cimg,ckernel,coimg,cimg_size,ckernel_size,cpad); //Convolution <<< grid,threads>>>(cimg,ckernel,coimg,cimg_size,ckernel_size); clock_t end = clock(); cudaMemcpy(h_c,coimg,h_c_size,cudaMemcpyDeviceToHost); int cnt = 0; // for(int i = 0;i < 2; i++) // { // for(int j =0; j < WB;j ++) // { // for(int k =0; k < WB;k ++) // { // printf("%.0f ",h_c[cnt]); // cnt +=1; // } // printf("\n"); // } // printf("\n"); // } cudaFree(cimg); cudaFree(ckernel); cudaFree(coimg); cudaFree(cimg_size); cudaFree(ckernel_size); cudaFree(cpad); printf("%f",(float)(end - start)/CLOCKS_PER_SEC); }
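This pair is a compact example of how hipify rewrites a triple-chevron launch: conv <<<grid, threads, flat_kernel_size>>>(...) becomes hipLaunchKernelGGL((conv), dim3(grid), dim3(threads), flat_kernel_size, 0, ...), with the dynamic shared-memory byte count staying in the third slot and the stream (0 here) made explicit. The sketch below isolates the extern __shared__ plus atomicAdd accumulation pattern that flat_conv relies on; the kernel and sizes are my own illustrative choices, not part of these files.

// Minimal sketch of dynamic shared memory sized at launch time plus a single
// atomicAdd per block into a global accumulator (illustrative names only).
#include <cuda_runtime.h>
#include <cstdio>

__global__ void blockSum(const float* in, float* out, int n) {
  extern __shared__ float buf[];                // size comes from the launch's third argument
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  buf[threadIdx.x] = (i < n) ? in[i] : 0.0f;
  __syncthreads();                              // required before reading other threads' slots
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (threadIdx.x < s) buf[threadIdx.x] += buf[threadIdx.x + s];
    __syncthreads();
  }
  if (threadIdx.x == 0) atomicAdd(out, buf[0]); // one atomic per block
}

int main() {
  const int n = 1 << 16, threads = 256, blocks = (n + threads - 1) / threads;
  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, sizeof(float));
  cudaMemset(d_in, 0, n * sizeof(float));
  cudaMemset(d_out, 0, sizeof(float));          // clear the accumulator before atomicAdd
  blockSum<<<blocks, threads, threads * sizeof(float)>>>(d_in, d_out, n);
  cudaDeviceSynchronize();
  float h_out = 0.0f;
  cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("sum = %f\n", h_out);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}

Two details worth flagging against the host code above: the sketch clears the device accumulator before launching, whereas coimg is allocated and never cleared or uploaded before conv atomically accumulates into it; and flat_conv leaves its __syncthreads commented out, which appears safe there only because each thread reads back exactly the shared-memory slot it wrote itself.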
group_norm_kernel.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/group_norm.h> #include <type_traits> #include <thrust/tuple.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/native/TensorIterator.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/block_reduce.cuh> #include <c10/hip/HIPMathCompat.h> namespace at { namespace native { namespace { constexpr int kCUDANumThreads = 256; constexpr int kReduceTileSize = 32; template <typename T> __global__ void RowwiseMomentsCUDAKernel( int64_t N, T eps, const T* X, T* mean, T* rstd) { using T_ACC = acc_type<T, true>; const int64_t i = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; sum1 += static_cast<T_ACC>(X[index]); sum2 += static_cast<T_ACC>(X[index]) * static_cast<T_ACC>(X[index]); } if (blockDim.x <= C10_WARP_SIZE) { sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); } else { __shared__ T_ACC m_shared[C10_WARP_SIZE]; __shared__ T_ACC v_shared[C10_WARP_SIZE]; sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, m_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, v_shared); } if (threadIdx.x == 0) { const T_ACC scale = T_ACC(1) / static_cast<T_ACC>(N); sum1 *= scale; sum2 = c10::hip::compat::max(sum2 * scale - sum1 * sum1, T_ACC(0)); mean[i] = sum1; rstd[i] = c10::hip::compat::rsqrt(sum2 + static_cast<T_ACC>(eps)); } } template <typename T> __global__ void ComputeFusedParamsCUDAKernel( int64_t N, int64_t C, int64_t group, const T* mean, const T* rstd, const T* gamma, const T* beta, acc_type<T, true>* a, acc_type<T, true>* b) { using T_ACC = acc_type<T, true>; const int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N * C) { const int64_t ng = index / (C / group); const int64_t c = index % C; const T_ACC scale = (gamma == nullptr) ? static_cast<T_ACC>(rstd[ng]) : static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(gamma[c]); a[index] = scale; b[index] = -scale * static_cast<T_ACC>(mean[ng]) + ((beta == nullptr) ? 0 : static_cast<T_ACC>(beta[c])); } } template <typename T> __global__ void Compute1dBackwardFusedParamsCUDAKernel( int64_t C, int64_t group, const T* dY, const T* X, const T* mean, const T* rstd, const T* gamma, acc_type<T, true>* c2, acc_type<T, true>* c3) { using T_ACC = acc_type<T, true>; const int64_t G = group; const int64_t D = C / G; const int64_t n = blockIdx.x; const int64_t g = blockIdx.y; const int64_t ng = n * G + g; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t i = threadIdx.x; i < D; i += blockDim.x) { const int64_t index = ng * D + i; const int64_t c = g * D + i; const T_ACC gamma_v = gamma == nullptr ? 
T_ACC(1) : static_cast<T_ACC>(gamma[c]); sum1 += dY[index] * X[index] * gamma_v; sum2 += dY[index] * gamma_v; } if (blockDim.x <= C10_WARP_SIZE) { sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); } else { __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); } if (threadIdx.x == 0) { const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D); const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) * static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) * s; c2[ng] = x; c3[ng] = -x * static_cast<T_ACC>(mean[ng]) - sum2 * static_cast<T_ACC>(rstd[ng]) * s; } } template <typename T> __global__ void GammaBeta1dBackwardCUDAKernel1( int64_t N, int64_t C, int64_t group, const T* dY, const T* X, const T* mean, const T* rstd, T* dgamma, T* dbeta) { using T_ACC = acc_type<T, true>; const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; if (c < C) { const int64_t G = group; const int64_t D = C / G; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t n = 0; n < N; ++n) { const int64_t nc = n * C + c; const int64_t ng = n * G + c / D; const T_ACC dy_acc = static_cast<T_ACC>(dY[nc]); const T_ACC x_acc = static_cast<T_ACC>(X[nc]); sum1 += (dgamma == nullptr) ? T_ACC(0) : ((dy_acc * x_acc - dy_acc * static_cast<T_ACC>(mean[ng])) * static_cast<T_ACC>(rstd[ng])); sum2 += (dbeta == nullptr) ? T_ACC(0) : dy_acc; } if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } template <typename T> __global__ void GammaBeta1dBackwardCUDAKernel2( int64_t N, int64_t C, int64_t group, const T* dY, const T* X, const T* mean, const T* rstd, T* dgamma, T* dbeta) { using T_ACC = acc_type<T, true>; __shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1]; __shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1]; const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; T_ACC dg_sum1 = 0; T_ACC dg_sum2 = 0; T_ACC db_sum1 = 0; T_ACC db_sum2 = 0; if (c < C) { const int64_t G = group; const int64_t D = C / G; // Accumulate each 32 cols into a 32 * 32 tile. // Since the blockDim is (32, 16), accumulate twice for 1st and 2nd 16 rows // of a 32 contiguous elements. for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) { const int64_t n1 = n; const int64_t n2 = n + blockDim.y; const int64_t nc1 = n1 * C + c; const int64_t nc2 = n2 * C + c; const int64_t ng1 = n1 * G + c / D; const int64_t ng2 = n2 * G + c / D; const T_ACC dy1_acc = static_cast<T_ACC>(dY[nc1]); const T_ACC x1_acc = static_cast<T_ACC>(X[nc1]); dg_sum1 += dgamma == nullptr ? T_ACC(0) : ((dy1_acc * x1_acc - dy1_acc * static_cast<T_ACC>(mean[ng1])) * static_cast<T_ACC>(rstd[ng1])); db_sum1 += dbeta == nullptr ? T_ACC(0) : dy1_acc; if (n2 < N) { const T_ACC dy2_acc = static_cast<T_ACC>(dY[nc2]); const T_ACC x2_acc = static_cast<T_ACC>(X[nc2]); dg_sum2 += dgamma == nullptr ? T_ACC(0) : ((dy2_acc * x2_acc - dy2_acc * static_cast<T_ACC>(mean[ng2])) * static_cast<T_ACC>(rstd[ng2])); db_sum2 += dbeta == nullptr ? T_ACC(0) : dy2_acc; } } } // Write accumulated tile to shared memory. g_shared[threadIdx.y][threadIdx.x] = dg_sum1; g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; b_shared[threadIdx.y][threadIdx.x] = db_sum1; b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; __syncthreads(); // Do warp reduce for the 1st 16 cols in the tile. 
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); if (threadIdx.x == 0) { const int64_t c = blockIdx.x * blockDim.x + threadIdx.y; if (c < C) { if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } // Do warp reduce for the 2nd 16 cols in the tile. sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); if (threadIdx.x == 0) { const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; if (c < C) { if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( int64_t HxW, const T* dY, const T* X, acc_type<T, true>* ds, acc_type<T, true>* db) { using T_ACC = acc_type<T, true>; const int64_t nc = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t hw = threadIdx.x; hw < HxW; hw += blockDim.x) { const int64_t index = nc * HxW + hw; sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]); sum2 += static_cast<T_ACC>(dY[index]); } if (blockDim.x <= C10_WARP_SIZE) { sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); } else { __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); } if (threadIdx.x == 0) { ds[nc] = sum1; db[nc] = sum2; } } template <typename T> __global__ void ComputeBackwardFusedParamsCUDAKernel( int64_t C, int64_t HxW, int64_t group, const T* mean, const T* rstd, const T* gamma, const acc_type<T, true>* ds, const acc_type<T, true>* db, acc_type<T, true>* c2, acc_type<T, true>* c3) { using T_ACC = acc_type<T, true>; const int64_t G = group; const int64_t D = C / G; const int64_t n = blockIdx.x; const int64_t g = blockIdx.y; const int64_t ng = n * G + g; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t i = threadIdx.x; i < D; i += blockDim.x) { const int64_t index = ng * D + i; const int64_t c = g * D + i; const T_ACC gamma_v = gamma == nullptr ? 
T_ACC(1) : static_cast<T_ACC>(gamma[c]); sum1 += ds[index] * gamma_v; sum2 += db[index] * gamma_v; } if (blockDim.x <= C10_WARP_SIZE) { sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); } else { __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); } if (threadIdx.x == 0) { const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D * HxW); const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) * static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) * s; c2[ng] = x; c3[ng] = -x * static_cast<T_ACC>(mean[ng]) - sum2 * static_cast<T_ACC>(rstd[ng]) * s; } } template <typename T> __global__ void GammaBetaBackwardCUDAKernel1( int64_t N, int64_t C, int64_t group, const T* mean, const T* rstd, const acc_type<T, true>* ds, const acc_type<T, true>* db, T* dgamma, T* dbeta) { using T_ACC = acc_type<T, true>; const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; if (c < C) { const int64_t G = group; const int64_t D = C / G; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t n = 0; n < N; ++n) { const int64_t nc = n * C + c; const int64_t ng = n * G + c / D; sum1 += (dgamma == nullptr) ? T_ACC(0) : ((ds[nc] - db[nc] * static_cast<T_ACC>(mean[ng])) * static_cast<T_ACC>(rstd[ng])); sum2 += (dbeta == nullptr) ? T_ACC(0) : db[nc]; } if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } template <typename T> __global__ void GammaBetaBackwardCUDAKernel2( int64_t N, int64_t C, int64_t group, const T* mean, const T* rstd, const acc_type<T, true>* ds, const acc_type<T, true>* db, T* dgamma, T* dbeta) { using T_ACC = acc_type<T, true>; __shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1]; __shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1]; const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; T_ACC dg_sum1 = 0; T_ACC dg_sum2 = 0; T_ACC db_sum1 = 0; T_ACC db_sum2 = 0; if (c < C) { const int64_t G = group; const int64_t D = C / G; // Accumulate each 32 cols into a 32 * 32 tile. // Since the blockDim is (32, 16), accumulate twice for 1st and 2nd 16 rows // of a 32 contiguous elements. for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) { const int64_t n1 = n; const int64_t n2 = n + blockDim.y; const int64_t nc1 = n1 * C + c; const int64_t nc2 = n2 * C + c; const int64_t ng1 = n1 * G + c / D; const int64_t ng2 = n2 * G + c / D; dg_sum1 += dgamma == nullptr ? T_ACC(0) : ((ds[nc1] - db[nc1] * static_cast<T_ACC>(mean[ng1])) * static_cast<T_ACC>(rstd[ng1])); db_sum1 += dbeta == nullptr ? T_ACC(0) : db[nc1]; if (n2 < N) { dg_sum2 += dgamma == nullptr ? T_ACC(0) : ((ds[nc2] - db[nc2] * static_cast<T_ACC>(mean[ng2])) * static_cast<T_ACC>(rstd[ng2])); db_sum2 += dbeta == nullptr ? T_ACC(0) : db[nc2]; } } } // Write accumulated tile to shared memory. g_shared[threadIdx.y][threadIdx.x] = dg_sum1; g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; b_shared[threadIdx.y][threadIdx.x] = db_sum1; b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; __syncthreads(); // Do warp reduce for the 1st 16 cols in the tile. 
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); if (threadIdx.x == 0) { const int64_t c = blockIdx.x * blockDim.x + threadIdx.y; if (c < C) { if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } // Do warp reduce for the 2st 16 cols in the tile. sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); if (threadIdx.x == 0) { const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; if (c < C) { if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } } template <typename T> void GroupNorm1dForward( const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, const Tensor& beta, int64_t N, int64_t C, int64_t group, Tensor& Y) { using T_ACC = acc_type<T, true>; const int64_t G = group; const int64_t D = C / G; if (gamma.defined() && beta.defined()) { auto iter = TensorIteratorConfig() .resize_outputs(false) .add_output(Y.view({N, G, D})) .add_input(X.view({N, G, D})) .add_input(mean.view({N, G, 1})) .add_input(rstd.view({N, G, 1})) .add_input(gamma.view({1, G, D})) .add_input(beta.view({1, G, D})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma, T beta) -> T { return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) * static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma) + static_cast<T_ACC>(beta); }); } else if (gamma.defined()) { auto iter = TensorIteratorConfig() .resize_outputs(false) .add_output(Y.view({N, G, D})) .add_input(X.view({N, G, D})) .add_input(mean.view({N, G, 1})) .add_input(rstd.view({N, G, 1})) .add_input(gamma.view({1, G, D})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma) -> T { return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) * static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma); }); } else if (beta.defined()) { auto iter = TensorIteratorConfig() .resize_outputs(false) .add_output(Y.view({N, G, D})) .add_input(X.view({N, G, D})) .add_input(mean.view({N, G, 1})) .add_input(rstd.view({N, G, 1})) .add_input(beta.view({1, G, D})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T beta) -> T { return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) * static_cast<T_ACC>(rstd) + static_cast<T_ACC>(beta); }); } else { auto iter = TensorIteratorConfig() .resize_outputs(false) .add_output(Y.view({N * G, D})) .add_input(X.view({N * G, D})) .add_input(mean.view({N * G, 1})) .add_input(rstd.view({N * G, 1})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T { return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) * static_cast<T_ACC>(rstd); }); } AT_CUDA_CHECK(hipGetLastError()); } template <typename T> void GroupNormKernelImplInternal( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t N, int64_t C, int64_t HxW, int64_t group, T eps, Tensor& Y, Tensor& mean, Tensor& rstd) { using T_ACC = acc_type<T, true>; TORCH_CHECK(X.numel() == N * C * HxW); TORCH_CHECK(!gamma.defined() || gamma.numel() == C); TORCH_CHECK(!beta.defined() || beta.numel() == C); if (N == 0) { return; } const int64_t G = group; const int64_t D = C / G; const T* X_data = X.data_ptr<T>(); T* Y_data = Y.data_ptr<T>(); T* mean_data = mean.data_ptr<T>(); T* rstd_data = rstd.data_ptr<T>(); hipStream_t cuda_stream = 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int64_t num_threads = D * HxW < cuda_utils::kCUDABlockReduceNumThreads ? C10_WARP_SIZE : cuda_utils::kCUDABlockReduceNumThreads; hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>), dim3(N * G), dim3(num_threads), 0, cuda_stream, D * HxW, eps, X_data, mean_data, rstd_data); C10_HIP_KERNEL_LAUNCH_CHECK(); if (HxW == 1) { GroupNorm1dForward<T>(X, mean, rstd, gamma, beta, N, C, G, Y); } else if (!gamma.defined() && !beta.defined()) { auto iter = TensorIteratorConfig() .resize_outputs(false) .add_output(Y.view({N * G, D * HxW})) .add_input(X.view({N * G, D * HxW})) .add_input(mean.view({N * G, 1})) .add_input(rstd.view({N * G, 1})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T { return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) * static_cast<T_ACC>(rstd); }); } else { const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? kFloat : X.scalar_type(); Tensor a = at::empty({N, C}, X.options().dtype(kAccType)); Tensor b = at::empty({N, C}, X.options().dtype(kAccType)); const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr; T_ACC* a_data = a.data_ptr<T_ACC>(); T_ACC* b_data = b.data_ptr<T_ACC>(); // TODO: Since there is some issues in gpu_kernel_multiple_outputs, we are // using maunal kernel here. Make it using gpu_kernel_multiple_outputs once // the issue fixed. const int64_t B = (N * C + kCUDANumThreads - 1) / kCUDANumThreads; hipLaunchKernelGGL(( ComputeFusedParamsCUDAKernel<T>), dim3(B), dim3(kCUDANumThreads), 0, cuda_stream, N, C, G, mean_data, rstd_data, gamma_data, beta_data, a_data, b_data); C10_HIP_KERNEL_LAUNCH_CHECK(); auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .resize_outputs(false) .add_output(Y.view({N * C, HxW})) .add_input(X.view({N * C, HxW})) .add_input(a.view({N * C, 1})) .add_input(b.view({N * C, 1})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T_ACC a, T_ACC b) -> T { return a * static_cast<T_ACC>(x) + b; }); } } void GroupNormKernelImpl( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps, Tensor& Y, Tensor& mean, Tensor& rstd) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "GroupNormKernelImpl", [&]() { GroupNormKernelImplInternal<scalar_t>( X, gamma, beta, N, C, HxW, group, static_cast<scalar_t>(eps), Y, mean, rstd); }); } template <typename T> void GroupNorm1dBackward( const Tensor dY, const Tensor X, const Tensor mean, const Tensor rstd, const Tensor gamma, int64_t N, int64_t C, int64_t group, Tensor& dX, Tensor& dgamma, Tensor& dbeta) { using T_ACC = acc_type<T, true>; const int64_t G = group; const int64_t D = C / G; const T* dY_data = dY.data_ptr<T>(); const T* X_data = X.data_ptr<T>(); const T* mean_data = mean.data_ptr<T>(); const T* rstd_data = rstd.data_ptr<T>(); hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (dX.defined()) { const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? kFloat : X.scalar_type(); Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType)); Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType)); T_ACC* c2_data = c2.data_ptr<T_ACC>(); T_ACC* c3_data = c3.data_ptr<T_ACC>(); const int64_t num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads ? 
C10_WARP_SIZE : cuda_utils::kCUDABlockReduceNumThreads; hipLaunchKernelGGL(( Compute1dBackwardFusedParamsCUDAKernel<T>) , dim3(dim3(N, G)), dim3(num_threads), 0, cuda_stream, C, G, dY_data, X_data, mean_data, rstd_data, gamma_data, c2_data, c3_data); C10_HIP_KERNEL_LAUNCH_CHECK(); if (gamma.defined()) { auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .resize_outputs(false) .add_output(dX.view({N, G, D})) .add_input(dY.view({N, G, D})) .add_input(X.view({N, G, D})) .add_input(rstd.view({N, G, 1})) .add_input(gamma.view({1, G, D})) .add_input(c2.view({N, G, 1})) .add_input(c3.view({N, G, 1})) .build(); gpu_kernel( iter, [] GPU_LAMBDA(T dy, T x, T rstd, T gamma, T_ACC c2, T_ACC c3) -> T { const T_ACC c1 = static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma); return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) + c3; }); } else { auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .resize_outputs(false) .add_output(dX.view({N * G, D})) .add_input(dY.view({N * G, D})) .add_input(X.view({N * G, D})) .add_input(rstd.view({N * G, 1})) .add_input(c2.view({N * G, 1})) .add_input(c3.view({N * G, 1})) .build(); gpu_kernel( iter, [] GPU_LAMBDA(T dy, T x, T rstd, T_ACC c2, T_ACC c3) -> T { const T_ACC c1 = static_cast<T_ACC>(rstd); return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) + c3; }); } } if (dgamma.defined() || dbeta.defined()) { T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr; T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr; if (N <= 128) { const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads; hipLaunchKernelGGL(( GammaBeta1dBackwardCUDAKernel1<T>), dim3(B), dim3(kCUDANumThreads), 0, cuda_stream, N, C, G, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize; // The algorithm for colwise reduction here is to accumulate each 32 cols // to a 32 * 32 tile and write the tile to shared memmory. Then do warp // reduce for each col in the tile. So here the blockDim must be (32, 16). constexpr int kThreadX = kReduceTileSize; constexpr int kThreadY = kReduceTileSize / 2; hipLaunchKernelGGL(( GammaBeta1dBackwardCUDAKernel2<T>) , dim3(B), dim3(dim3(kThreadX, kThreadY)), 0, cuda_stream, N, C, G, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } template <typename T> void GroupNormBackwardKernelImplInternal( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t N, int64_t C, int64_t HxW, int64_t group, Tensor& dX, Tensor& dgamma, Tensor& dbeta) { using T_ACC = acc_type<T, true>; const int64_t G = group; const int64_t D = C / G; TORCH_CHECK(dY.numel() == N * C * HxW); TORCH_CHECK(X.numel() == N * C * HxW); TORCH_CHECK(mean.numel() == N * G); TORCH_CHECK(rstd.numel() == N * G); TORCH_CHECK(!gamma.defined() || gamma.numel() == C); hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (N == 0) { if (dgamma.defined()) { dgamma.fill_(T(0)); } if (dbeta.defined()) { dbeta.fill_(T(0)); } return; } const T* dY_data = dY.data_ptr<T>(); const T* X_data = X.data_ptr<T>(); const T* mean_data = mean.data_ptr<T>(); const T* rstd_data = rstd.data_ptr<T>(); const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? 
kFloat : X.scalar_type(); Tensor ds = at::empty({N, C}, X.options().dtype(kAccType)); Tensor db = at::empty({N, C}, X.options().dtype(kAccType)); T_ACC* ds_data = ds.data_ptr<T_ACC>(); T_ACC* db_data = db.data_ptr<T_ACC>(); if (HxW == 1) { GroupNorm1dBackward<T>( dY, X, mean, rstd, gamma, N, C, G, dX, dgamma, dbeta); return; } int64_t num_threads = HxW < cuda_utils::kCUDABlockReduceNumThreads ? C10_WARP_SIZE : cuda_utils::kCUDABlockReduceNumThreads; hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<T>), dim3(N * C), dim3(num_threads), 0, cuda_stream, HxW, dY_data, X_data, ds_data, db_data); C10_HIP_KERNEL_LAUNCH_CHECK(); if (dX.defined()) { Tensor c1 = at::empty({0}, X.options().dtype(kAccType)); Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType)); Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType)); T_ACC* c2_data = c2.data_ptr<T_ACC>(); T_ACC* c3_data = c3.data_ptr<T_ACC>(); if (gamma.defined()) { auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .add_output(c1) .add_input(rstd.view({N, G, 1})) .add_input(gamma.view({1, G, D})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T rstd, T gamma) -> T_ACC { return static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma); }); } num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads ? C10_WARP_SIZE : cuda_utils::kCUDABlockReduceNumThreads; hipLaunchKernelGGL(( ComputeBackwardFusedParamsCUDAKernel<T>) , dim3(dim3(N, G)), dim3(num_threads), 0, cuda_stream, C, HxW, G, mean_data, rstd_data, gamma_data, ds_data, db_data, c2_data, c3_data); C10_HIP_KERNEL_LAUNCH_CHECK(); if (gamma.defined()) { auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .resize_outputs(false) .add_output(dX.view({N * G, D, HxW})) .add_input(dY.view({N * G, D, HxW})) .add_input(X.view({N * G, D, HxW})) .add_input(c1.view({N * G, D, 1})) .add_input(c2.view({N * G, 1, 1})) .add_input(c3.view({N * G, 1, 1})) .build(); gpu_kernel( iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T { return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) + c3; }); } else { auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .resize_outputs(false) .add_output(dX.view({N * G, D * HxW})) .add_input(dY.view({N * G, D * HxW})) .add_input(X.view({N * G, D * HxW})) .add_input(rstd.view({N * G, 1})) .add_input(c2.view({N * G, 1})) .add_input(c3.view({N * G, 1})) .build(); gpu_kernel( iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T { return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) + c3; }); } } if (dgamma.defined() || dbeta.defined()) { T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr; T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr; if (N <= 128) { // For small batch size, do colwise reduce directly. const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads; hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel1<T>), dim3(B), dim3(kCUDANumThreads), 0, cuda_stream, N, C, G, mean_data, rstd_data, ds_data, db_data, dgamma_data, dbeta_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize; // The algorithm for colwise reduction here is to accumulate each 32 cols // to a 32 * 32 tile and write the tile to shared memmory. Then do warp // reduce for each col in the tile. So here the blockDim must be (32, 16). 
constexpr int kThreadX = kReduceTileSize; constexpr int kThreadY = kReduceTileSize / 2; hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel2<T>) , dim3(B), dim3(dim3(kThreadX, kThreadY)), 0, cuda_stream, N, C, G, mean_data, rstd_data, ds_data, db_data, dgamma_data, dbeta_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } void GroupNormBackwardKernelImpl( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t N, int64_t C, int64_t HxW, int64_t group, Tensor& dX, Tensor& dgamma, Tensor& dbeta) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "GroupNormBackwardKernelImpl", [&]() { GroupNormBackwardKernelImplInternal<scalar_t>( dY, X, mean, rstd, gamma, N, C, HxW, group, dX, dgamma, dbeta); }); } } // namespace REGISTER_DISPATCH(GroupNormKernel, &GroupNormKernelImpl); REGISTER_DISPATCH(GroupNormBackwardKernel, &GroupNormBackwardKernelImpl); } // namespace native } // namespace at
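The RowwiseMoments, ComputeInternalGradients, and fused-params kernels above all funnel their partial sums through cuda_utils::WarpReduceSum and cuda_utils::BlockReduceSum (ATen helpers pulled in via block_reduce.cuh), taking the warp-only path when the block is no wider than one warp and otherwise passing C10_WARP_SIZE shared-memory slots of scratch. The sketch below is a generic shuffle-based stand-in for that pattern, written from the well-known warp-reduction idiom rather than copied from ATen, so the real helpers may differ in detail; all names here are mine.

// Generic warp/block sum reduction in the spirit of WarpReduceSum/BlockReduceSum.
#include <cuda_runtime.h>

__inline__ __device__ float warpReduceSum(float val) {
  // shuffle the value down the warp, halving the stride each step
  for (int offset = warpSize / 2; offset > 0; offset /= 2)
    val += __shfl_down_sync(0xffffffff, val, offset);
  return val;
}

__inline__ __device__ float blockReduceSum(float val, float* shared /* >= 32 floats */) {
  const int lane = threadIdx.x % warpSize;
  const int wid  = threadIdx.x / warpSize;
  val = warpReduceSum(val);                     // reduce within each warp
  if (lane == 0) shared[wid] = val;             // one partial per warp
  __syncthreads();
  const int nWarps = (blockDim.x + warpSize - 1) / warpSize;
  val = (threadIdx.x < nWarps) ? shared[lane] : 0.0f;
  if (wid == 0) val = warpReduceSum(val);       // first warp combines the partials
  return val;                                   // full sum ends up in thread 0
}

__global__ void sumKernel(const float* x, float* out, int n) {
  __shared__ float scratch[32];                 // one slot per possible warp
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  float v = (i < n) ? x[i] : 0.0f;
  v = blockReduceSum(v, scratch);
  if (threadIdx.x == 0) atomicAdd(out, v);
}

Because __shfl_down_sync keeps each warp's reduction entirely in registers, a block only ever needs one shared-memory slot per warp, which is why the kernels above get away with C10_WARP_SIZE-sized scratch arrays even for large blocks.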
group_norm_kernel.cu
#include <ATen/native/group_norm.h> #include <type_traits> #include <thrust/tuple.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/native/TensorIterator.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/block_reduce.cuh> #include <c10/cuda/CUDAMathCompat.h> namespace at { namespace native { namespace { constexpr int kCUDANumThreads = 256; constexpr int kReduceTileSize = 32; template <typename T> __global__ void RowwiseMomentsCUDAKernel( int64_t N, T eps, const T* X, T* mean, T* rstd) { using T_ACC = acc_type<T, true>; const int64_t i = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; sum1 += static_cast<T_ACC>(X[index]); sum2 += static_cast<T_ACC>(X[index]) * static_cast<T_ACC>(X[index]); } if (blockDim.x <= C10_WARP_SIZE) { sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); } else { __shared__ T_ACC m_shared[C10_WARP_SIZE]; __shared__ T_ACC v_shared[C10_WARP_SIZE]; sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, m_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, v_shared); } if (threadIdx.x == 0) { const T_ACC scale = T_ACC(1) / static_cast<T_ACC>(N); sum1 *= scale; sum2 = c10::cuda::compat::max(sum2 * scale - sum1 * sum1, T_ACC(0)); mean[i] = sum1; rstd[i] = c10::cuda::compat::rsqrt(sum2 + static_cast<T_ACC>(eps)); } } template <typename T> __global__ void ComputeFusedParamsCUDAKernel( int64_t N, int64_t C, int64_t group, const T* mean, const T* rstd, const T* gamma, const T* beta, acc_type<T, true>* a, acc_type<T, true>* b) { using T_ACC = acc_type<T, true>; const int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N * C) { const int64_t ng = index / (C / group); const int64_t c = index % C; const T_ACC scale = (gamma == nullptr) ? static_cast<T_ACC>(rstd[ng]) : static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(gamma[c]); a[index] = scale; b[index] = -scale * static_cast<T_ACC>(mean[ng]) + ((beta == nullptr) ? 0 : static_cast<T_ACC>(beta[c])); } } template <typename T> __global__ void Compute1dBackwardFusedParamsCUDAKernel( int64_t C, int64_t group, const T* dY, const T* X, const T* mean, const T* rstd, const T* gamma, acc_type<T, true>* c2, acc_type<T, true>* c3) { using T_ACC = acc_type<T, true>; const int64_t G = group; const int64_t D = C / G; const int64_t n = blockIdx.x; const int64_t g = blockIdx.y; const int64_t ng = n * G + g; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t i = threadIdx.x; i < D; i += blockDim.x) { const int64_t index = ng * D + i; const int64_t c = g * D + i; const T_ACC gamma_v = gamma == nullptr ? 
T_ACC(1) : static_cast<T_ACC>(gamma[c]); sum1 += dY[index] * X[index] * gamma_v; sum2 += dY[index] * gamma_v; } if (blockDim.x <= C10_WARP_SIZE) { sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); } else { __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); } if (threadIdx.x == 0) { const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D); const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) * static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) * s; c2[ng] = x; c3[ng] = -x * static_cast<T_ACC>(mean[ng]) - sum2 * static_cast<T_ACC>(rstd[ng]) * s; } } template <typename T> __global__ void GammaBeta1dBackwardCUDAKernel1( int64_t N, int64_t C, int64_t group, const T* dY, const T* X, const T* mean, const T* rstd, T* dgamma, T* dbeta) { using T_ACC = acc_type<T, true>; const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; if (c < C) { const int64_t G = group; const int64_t D = C / G; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t n = 0; n < N; ++n) { const int64_t nc = n * C + c; const int64_t ng = n * G + c / D; const T_ACC dy_acc = static_cast<T_ACC>(dY[nc]); const T_ACC x_acc = static_cast<T_ACC>(X[nc]); sum1 += (dgamma == nullptr) ? T_ACC(0) : ((dy_acc * x_acc - dy_acc * static_cast<T_ACC>(mean[ng])) * static_cast<T_ACC>(rstd[ng])); sum2 += (dbeta == nullptr) ? T_ACC(0) : dy_acc; } if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } template <typename T> __global__ void GammaBeta1dBackwardCUDAKernel2( int64_t N, int64_t C, int64_t group, const T* dY, const T* X, const T* mean, const T* rstd, T* dgamma, T* dbeta) { using T_ACC = acc_type<T, true>; __shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1]; __shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1]; const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; T_ACC dg_sum1 = 0; T_ACC dg_sum2 = 0; T_ACC db_sum1 = 0; T_ACC db_sum2 = 0; if (c < C) { const int64_t G = group; const int64_t D = C / G; // Accumulate each 32 cols into a 32 * 32 tile. // Since the blockDim is (32, 16), accumulate twice for 1st and 2nd 16 rows // of a 32 contiguous elements. for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) { const int64_t n1 = n; const int64_t n2 = n + blockDim.y; const int64_t nc1 = n1 * C + c; const int64_t nc2 = n2 * C + c; const int64_t ng1 = n1 * G + c / D; const int64_t ng2 = n2 * G + c / D; const T_ACC dy1_acc = static_cast<T_ACC>(dY[nc1]); const T_ACC x1_acc = static_cast<T_ACC>(X[nc1]); dg_sum1 += dgamma == nullptr ? T_ACC(0) : ((dy1_acc * x1_acc - dy1_acc * static_cast<T_ACC>(mean[ng1])) * static_cast<T_ACC>(rstd[ng1])); db_sum1 += dbeta == nullptr ? T_ACC(0) : dy1_acc; if (n2 < N) { const T_ACC dy2_acc = static_cast<T_ACC>(dY[nc2]); const T_ACC x2_acc = static_cast<T_ACC>(X[nc2]); dg_sum2 += dgamma == nullptr ? T_ACC(0) : ((dy2_acc * x2_acc - dy2_acc * static_cast<T_ACC>(mean[ng2])) * static_cast<T_ACC>(rstd[ng2])); db_sum2 += dbeta == nullptr ? T_ACC(0) : dy2_acc; } } } // Write accumulated tile to shared memory. g_shared[threadIdx.y][threadIdx.x] = dg_sum1; g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; b_shared[threadIdx.y][threadIdx.x] = db_sum1; b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; __syncthreads(); // Do warp reduce for the 1st 16 cols in the tile. 
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); if (threadIdx.x == 0) { const int64_t c = blockIdx.x * blockDim.x + threadIdx.y; if (c < C) { if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } // Do warp reduce for the 2nd 16 cols in the tile. sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); if (threadIdx.x == 0) { const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; if (c < C) { if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( int64_t HxW, const T* dY, const T* X, acc_type<T, true>* ds, acc_type<T, true>* db) { using T_ACC = acc_type<T, true>; const int64_t nc = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t hw = threadIdx.x; hw < HxW; hw += blockDim.x) { const int64_t index = nc * HxW + hw; sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]); sum2 += static_cast<T_ACC>(dY[index]); } if (blockDim.x <= C10_WARP_SIZE) { sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); } else { __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); } if (threadIdx.x == 0) { ds[nc] = sum1; db[nc] = sum2; } } template <typename T> __global__ void ComputeBackwardFusedParamsCUDAKernel( int64_t C, int64_t HxW, int64_t group, const T* mean, const T* rstd, const T* gamma, const acc_type<T, true>* ds, const acc_type<T, true>* db, acc_type<T, true>* c2, acc_type<T, true>* c3) { using T_ACC = acc_type<T, true>; const int64_t G = group; const int64_t D = C / G; const int64_t n = blockIdx.x; const int64_t g = blockIdx.y; const int64_t ng = n * G + g; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t i = threadIdx.x; i < D; i += blockDim.x) { const int64_t index = ng * D + i; const int64_t c = g * D + i; const T_ACC gamma_v = gamma == nullptr ? 
T_ACC(1) : static_cast<T_ACC>(gamma[c]); sum1 += ds[index] * gamma_v; sum2 += db[index] * gamma_v; } if (blockDim.x <= C10_WARP_SIZE) { sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); } else { __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); } if (threadIdx.x == 0) { const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D * HxW); const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) * static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) * s; c2[ng] = x; c3[ng] = -x * static_cast<T_ACC>(mean[ng]) - sum2 * static_cast<T_ACC>(rstd[ng]) * s; } } template <typename T> __global__ void GammaBetaBackwardCUDAKernel1( int64_t N, int64_t C, int64_t group, const T* mean, const T* rstd, const acc_type<T, true>* ds, const acc_type<T, true>* db, T* dgamma, T* dbeta) { using T_ACC = acc_type<T, true>; const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; if (c < C) { const int64_t G = group; const int64_t D = C / G; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t n = 0; n < N; ++n) { const int64_t nc = n * C + c; const int64_t ng = n * G + c / D; sum1 += (dgamma == nullptr) ? T_ACC(0) : ((ds[nc] - db[nc] * static_cast<T_ACC>(mean[ng])) * static_cast<T_ACC>(rstd[ng])); sum2 += (dbeta == nullptr) ? T_ACC(0) : db[nc]; } if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } template <typename T> __global__ void GammaBetaBackwardCUDAKernel2( int64_t N, int64_t C, int64_t group, const T* mean, const T* rstd, const acc_type<T, true>* ds, const acc_type<T, true>* db, T* dgamma, T* dbeta) { using T_ACC = acc_type<T, true>; __shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1]; __shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1]; const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; T_ACC dg_sum1 = 0; T_ACC dg_sum2 = 0; T_ACC db_sum1 = 0; T_ACC db_sum2 = 0; if (c < C) { const int64_t G = group; const int64_t D = C / G; // Accumulate each 32 cols into a 32 * 32 tile. // Since the blockDim is (32, 16), accumulate twice for 1st and 2nd 16 rows // of a 32 contiguous elements. for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) { const int64_t n1 = n; const int64_t n2 = n + blockDim.y; const int64_t nc1 = n1 * C + c; const int64_t nc2 = n2 * C + c; const int64_t ng1 = n1 * G + c / D; const int64_t ng2 = n2 * G + c / D; dg_sum1 += dgamma == nullptr ? T_ACC(0) : ((ds[nc1] - db[nc1] * static_cast<T_ACC>(mean[ng1])) * static_cast<T_ACC>(rstd[ng1])); db_sum1 += dbeta == nullptr ? T_ACC(0) : db[nc1]; if (n2 < N) { dg_sum2 += dgamma == nullptr ? T_ACC(0) : ((ds[nc2] - db[nc2] * static_cast<T_ACC>(mean[ng2])) * static_cast<T_ACC>(rstd[ng2])); db_sum2 += dbeta == nullptr ? T_ACC(0) : db[nc2]; } } } // Write accumulated tile to shared memory. g_shared[threadIdx.y][threadIdx.x] = dg_sum1; g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; b_shared[threadIdx.y][threadIdx.x] = db_sum1; b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; __syncthreads(); // Do warp reduce for the 1st 16 cols in the tile. 
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); if (threadIdx.x == 0) { const int64_t c = blockIdx.x * blockDim.x + threadIdx.y; if (c < C) { if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } // Do warp reduce for the 2st 16 cols in the tile. sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1); sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2); if (threadIdx.x == 0) { const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; if (c < C) { if (dgamma != nullptr) { dgamma[c] = sum1; } if (dbeta != nullptr) { dbeta[c] = sum2; } } } } template <typename T> void GroupNorm1dForward( const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, const Tensor& beta, int64_t N, int64_t C, int64_t group, Tensor& Y) { using T_ACC = acc_type<T, true>; const int64_t G = group; const int64_t D = C / G; if (gamma.defined() && beta.defined()) { auto iter = TensorIteratorConfig() .resize_outputs(false) .add_output(Y.view({N, G, D})) .add_input(X.view({N, G, D})) .add_input(mean.view({N, G, 1})) .add_input(rstd.view({N, G, 1})) .add_input(gamma.view({1, G, D})) .add_input(beta.view({1, G, D})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma, T beta) -> T { return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) * static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma) + static_cast<T_ACC>(beta); }); } else if (gamma.defined()) { auto iter = TensorIteratorConfig() .resize_outputs(false) .add_output(Y.view({N, G, D})) .add_input(X.view({N, G, D})) .add_input(mean.view({N, G, 1})) .add_input(rstd.view({N, G, 1})) .add_input(gamma.view({1, G, D})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma) -> T { return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) * static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma); }); } else if (beta.defined()) { auto iter = TensorIteratorConfig() .resize_outputs(false) .add_output(Y.view({N, G, D})) .add_input(X.view({N, G, D})) .add_input(mean.view({N, G, 1})) .add_input(rstd.view({N, G, 1})) .add_input(beta.view({1, G, D})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T beta) -> T { return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) * static_cast<T_ACC>(rstd) + static_cast<T_ACC>(beta); }); } else { auto iter = TensorIteratorConfig() .resize_outputs(false) .add_output(Y.view({N * G, D})) .add_input(X.view({N * G, D})) .add_input(mean.view({N * G, 1})) .add_input(rstd.view({N * G, 1})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T { return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) * static_cast<T_ACC>(rstd); }); } AT_CUDA_CHECK(cudaGetLastError()); } template <typename T> void GroupNormKernelImplInternal( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t N, int64_t C, int64_t HxW, int64_t group, T eps, Tensor& Y, Tensor& mean, Tensor& rstd) { using T_ACC = acc_type<T, true>; TORCH_CHECK(X.numel() == N * C * HxW); TORCH_CHECK(!gamma.defined() || gamma.numel() == C); TORCH_CHECK(!beta.defined() || beta.numel() == C); if (N == 0) { return; } const int64_t G = group; const int64_t D = C / G; const T* X_data = X.data_ptr<T>(); T* Y_data = Y.data_ptr<T>(); T* mean_data = mean.data_ptr<T>(); T* rstd_data = rstd.data_ptr<T>(); cudaStream_t cuda_stream = 
at::cuda::getCurrentCUDAStream(); const int64_t num_threads = D * HxW < cuda_utils::kCUDABlockReduceNumThreads ? C10_WARP_SIZE : cuda_utils::kCUDABlockReduceNumThreads; RowwiseMomentsCUDAKernel<T><<<N * G, num_threads, 0, cuda_stream>>>( D * HxW, eps, X_data, mean_data, rstd_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); if (HxW == 1) { GroupNorm1dForward<T>(X, mean, rstd, gamma, beta, N, C, G, Y); } else if (!gamma.defined() && !beta.defined()) { auto iter = TensorIteratorConfig() .resize_outputs(false) .add_output(Y.view({N * G, D * HxW})) .add_input(X.view({N * G, D * HxW})) .add_input(mean.view({N * G, 1})) .add_input(rstd.view({N * G, 1})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T { return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) * static_cast<T_ACC>(rstd); }); } else { const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? kFloat : X.scalar_type(); Tensor a = at::empty({N, C}, X.options().dtype(kAccType)); Tensor b = at::empty({N, C}, X.options().dtype(kAccType)); const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr; T_ACC* a_data = a.data_ptr<T_ACC>(); T_ACC* b_data = b.data_ptr<T_ACC>(); // TODO: Since there is some issues in gpu_kernel_multiple_outputs, we are // using maunal kernel here. Make it using gpu_kernel_multiple_outputs once // the issue fixed. const int64_t B = (N * C + kCUDANumThreads - 1) / kCUDANumThreads; ComputeFusedParamsCUDAKernel<T><<<B, kCUDANumThreads, 0, cuda_stream>>>( N, C, G, mean_data, rstd_data, gamma_data, beta_data, a_data, b_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .resize_outputs(false) .add_output(Y.view({N * C, HxW})) .add_input(X.view({N * C, HxW})) .add_input(a.view({N * C, 1})) .add_input(b.view({N * C, 1})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T x, T_ACC a, T_ACC b) -> T { return a * static_cast<T_ACC>(x) + b; }); } } void GroupNormKernelImpl( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps, Tensor& Y, Tensor& mean, Tensor& rstd) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "GroupNormKernelImpl", [&]() { GroupNormKernelImplInternal<scalar_t>( X, gamma, beta, N, C, HxW, group, static_cast<scalar_t>(eps), Y, mean, rstd); }); } template <typename T> void GroupNorm1dBackward( const Tensor dY, const Tensor X, const Tensor mean, const Tensor rstd, const Tensor gamma, int64_t N, int64_t C, int64_t group, Tensor& dX, Tensor& dgamma, Tensor& dbeta) { using T_ACC = acc_type<T, true>; const int64_t G = group; const int64_t D = C / G; const T* dY_data = dY.data_ptr<T>(); const T* X_data = X.data_ptr<T>(); const T* mean_data = mean.data_ptr<T>(); const T* rstd_data = rstd.data_ptr<T>(); cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); if (dX.defined()) { const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? kFloat : X.scalar_type(); Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType)); Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType)); T_ACC* c2_data = c2.data_ptr<T_ACC>(); T_ACC* c3_data = c3.data_ptr<T_ACC>(); const int64_t num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads ? 
C10_WARP_SIZE : cuda_utils::kCUDABlockReduceNumThreads; Compute1dBackwardFusedParamsCUDAKernel<T> <<<dim3(N, G), num_threads, 0, cuda_stream>>>( C, G, dY_data, X_data, mean_data, rstd_data, gamma_data, c2_data, c3_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); if (gamma.defined()) { auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .resize_outputs(false) .add_output(dX.view({N, G, D})) .add_input(dY.view({N, G, D})) .add_input(X.view({N, G, D})) .add_input(rstd.view({N, G, 1})) .add_input(gamma.view({1, G, D})) .add_input(c2.view({N, G, 1})) .add_input(c3.view({N, G, 1})) .build(); gpu_kernel( iter, [] GPU_LAMBDA(T dy, T x, T rstd, T gamma, T_ACC c2, T_ACC c3) -> T { const T_ACC c1 = static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma); return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) + c3; }); } else { auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .resize_outputs(false) .add_output(dX.view({N * G, D})) .add_input(dY.view({N * G, D})) .add_input(X.view({N * G, D})) .add_input(rstd.view({N * G, 1})) .add_input(c2.view({N * G, 1})) .add_input(c3.view({N * G, 1})) .build(); gpu_kernel( iter, [] GPU_LAMBDA(T dy, T x, T rstd, T_ACC c2, T_ACC c3) -> T { const T_ACC c1 = static_cast<T_ACC>(rstd); return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) + c3; }); } } if (dgamma.defined() || dbeta.defined()) { T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr; T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr; if (N <= 128) { const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads; GammaBeta1dBackwardCUDAKernel1<T><<<B, kCUDANumThreads, 0, cuda_stream>>>( N, C, G, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize; // The algorithm for colwise reduction here is to accumulate each 32 cols // to a 32 * 32 tile and write the tile to shared memmory. Then do warp // reduce for each col in the tile. So here the blockDim must be (32, 16). constexpr int kThreadX = kReduceTileSize; constexpr int kThreadY = kReduceTileSize / 2; GammaBeta1dBackwardCUDAKernel2<T> <<<B, dim3(kThreadX, kThreadY), 0, cuda_stream>>>( N, C, G, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } template <typename T> void GroupNormBackwardKernelImplInternal( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t N, int64_t C, int64_t HxW, int64_t group, Tensor& dX, Tensor& dgamma, Tensor& dbeta) { using T_ACC = acc_type<T, true>; const int64_t G = group; const int64_t D = C / G; TORCH_CHECK(dY.numel() == N * C * HxW); TORCH_CHECK(X.numel() == N * C * HxW); TORCH_CHECK(mean.numel() == N * G); TORCH_CHECK(rstd.numel() == N * G); TORCH_CHECK(!gamma.defined() || gamma.numel() == C); cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); if (N == 0) { if (dgamma.defined()) { dgamma.fill_(T(0)); } if (dbeta.defined()) { dbeta.fill_(T(0)); } return; } const T* dY_data = dY.data_ptr<T>(); const T* X_data = X.data_ptr<T>(); const T* mean_data = mean.data_ptr<T>(); const T* rstd_data = rstd.data_ptr<T>(); const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? 
kFloat : X.scalar_type(); Tensor ds = at::empty({N, C}, X.options().dtype(kAccType)); Tensor db = at::empty({N, C}, X.options().dtype(kAccType)); T_ACC* ds_data = ds.data_ptr<T_ACC>(); T_ACC* db_data = db.data_ptr<T_ACC>(); if (HxW == 1) { GroupNorm1dBackward<T>( dY, X, mean, rstd, gamma, N, C, G, dX, dgamma, dbeta); return; } int64_t num_threads = HxW < cuda_utils::kCUDABlockReduceNumThreads ? C10_WARP_SIZE : cuda_utils::kCUDABlockReduceNumThreads; ComputeInternalGradientsCUDAKernel<T><<<N * C, num_threads, 0, cuda_stream>>>( HxW, dY_data, X_data, ds_data, db_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); if (dX.defined()) { Tensor c1 = at::empty({0}, X.options().dtype(kAccType)); Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType)); Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType)); T_ACC* c2_data = c2.data_ptr<T_ACC>(); T_ACC* c3_data = c3.data_ptr<T_ACC>(); if (gamma.defined()) { auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .add_output(c1) .add_input(rstd.view({N, G, 1})) .add_input(gamma.view({1, G, D})) .build(); gpu_kernel(iter, [] GPU_LAMBDA(T rstd, T gamma) -> T_ACC { return static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma); }); } num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads ? C10_WARP_SIZE : cuda_utils::kCUDABlockReduceNumThreads; ComputeBackwardFusedParamsCUDAKernel<T> <<<dim3(N, G), num_threads, 0, cuda_stream>>>( C, HxW, G, mean_data, rstd_data, gamma_data, ds_data, db_data, c2_data, c3_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); if (gamma.defined()) { auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .resize_outputs(false) .add_output(dX.view({N * G, D, HxW})) .add_input(dY.view({N * G, D, HxW})) .add_input(X.view({N * G, D, HxW})) .add_input(c1.view({N * G, D, 1})) .add_input(c2.view({N * G, 1, 1})) .add_input(c3.view({N * G, 1, 1})) .build(); gpu_kernel( iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T { return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) + c3; }); } else { auto iter = TensorIteratorConfig() .check_all_same_dtype(std::is_same<T, T_ACC>::value) .resize_outputs(false) .add_output(dX.view({N * G, D * HxW})) .add_input(dY.view({N * G, D * HxW})) .add_input(X.view({N * G, D * HxW})) .add_input(rstd.view({N * G, 1})) .add_input(c2.view({N * G, 1})) .add_input(c3.view({N * G, 1})) .build(); gpu_kernel( iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T { return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) + c3; }); } } if (dgamma.defined() || dbeta.defined()) { T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr; T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr; if (N <= 128) { // For small batch size, do colwise reduce directly. const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads; GammaBetaBackwardCUDAKernel1<T><<<B, kCUDANumThreads, 0, cuda_stream>>>( N, C, G, mean_data, rstd_data, ds_data, db_data, dgamma_data, dbeta_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize; // The algorithm for colwise reduction here is to accumulate each 32 cols // to a 32 * 32 tile and write the tile to shared memmory. Then do warp // reduce for each col in the tile. So here the blockDim must be (32, 16). 
constexpr int kThreadX = kReduceTileSize; constexpr int kThreadY = kReduceTileSize / 2; GammaBetaBackwardCUDAKernel2<T> <<<B, dim3(kThreadX, kThreadY), 0, cuda_stream>>>( N, C, G, mean_data, rstd_data, ds_data, db_data, dgamma_data, dbeta_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } void GroupNormBackwardKernelImpl( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t N, int64_t C, int64_t HxW, int64_t group, Tensor& dX, Tensor& dgamma, Tensor& dbeta) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "GroupNormBackwardKernelImpl", [&]() { GroupNormBackwardKernelImplInternal<scalar_t>( dY, X, mean, rstd, gamma, N, C, HxW, group, dX, dgamma, dbeta); }); } } // namespace REGISTER_DISPATCH(GroupNormKernel, &GroupNormKernelImpl); REGISTER_DISPATCH(GroupNormBackwardKernel, &GroupNormBackwardKernelImpl); } // namespace native } // namespace at
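A quick host-side reference for the forward path above: the elementwise kernel applies y = a * x + b per channel, so the fused coefficients must satisfy a = rstd * gamma and b = beta - mean * rstd * gamma in order to reproduce (x - mean) * rstd * gamma + beta. The sketch below is illustrative only; the function name and the flat (N, C) / (N, G) layouts are assumptions, not part of the file, and the coefficients are derived from the "a * static_cast<T_ACC>(x) + b" lambda shown in GroupNormKernelImplInternal rather than copied from the CUDA kernel that actually fills them.

#include <cstdint>
#include <vector>

// Reference computation of per-(n, c) linear coefficients for group norm.
// gamma/beta may be absent (empty vectors), matching the optional affine case.
void compute_fused_params_reference(int64_t N, int64_t C, int64_t G,
                                    const std::vector<float>& mean,   // N * G
                                    const std::vector<float>& rstd,   // N * G
                                    const std::vector<float>& gamma,  // C or empty
                                    const std::vector<float>& beta,   // C or empty
                                    std::vector<float>& a,            // N * C
                                    std::vector<float>& b) {          // N * C
  const int64_t D = C / G;
  for (int64_t n = 0; n < N; ++n) {
    for (int64_t c = 0; c < C; ++c) {
      const int64_t g = c / D;                  // group this channel belongs to
      const float m = mean[n * G + g];
      const float r = rstd[n * G + g];
      const float w = gamma.empty() ? 1.0f : gamma[c];
      const float s = beta.empty() ? 0.0f : beta[c];
      a[n * C + c] = r * w;                     // scale folded into one multiply
      b[n * C + c] = s - m * r * w;             // bias absorbing mean, rstd and beta
    }
  }
}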
roll_op.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/roll_op.h" #include "paddle/fluid/platform/complex.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/core/utils/array.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; template <typename T, size_t Rank> __global__ void RollCudaKernel(const T* input, T* output, int64_t N, phi::Array<int64_t, Rank> shifts, phi::Array<int64_t, Rank> strides, phi::Array<int64_t, Rank> sizes) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) { return; } int64_t output_idx = idx; int64_t new_dim_idx = 0; #pragma unroll for (size_t i = 0; i < Rank; i++) { new_dim_idx = (idx / strides[i]) % sizes[i] + shifts[i]; if (new_dim_idx >= sizes[i]) { output_idx += (shifts[i] - sizes[i]) * strides[i]; } else { output_idx += shifts[i] * strides[i]; } } output[output_idx] = input[idx]; } template <typename T> class RollKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input<LoDTensor>("X"); auto* out = context.Output<LoDTensor>("Out"); std::vector<int64_t> shifts = context.Attr<std::vector<int64_t>>("shifts"); if (context.HasInput("ShiftsTensor")) { const auto* shifts_tensor = context.Input<framework::Tensor>("ShiftsTensor"); PADDLE_ENFORCE_EQ( shifts_tensor->dims().size(), 1, platform::errors::InvalidArgument( "The rank of ShiftsTensor is expected to be 1, got %s", shifts_tensor->dims().size())); shifts = GetDataFromTensor<int64_t>(shifts_tensor); } std::vector<int64_t> dims = context.Attr<std::vector<int64_t>>("axis"); auto* in_data = in->data<T>(); auto* out_data = out->mutable_data<T>(context.GetPlace()); int64_t numel = in->numel(); auto stream = context.template device_context<platform::CUDADeviceContext>().stream(); size_t nums = shifts.size(); auto input_dim = in->dims(); auto stride_dim = phi::stride(input_dim); std::vector<int64_t> strides(nums), sizes(nums); if (dims.size() == 0) { strides[0] = 1; sizes[0] = numel; shifts[0] = (shifts[0] % numel + numel) % numel; } else { for (size_t i = 0; i < nums; i++) { int dim = dims[i] >= 0 ? 
dims[i] : dims[i] + input_dim.size(); int64_t size = input_dim[dim]; if (size != 0) { shifts[i] = (shifts[i] % size + size) % size; strides[i] = stride_dim[dim]; sizes[i] = size; } } } #define CALL_ROLL_CUDA_KERNEL(N) \ case N: { \ phi::Array<int64_t, N> _strides; \ phi::Array<int64_t, N> _shifts; \ phi::Array<int64_t, N> _sizes; \ for (size_t idx = 0; idx < N; ++idx) { \ _strides[idx] = strides[idx]; \ _shifts[idx] = shifts[idx]; \ _sizes[idx] = sizes[idx]; \ } \ hipLaunchKernelGGL(( RollCudaKernel< \ T, \ N>), dim3((numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS), \ PADDLE_CUDA_NUM_THREADS, 0, stream, in_data, out_data, numel, \ _shifts, _strides, _sizes); \ break; \ } switch (nums) { CALL_ROLL_CUDA_KERNEL(1); CALL_ROLL_CUDA_KERNEL(2); CALL_ROLL_CUDA_KERNEL(3); CALL_ROLL_CUDA_KERNEL(4); CALL_ROLL_CUDA_KERNEL(5); CALL_ROLL_CUDA_KERNEL(6); CALL_ROLL_CUDA_KERNEL(7); CALL_ROLL_CUDA_KERNEL(8); CALL_ROLL_CUDA_KERNEL(9); default: PADDLE_THROW(platform::errors::InvalidArgument( "shifts.size() should be less than 10, But received shifts.size() " "= %d", shifts.size())); } } }; template <typename T> class RollGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input<LoDTensor>(framework::GradVarName("Out")); auto* out = context.Output<LoDTensor>(framework::GradVarName("X")); std::vector<int64_t> shifts = context.Attr<std::vector<int64_t>>("shifts"); if (context.HasInput("ShiftsTensor")) { const auto* shifts_tensor = context.Input<framework::Tensor>("ShiftsTensor"); PADDLE_ENFORCE_EQ( shifts_tensor->dims().size(), 1, platform::errors::InvalidArgument( "The rank of ShiftsTensor is expected to be 1, got %s", shifts_tensor->dims().size())); shifts = GetDataFromTensor<int64_t>(shifts_tensor); } std::vector<int64_t> dims = context.Attr<std::vector<int64_t>>("axis"); auto* in_data = in->data<T>(); auto* out_data = out->mutable_data<T>(context.GetPlace()); int64_t numel = in->numel(); auto stream = context.template device_context<platform::CUDADeviceContext>().stream(); size_t nums = shifts.size(); auto input_dim = in->dims(); auto stride_dim = phi::stride(input_dim); std::vector<int64_t> strides(nums), sizes(nums); if (dims.size() == 0) { strides[0] = 1; sizes[0] = numel; shifts[0] = ((-shifts[0]) % numel + numel) % numel; } else { for (size_t i = 0; i < nums; i++) { int dim = dims[i] >= 0 ? 
dims[i] : dims[i] + input_dim.size(); int64_t size = input_dim[dim]; if (size != 0) { shifts[i] = ((-shifts[i]) % size + size) % size; strides[i] = stride_dim[dim]; sizes[i] = size; } } } switch (nums) { CALL_ROLL_CUDA_KERNEL(1); CALL_ROLL_CUDA_KERNEL(2); CALL_ROLL_CUDA_KERNEL(3); CALL_ROLL_CUDA_KERNEL(4); CALL_ROLL_CUDA_KERNEL(5); CALL_ROLL_CUDA_KERNEL(6); CALL_ROLL_CUDA_KERNEL(7); CALL_ROLL_CUDA_KERNEL(8); CALL_ROLL_CUDA_KERNEL(9); default: PADDLE_THROW(platform::errors::InvalidArgument( "shifts.size() should be less than 10, But received shifts.size() " "= %d", shifts.size())); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( roll, ops::RollKernel<paddle::platform::CUDADeviceContext, float>, ops::RollKernel<paddle::platform::CUDADeviceContext, double>, ops::RollKernel<paddle::platform::CUDADeviceContext, int>, ops::RollKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::RollKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex<float>>, ops::RollKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex<double>>); REGISTER_OP_CUDA_KERNEL( roll_grad, ops::RollGradKernel<paddle::platform::CUDADeviceContext, float>, ops::RollGradKernel<paddle::platform::CUDADeviceContext, double>, ops::RollGradKernel<paddle::platform::CUDADeviceContext, int>, ops::RollGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::RollGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex<float>>, ops::RollGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex<double>>);
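The index arithmetic in RollCudaKernel reduces, in the one-dimensional case with stride 1, to writing input element i to position (i + shift) % n once the shift has been normalised into [0, n). A minimal host-side sketch of that convention follows; the function name is an illustrative assumption and is not part of either file.

#include <vector>

// 1-D reference roll with the same shift normalisation as the host code above.
std::vector<float> roll_1d(const std::vector<float>& in, long long shift) {
  const long long n = static_cast<long long>(in.size());
  std::vector<float> out(in.size());
  if (n == 0) return out;
  shift = (shift % n + n) % n;          // bring shift into [0, n), handles negatives
  for (long long i = 0; i < n; ++i) {
    out[(i + shift) % n] = in[i];       // element i moves forward by `shift`
  }
  return out;
}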
roll_op.cu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/roll_op.h" #include "paddle/fluid/platform/complex.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/core/utils/array.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; template <typename T, size_t Rank> __global__ void RollCudaKernel(const T* input, T* output, int64_t N, phi::Array<int64_t, Rank> shifts, phi::Array<int64_t, Rank> strides, phi::Array<int64_t, Rank> sizes) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) { return; } int64_t output_idx = idx; int64_t new_dim_idx = 0; #pragma unroll for (size_t i = 0; i < Rank; i++) { new_dim_idx = (idx / strides[i]) % sizes[i] + shifts[i]; if (new_dim_idx >= sizes[i]) { output_idx += (shifts[i] - sizes[i]) * strides[i]; } else { output_idx += shifts[i] * strides[i]; } } output[output_idx] = input[idx]; } template <typename T> class RollKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input<LoDTensor>("X"); auto* out = context.Output<LoDTensor>("Out"); std::vector<int64_t> shifts = context.Attr<std::vector<int64_t>>("shifts"); if (context.HasInput("ShiftsTensor")) { const auto* shifts_tensor = context.Input<framework::Tensor>("ShiftsTensor"); PADDLE_ENFORCE_EQ( shifts_tensor->dims().size(), 1, platform::errors::InvalidArgument( "The rank of ShiftsTensor is expected to be 1, got %s", shifts_tensor->dims().size())); shifts = GetDataFromTensor<int64_t>(shifts_tensor); } std::vector<int64_t> dims = context.Attr<std::vector<int64_t>>("axis"); auto* in_data = in->data<T>(); auto* out_data = out->mutable_data<T>(context.GetPlace()); int64_t numel = in->numel(); auto stream = context.template device_context<platform::CUDADeviceContext>().stream(); size_t nums = shifts.size(); auto input_dim = in->dims(); auto stride_dim = phi::stride(input_dim); std::vector<int64_t> strides(nums), sizes(nums); if (dims.size() == 0) { strides[0] = 1; sizes[0] = numel; shifts[0] = (shifts[0] % numel + numel) % numel; } else { for (size_t i = 0; i < nums; i++) { int dim = dims[i] >= 0 ? 
dims[i] : dims[i] + input_dim.size(); int64_t size = input_dim[dim]; if (size != 0) { shifts[i] = (shifts[i] % size + size) % size; strides[i] = stride_dim[dim]; sizes[i] = size; } } } #define CALL_ROLL_CUDA_KERNEL(N) \ case N: { \ phi::Array<int64_t, N> _strides; \ phi::Array<int64_t, N> _shifts; \ phi::Array<int64_t, N> _sizes; \ for (size_t idx = 0; idx < N; ++idx) { \ _strides[idx] = strides[idx]; \ _shifts[idx] = shifts[idx]; \ _sizes[idx] = sizes[idx]; \ } \ RollCudaKernel< \ T, \ N><<<(numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, \ PADDLE_CUDA_NUM_THREADS, 0, stream>>>(in_data, out_data, numel, \ _shifts, _strides, _sizes); \ break; \ } switch (nums) { CALL_ROLL_CUDA_KERNEL(1); CALL_ROLL_CUDA_KERNEL(2); CALL_ROLL_CUDA_KERNEL(3); CALL_ROLL_CUDA_KERNEL(4); CALL_ROLL_CUDA_KERNEL(5); CALL_ROLL_CUDA_KERNEL(6); CALL_ROLL_CUDA_KERNEL(7); CALL_ROLL_CUDA_KERNEL(8); CALL_ROLL_CUDA_KERNEL(9); default: PADDLE_THROW(platform::errors::InvalidArgument( "shifts.size() should be less than 10, But received shifts.size() " "= %d", shifts.size())); } } }; template <typename T> class RollGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input<LoDTensor>(framework::GradVarName("Out")); auto* out = context.Output<LoDTensor>(framework::GradVarName("X")); std::vector<int64_t> shifts = context.Attr<std::vector<int64_t>>("shifts"); if (context.HasInput("ShiftsTensor")) { const auto* shifts_tensor = context.Input<framework::Tensor>("ShiftsTensor"); PADDLE_ENFORCE_EQ( shifts_tensor->dims().size(), 1, platform::errors::InvalidArgument( "The rank of ShiftsTensor is expected to be 1, got %s", shifts_tensor->dims().size())); shifts = GetDataFromTensor<int64_t>(shifts_tensor); } std::vector<int64_t> dims = context.Attr<std::vector<int64_t>>("axis"); auto* in_data = in->data<T>(); auto* out_data = out->mutable_data<T>(context.GetPlace()); int64_t numel = in->numel(); auto stream = context.template device_context<platform::CUDADeviceContext>().stream(); size_t nums = shifts.size(); auto input_dim = in->dims(); auto stride_dim = phi::stride(input_dim); std::vector<int64_t> strides(nums), sizes(nums); if (dims.size() == 0) { strides[0] = 1; sizes[0] = numel; shifts[0] = ((-shifts[0]) % numel + numel) % numel; } else { for (size_t i = 0; i < nums; i++) { int dim = dims[i] >= 0 ? 
dims[i] : dims[i] + input_dim.size(); int64_t size = input_dim[dim]; if (size != 0) { shifts[i] = ((-shifts[i]) % size + size) % size; strides[i] = stride_dim[dim]; sizes[i] = size; } } } switch (nums) { CALL_ROLL_CUDA_KERNEL(1); CALL_ROLL_CUDA_KERNEL(2); CALL_ROLL_CUDA_KERNEL(3); CALL_ROLL_CUDA_KERNEL(4); CALL_ROLL_CUDA_KERNEL(5); CALL_ROLL_CUDA_KERNEL(6); CALL_ROLL_CUDA_KERNEL(7); CALL_ROLL_CUDA_KERNEL(8); CALL_ROLL_CUDA_KERNEL(9); default: PADDLE_THROW(platform::errors::InvalidArgument( "shifts.size() should be less than 10, But received shifts.size() " "= %d", shifts.size())); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( roll, ops::RollKernel<paddle::platform::CUDADeviceContext, float>, ops::RollKernel<paddle::platform::CUDADeviceContext, double>, ops::RollKernel<paddle::platform::CUDADeviceContext, int>, ops::RollKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::RollKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex<float>>, ops::RollKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex<double>>); REGISTER_OP_CUDA_KERNEL( roll_grad, ops::RollGradKernel<paddle::platform::CUDADeviceContext, float>, ops::RollGradKernel<paddle::platform::CUDADeviceContext, double>, ops::RollGradKernel<paddle::platform::CUDADeviceContext, int>, ops::RollGradKernel<paddle::platform::CUDADeviceContext, int64_t>, ops::RollGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex<float>>, ops::RollGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::complex<double>>);
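The roll_op.cu listing above is the CUDA original of the roll_op.hip listing before it; apart from the runtime headers, the main structural change hipify makes is rewriting the triple-chevron launch into the hipLaunchKernelGGL macro, which takes the kernel, grid, block, shared-memory size and stream ahead of the kernel arguments. A toy sketch of the two equivalent forms, with an assumed kernel name and launch configuration that are not taken from the files:

#include <cuda_runtime.h>

// Illustrative kernel; "toy_scale" and the 256-thread configuration are
// assumptions for the sketch only.
__global__ void toy_scale(const float* in, float* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = 2.0f * in[i];
}

void launch_toy_scale(const float* d_in, float* d_out, int n, cudaStream_t stream) {
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  // CUDA form, as written in roll_op.cu:
  toy_scale<<<blocks, threads, 0, stream>>>(d_in, d_out, n);
  // HIP form emitted by hipify in roll_op.hip (kept as a comment so this
  // sketch stays valid CUDA):
  // hipLaunchKernelGGL(toy_scale, dim3(blocks), dim3(threads), 0, stream,
  //                    d_in, d_out, n);
}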
edf1f407132ea58f0293dd909e071ef855e49e33.hip
// !!! This is a file automatically generated by hipify!!! #ifndef CUDA_H #define CUDA_H #include <stdio.h> #include <iostream> #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> using namespace std; static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) __global__ void mykernel( int *a, int *b ) { a[0] += b[0]; } extern "C" void Dosomething() { int a = 5; int b = 9; int *d_a, *d_b; // device pointer hipMalloc(&d_a, sizeof(int)); hipMalloc(&d_b, sizeof(int)); hipMemcpy(d_a, &a, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, &b, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( mykernel) , dim3(16), dim3(16) , 0, 0, d_a, d_b); hipMemcpy(&a, d_a, sizeof(int), hipMemcpyDeviceToHost); std::cout << " The answer is " << a << "\n"; hipFree(d_a); hipFree(d_b); } __global__ void kernel( const int *a, const int *b, int *c, int Num ) { int idx = blockIdx.x; if(idx < Num) { c[idx] = a[idx] + b[idx]; } } extern "C" void addtwoVector( const int *a, const int *b, int *c, int Num ) { int *d_a, *d_b; // device pointer int *d_c; //cout << "The number of element is " << N << endl; hipMalloc(/*(void **)*/&d_a, Num*sizeof(int)); hipMalloc(/*(void **)*/&d_b, Num*sizeof(int)); hipMalloc(/*(void **)*/&d_c, Num*sizeof(int)); hipMemcpy(d_a, a, Num*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, b, Num*sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel) , dim3(Num), dim3(1) , 0, 0, d_a, d_b, d_c, Num); hipMemcpy(c, d_c, Num*sizeof(int), hipMemcpyDeviceToHost); //std::cout << " The answer is " << a << "\n"; for(int i = 0; i < 10; i ++ ) { cout << c[i] << " "; } hipFree(d_a); hipFree(d_b); hipFree(d_c); } #define Num 41452143 //8223 * 71 * 71 #define N 50000000 __global__ void AddLong( const int* a, const int* b, int* c, int n) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if( idx < n) { c[idx] = a[idx] + b[idx]; idx += blockDim.x*gridDim.x; } } extern "C" void addTwolongVector(const int* a, const int* b, int *c, int n ) { int *d_a, *d_b, *d_c; HANDLE_ERROR( hipMalloc(&d_a, n*sizeof(int) ) ); HANDLE_ERROR( hipMalloc(&d_b, n*sizeof(int) ) ); HANDLE_ERROR( hipMalloc(&d_c, n*sizeof(int) ) ); HANDLE_ERROR(hipMemcpy(d_a, a, n*sizeof(int), hipMemcpyHostToDevice) ); HANDLE_ERROR(hipMemcpy(d_b, b, n*sizeof(int), hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( AddLong) , dim3(256), dim3(256), 0, 0, d_a, d_b, d_c, n); HANDLE_ERROR(hipMemcpy( c, d_c, n*sizeof(int), hipMemcpyDeviceToHost) ); //std::cout << " The answer is " << a << "\n"; for(int i = 0; i < 10; i ++ ) { cout << c[i] << " "; } hipFree(d_a); hipFree(d_b); hipFree(d_c); } #define DIM 1000 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b){} __device__ float magnitude2() { return r*r + i*i; } __device__ hipComplex operator*( const hipComplex &a ) { return hipComplex( r*a.r - i*a.i, i*a.r + r*a.i ); } __device__ hipComplex operator+( const hipComplex &a ) { return hipComplex( r + a.r, i + a.i ); } }; __device__ int julia( int x, int y ) { const float scale = 1.5; float jx = scale * (float)( DIM/2 - x )/ (DIM/2); float jy = scale * (float)( DIM/2 - y )/ (DIM/2); hipComplex c( -0.8, 0.156 ); hipComplex a( jx, jy ); for(int i = 0; i < 200; i ++ ) { a = a*a + c; if( a.magnitude2() > 1000 ) { return 0; } } return 1; } __global__ void Modify( unsigned char *ptr ) { int x 
= blockIdx.x; int y = blockIdx.y; int offset = x + y*gridDim.x; int JuliaValue = julia(x, y); ptr[offset*4 + 0] = 255*JuliaValue; ptr[offset*4 + 1] = 255*JuliaValue; ptr[offset*4 + 2] = 0; ptr[offset*4 + 3] = 0; } // I have to flatten the array of matrix to 1D array // For the array of set, I will have to save one additional array indicating number of element in each set // The output should be and 1D array then I will have to store it in a 2D array (or vector); #define MaxNumbNoise 8000 // the maximum so far could be 80000 for whole range 0 to 1, actuall 8223 #define MaxNumbNode 100 // the maximum so far could be 1000 for human connectome #define MaxNumbClusterings 15 // the maximum rarely goes beyond this number, depending on how users chooses the number of clusterings // to check if a entry belongs to a cluster in the clusterIndex // input: i is the number to check if it belongs to the cluster[index + clusterIndex*MaxNumbNode]; // *cluster is the array to store all the clusters // clusterIndex is the index th cluster in the set of clusters // input cluster is a global variable in GPU // Here I assume each cluster has MaxNumbNode // output: just check if entry i belongs to the cluster number clusterIndex __device__ bool belong_ToSet(int i, const int *cluster, int clusterIndex) { bool ReturnValue = false; for(int index = 0; index < MaxNumbNode; index ++) { if(i == cluster[index + clusterIndex*MaxNumbNode ]) { ReturnValue = true; return ReturnValue; } } return ReturnValue; } // The kernel to compute the average co_activation for one noise for all the clusters // input: g_Co_matrix is the list of coactivation matrices with respect to noise which is a global variable in GPU // input: row, column is the number of row, the number of column for each matrix. Normally row = column. 
// input: g_Clustering is the array which store the information of cluster which is a global variable in GPU // input: NumberClustering is the number of clusters // output: store all the co-activation matrix __global__ void compute_coactivation_average_one_parameter( const float *g_Co_matrix, const int row, const int column, const int *g_Clustering, float *g_output, int NumberClustering) { // How to determine the location of g_Co_Matrix for this threads int index = blockIdx.x * blockDim.x + threadIdx.x; // How to determine how many matrix one thread can calculate while(index < MaxNumbNoise ) {// Only the block with index smaller than MaxNumbNoise has to work // The matrix will be including the element from index*row*column to (index + 1)*row*colomn // Note that Co_matrix[index][j][k] = g_Co_matrix[index*row*column + j*row + k] // the emelement i of the clustering m clustering[m][i] = g_Clustering[i + m*MaxNumbNode], we just stop until we have one element = -1; // and the output[m][index], which is the average of cluster m, at noise i and output[m][index] = g_output[m + index*MaxNumbClusterings] for(int clusterIndex = 0; clusterIndex < NumberClustering; clusterIndex ++) { float ReturnValue = 0.0f; float numberOfpair = 0.0f; for(int i = 0; i < row ; i ++) { for(int j = i+1; j < column; j ++) { // check if i and j belong to g_Clustering at [clusterIndex] bool I_belong_clusterIndex = belong_ToSet(i, g_Clustering, clusterIndex); bool J_belong_clusterIndex = belong_ToSet(j, g_Clustering, clusterIndex); if(I_belong_clusterIndex && J_belong_clusterIndex) { ReturnValue += g_Co_matrix[index*row*column + i*row + j]; numberOfpair += 1.0f; } } }// end computing for this matrix if(numberOfpair != 0.0f) ReturnValue = ReturnValue/numberOfpair; g_output[index*NumberClustering + clusterIndex] = ReturnValue; // that's correct!!!!! } // End computing for each cluster at this noise index += blockDim.x * gridDim.x; } //end for this thread } // Input: the coactivation matrix list of all noise setting // Input: clustering list which include all the clustering // output is the ensemble of average of coactivation evolving by noises extern "C" void Average_Coactivation( const vector<vector<vector<float>>> &CoAct_matrixList, const vector<vector<int>> &clusteringList, vector<vector<float>> &output ) { int TheNumbOfNoise = (int) CoAct_matrixList.size(); // Why do I need that? 
//cout << "The number of noises is " << TheNumbOfNoise << endl; int SizeOfMatrix = (int)(CoAct_matrixList[0]).size(); //cout << "The size of matrix is " << SizeOfMatrix << endl; int NumberOfClustering = (int)clusteringList.size(); //cout << "The number of clustering is " << NumberOfClustering << endl; // should delete after copy the data over to device from host; // I hope this computer can handle that, otherwise I float *ArraynizeTheTwoDMatrix = new float[TheNumbOfNoise*SizeOfMatrix*SizeOfMatrix]; // need to setup again the memory thingy, maximum memory for heap in visual studio :) for(int i = 0; i < TheNumbOfNoise; i ++) { for(int j = 0; j < SizeOfMatrix; j ++) { for(int k = 0; k < SizeOfMatrix; k ++) { ArraynizeTheTwoDMatrix[i*SizeOfMatrix*SizeOfMatrix + j*SizeOfMatrix + k] = CoAct_matrixList[i][j][k]; } } }// I assume that I build the array correctly float *dev_CoActMat; // Allocate memory for the dev_CoActMat in device HANDLE_ERROR(hipMalloc(&dev_CoActMat, TheNumbOfNoise*SizeOfMatrix*SizeOfMatrix*sizeof(float))); HANDLE_ERROR(hipMemcpy(dev_CoActMat, ArraynizeTheTwoDMatrix, TheNumbOfNoise*SizeOfMatrix*SizeOfMatrix*sizeof(float), hipMemcpyHostToDevice)); // Deleting the array in host delete [] ArraynizeTheTwoDMatrix; // mission accomplished, its duty is done // now need to work with the clustering array in host // I have to allocate that amount of memory, because I have no ideas how users are going to choose their clusterings. int *ArrayClusteringList = new int[NumberOfClustering*MaxNumbNode]; for(int i = 0; i < NumberOfClustering*MaxNumbNode; i ++) { // clusteringList[m][n] = ArrayClusteringList[n + m*MaxNumbNode]; // How about ArrayClusteringList[p] = clusteringList[x][y]; 1. What are x, y? 2. What if clusteringList[x][y] doesn't exist? // 1. p = y + x*MaxNumbNode; so p/MaxNumbNode = y/MaxNumbNode + x, get floor [p/MaxNumbNode] = x because [y/MaxNumbNode] = 0; y = p - x*MaxNumbNode // 2. 
Check if x < clusteringList.size(), yes then check if y < clusteringList[x].size(), if yes assgin ArrayClusteringList[p] = clusteringList[x][y], else = -1, else = -1; int index_cluster = (int)floor((float)i/(float)MaxNumbNode); int index_element_inCluster = i - index_cluster*MaxNumbNode; if(index_cluster < NumberOfClustering) { if(index_element_inCluster < (int)clusteringList[index_cluster].size()) { ArrayClusteringList[i] = clusteringList[index_cluster][index_element_inCluster]; } else ArrayClusteringList[i] = -1; } else ArrayClusteringList[i] = -1; } // Copy the clustering array from host to device int *dev_ArrayClusteringList; HANDLE_ERROR(hipMalloc(&dev_ArrayClusteringList, NumberOfClustering*MaxNumbNode*sizeof(int))); HANDLE_ERROR(hipMemcpy(dev_ArrayClusteringList, ArrayClusteringList, NumberOfClustering*MaxNumbNode*sizeof(int), hipMemcpyHostToDevice )); // Deleting the array in host delete [] ArrayClusteringList; // Now I had already in the GPU memory the list of matrix and the list of clustering data, the only thing I have to do now is to allocate memory for the output data float *dev_output_average_coactivation_by_noises; HANDLE_ERROR(hipMalloc(&dev_output_average_coactivation_by_noises, TheNumbOfNoise*NumberOfClustering*sizeof(float))); //cout <<"Allocate memory succesfully"<< endl; // use MaxNumbNoise blocks and 1 threads per block to run the kernel hipLaunchKernelGGL(( compute_coactivation_average_one_parameter), dim3(128),dim3(128) , 0, 0, dev_CoActMat, SizeOfMatrix, SizeOfMatrix, dev_ArrayClusteringList, dev_output_average_coactivation_by_noises, NumberOfClustering); // I'm done computing till this step, now I have to copy from device to host then delete memory in GPU float *output_average_coativation_by_noises = new float[TheNumbOfNoise*NumberOfClustering]; HANDLE_ERROR(hipMemcpy(output_average_coativation_by_noises, dev_output_average_coactivation_by_noises, TheNumbOfNoise*NumberOfClustering*sizeof(float), hipMemcpyDeviceToHost)); // Now modify back to my data structure output if(!output.empty()) output.clear(); for(int i = 0; i < NumberOfClustering; i ++) { vector<float> Output_Average_Coactivation_clusterI; for(int j = 0; j < TheNumbOfNoise; j ++) { float EntryIJ = output_average_coativation_by_noises[j*NumberOfClustering + i]; Output_Average_Coactivation_clusterI.push_back(EntryIJ); } output.push_back(Output_Average_Coactivation_clusterI); } //cout << "The number of noise after computing is " << (int)output.size() << endl; // Now I can delete output_average in host delete [] output_average_coativation_by_noises; // Now I can delete memory in GPU hipFree(dev_CoActMat); hipFree(dev_ArrayClusteringList); hipFree(dev_output_average_coactivation_by_noises); } #endif
edf1f407132ea58f0293dd909e071ef855e49e33.cu
#ifndef CUDA_H #define CUDA_H #include <stdio.h> #include <iostream> #include <vector> #include <cuda.h> #include <cuda_runtime.h> using namespace std; static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) __global__ void mykernel( int *a, int *b ) { a[0] += b[0]; } extern "C" void Dosomething() { int a = 5; int b = 9; int *d_a, *d_b; // device pointer cudaMalloc(&d_a, sizeof(int)); cudaMalloc(&d_b, sizeof(int)); cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice); mykernel <<< 16, 16 >>> (d_a, d_b); cudaMemcpy(&a, d_a, sizeof(int), cudaMemcpyDeviceToHost); std::cout << " The answer is " << a << "\n"; cudaFree(d_a); cudaFree(d_b); } __global__ void kernel( const int *a, const int *b, int *c, int Num ) { int idx = blockIdx.x; if(idx < Num) { c[idx] = a[idx] + b[idx]; } } extern "C" void addtwoVector( const int *a, const int *b, int *c, int Num ) { int *d_a, *d_b; // device pointer int *d_c; //cout << "The number of element is " << N << endl; cudaMalloc(/*(void **)*/&d_a, Num*sizeof(int)); cudaMalloc(/*(void **)*/&d_b, Num*sizeof(int)); cudaMalloc(/*(void **)*/&d_c, Num*sizeof(int)); cudaMemcpy(d_a, a, Num*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, Num*sizeof(int), cudaMemcpyHostToDevice); kernel <<< Num, 1 >>> (d_a, d_b, d_c, Num); cudaMemcpy(c, d_c, Num*sizeof(int), cudaMemcpyDeviceToHost); //std::cout << " The answer is " << a << "\n"; for(int i = 0; i < 10; i ++ ) { cout << c[i] << " "; } cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); } #define Num 41452143 //8223 * 71 * 71 #define N 50000000 __global__ void AddLong( const int* a, const int* b, int* c, int n) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if( idx < n) { c[idx] = a[idx] + b[idx]; idx += blockDim.x*gridDim.x; } } extern "C" void addTwolongVector(const int* a, const int* b, int *c, int n ) { int *d_a, *d_b, *d_c; HANDLE_ERROR( cudaMalloc(&d_a, n*sizeof(int) ) ); HANDLE_ERROR( cudaMalloc(&d_b, n*sizeof(int) ) ); HANDLE_ERROR( cudaMalloc(&d_c, n*sizeof(int) ) ); HANDLE_ERROR(cudaMemcpy(d_a, a, n*sizeof(int), cudaMemcpyHostToDevice) ); HANDLE_ERROR(cudaMemcpy(d_b, b, n*sizeof(int), cudaMemcpyHostToDevice) ); AddLong <<<256, 256>>> (d_a, d_b, d_c, n); HANDLE_ERROR(cudaMemcpy( c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost) ); //std::cout << " The answer is " << a << "\n"; for(int i = 0; i < 10; i ++ ) { cout << c[i] << " "; } cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); } #define DIM 1000 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b){} __device__ float magnitude2() { return r*r + i*i; } __device__ cuComplex operator*( const cuComplex &a ) { return cuComplex( r*a.r - i*a.i, i*a.r + r*a.i ); } __device__ cuComplex operator+( const cuComplex &a ) { return cuComplex( r + a.r, i + a.i ); } }; __device__ int julia( int x, int y ) { const float scale = 1.5; float jx = scale * (float)( DIM/2 - x )/ (DIM/2); float jy = scale * (float)( DIM/2 - y )/ (DIM/2); cuComplex c( -0.8, 0.156 ); cuComplex a( jx, jy ); for(int i = 0; i < 200; i ++ ) { a = a*a + c; if( a.magnitude2() > 1000 ) { return 0; } } return 1; } __global__ void Modify( unsigned char *ptr ) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y*gridDim.x; int JuliaValue = julia(x, y); ptr[offset*4 + 0] = 255*JuliaValue; ptr[offset*4 + 1] = 
255*JuliaValue; ptr[offset*4 + 2] = 0; ptr[offset*4 + 3] = 0; } // I have to flatten the array of matrix to 1D array // For the array of set, I will have to save one additional array indicating number of element in each set // The output should be and 1D array then I will have to store it in a 2D array (or vector); #define MaxNumbNoise 8000 // the maximum so far could be 80000 for whole range 0 to 1, actuall 8223 #define MaxNumbNode 100 // the maximum so far could be 1000 for human connectome #define MaxNumbClusterings 15 // the maximum rarely goes beyond this number, depending on how users chooses the number of clusterings // to check if a entry belongs to a cluster in the clusterIndex // input: i is the number to check if it belongs to the cluster[index + clusterIndex*MaxNumbNode]; // *cluster is the array to store all the clusters // clusterIndex is the index th cluster in the set of clusters // input cluster is a global variable in GPU // Here I assume each cluster has MaxNumbNode // output: just check if entry i belongs to the cluster number clusterIndex __device__ bool belong_ToSet(int i, const int *cluster, int clusterIndex) { bool ReturnValue = false; for(int index = 0; index < MaxNumbNode; index ++) { if(i == cluster[index + clusterIndex*MaxNumbNode ]) { ReturnValue = true; return ReturnValue; } } return ReturnValue; } // The kernel to compute the average co_activation for one noise for all the clusters // input: g_Co_matrix is the list of coactivation matrices with respect to noise which is a global variable in GPU // input: row, column is the number of row, the number of column for each matrix. Normally row = column. // input: g_Clustering is the array which store the information of cluster which is a global variable in GPU // input: NumberClustering is the number of clusters // output: store all the co-activation matrix __global__ void compute_coactivation_average_one_parameter( const float *g_Co_matrix, const int row, const int column, const int *g_Clustering, float *g_output, int NumberClustering) { // How to determine the location of g_Co_Matrix for this threads int index = blockIdx.x * blockDim.x + threadIdx.x; // How to determine how many matrix one thread can calculate while(index < MaxNumbNoise ) {// Only the block with index smaller than MaxNumbNoise has to work // The matrix will be including the element from index*row*column to (index + 1)*row*colomn // Note that Co_matrix[index][j][k] = g_Co_matrix[index*row*column + j*row + k] // the emelement i of the clustering m clustering[m][i] = g_Clustering[i + m*MaxNumbNode], we just stop until we have one element = -1; // and the output[m][index], which is the average of cluster m, at noise i and output[m][index] = g_output[m + index*MaxNumbClusterings] for(int clusterIndex = 0; clusterIndex < NumberClustering; clusterIndex ++) { float ReturnValue = 0.0f; float numberOfpair = 0.0f; for(int i = 0; i < row ; i ++) { for(int j = i+1; j < column; j ++) { // check if i and j belong to g_Clustering at [clusterIndex] bool I_belong_clusterIndex = belong_ToSet(i, g_Clustering, clusterIndex); bool J_belong_clusterIndex = belong_ToSet(j, g_Clustering, clusterIndex); if(I_belong_clusterIndex && J_belong_clusterIndex) { ReturnValue += g_Co_matrix[index*row*column + i*row + j]; numberOfpair += 1.0f; } } }// end computing for this matrix if(numberOfpair != 0.0f) ReturnValue = ReturnValue/numberOfpair; g_output[index*NumberClustering + clusterIndex] = ReturnValue; // that's correct!!!!! 
} // End computing for each cluster at this noise index += blockDim.x * gridDim.x; } //end for this thread } // Input: the coactivation matrix list of all noise setting // Input: clustering list which include all the clustering // output is the ensemble of average of coactivation evolving by noises extern "C" void Average_Coactivation( const vector<vector<vector<float>>> &CoAct_matrixList, const vector<vector<int>> &clusteringList, vector<vector<float>> &output ) { int TheNumbOfNoise = (int) CoAct_matrixList.size(); // Why do I need that? //cout << "The number of noises is " << TheNumbOfNoise << endl; int SizeOfMatrix = (int)(CoAct_matrixList[0]).size(); //cout << "The size of matrix is " << SizeOfMatrix << endl; int NumberOfClustering = (int)clusteringList.size(); //cout << "The number of clustering is " << NumberOfClustering << endl; // should delete after copy the data over to device from host; // I hope this computer can handle that, otherwise I float *ArraynizeTheTwoDMatrix = new float[TheNumbOfNoise*SizeOfMatrix*SizeOfMatrix]; // need to setup again the memory thingy, maximum memory for heap in visual studio :) for(int i = 0; i < TheNumbOfNoise; i ++) { for(int j = 0; j < SizeOfMatrix; j ++) { for(int k = 0; k < SizeOfMatrix; k ++) { ArraynizeTheTwoDMatrix[i*SizeOfMatrix*SizeOfMatrix + j*SizeOfMatrix + k] = CoAct_matrixList[i][j][k]; } } }// I assume that I build the array correctly float *dev_CoActMat; // Allocate memory for the dev_CoActMat in device HANDLE_ERROR(cudaMalloc(&dev_CoActMat, TheNumbOfNoise*SizeOfMatrix*SizeOfMatrix*sizeof(float))); HANDLE_ERROR(cudaMemcpy(dev_CoActMat, ArraynizeTheTwoDMatrix, TheNumbOfNoise*SizeOfMatrix*SizeOfMatrix*sizeof(float), cudaMemcpyHostToDevice)); // Deleting the array in host delete [] ArraynizeTheTwoDMatrix; // mission accomplished, its duty is done // now need to work with the clustering array in host // I have to allocate that amount of memory, because I have no ideas how users are going to choose their clusterings. int *ArrayClusteringList = new int[NumberOfClustering*MaxNumbNode]; for(int i = 0; i < NumberOfClustering*MaxNumbNode; i ++) { // clusteringList[m][n] = ArrayClusteringList[n + m*MaxNumbNode]; // How about ArrayClusteringList[p] = clusteringList[x][y]; 1. What are x, y? 2. What if clusteringList[x][y] doesn't exist? // 1. p = y + x*MaxNumbNode; so p/MaxNumbNode = y/MaxNumbNode + x, get floor [p/MaxNumbNode] = x because [y/MaxNumbNode] = 0; y = p - x*MaxNumbNode // 2. 
Check if x < clusteringList.size(), yes then check if y < clusteringList[x].size(), if yes assgin ArrayClusteringList[p] = clusteringList[x][y], else = -1, else = -1; int index_cluster = (int)floor((float)i/(float)MaxNumbNode); int index_element_inCluster = i - index_cluster*MaxNumbNode; if(index_cluster < NumberOfClustering) { if(index_element_inCluster < (int)clusteringList[index_cluster].size()) { ArrayClusteringList[i] = clusteringList[index_cluster][index_element_inCluster]; } else ArrayClusteringList[i] = -1; } else ArrayClusteringList[i] = -1; } // Copy the clustering array from host to device int *dev_ArrayClusteringList; HANDLE_ERROR(cudaMalloc(&dev_ArrayClusteringList, NumberOfClustering*MaxNumbNode*sizeof(int))); HANDLE_ERROR(cudaMemcpy(dev_ArrayClusteringList, ArrayClusteringList, NumberOfClustering*MaxNumbNode*sizeof(int), cudaMemcpyHostToDevice )); // Deleting the array in host delete [] ArrayClusteringList; // Now I had already in the GPU memory the list of matrix and the list of clustering data, the only thing I have to do now is to allocate memory for the output data float *dev_output_average_coactivation_by_noises; HANDLE_ERROR(cudaMalloc(&dev_output_average_coactivation_by_noises, TheNumbOfNoise*NumberOfClustering*sizeof(float))); //cout <<"Allocate memory succesfully"<< endl; // use MaxNumbNoise blocks and 1 threads per block to run the kernel compute_coactivation_average_one_parameter<<< 128,128 >>>(dev_CoActMat, SizeOfMatrix, SizeOfMatrix, dev_ArrayClusteringList, dev_output_average_coactivation_by_noises, NumberOfClustering); // I'm done computing till this step, now I have to copy from device to host then delete memory in GPU float *output_average_coativation_by_noises = new float[TheNumbOfNoise*NumberOfClustering]; HANDLE_ERROR(cudaMemcpy(output_average_coativation_by_noises, dev_output_average_coactivation_by_noises, TheNumbOfNoise*NumberOfClustering*sizeof(float), cudaMemcpyDeviceToHost)); // Now modify back to my data structure output if(!output.empty()) output.clear(); for(int i = 0; i < NumberOfClustering; i ++) { vector<float> Output_Average_Coactivation_clusterI; for(int j = 0; j < TheNumbOfNoise; j ++) { float EntryIJ = output_average_coativation_by_noises[j*NumberOfClustering + i]; Output_Average_Coactivation_clusterI.push_back(EntryIJ); } output.push_back(Output_Average_Coactivation_clusterI); } //cout << "The number of noise after computing is " << (int)output.size() << endl; // Now I can delete output_average in host delete [] output_average_coativation_by_noises; // Now I can delete memory in GPU cudaFree(dev_CoActMat); cudaFree(dev_ArrayClusteringList); cudaFree(dev_output_average_coactivation_by_noises); } #endif
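For reference, the quantity each thread of compute_coactivation_average_one_parameter produces for a single noise index and a single cluster is the mean of the co-activation entries over unordered node pairs (i, j) with i < j and both nodes in the cluster, or zero when the cluster contributes no pairs. A host-side sketch of that reduction for one square matrix and one cluster; the function name is an assumption and the 2-D vector layout stands in for the flattened device array.

#include <algorithm>
#include <vector>

// Mean co-activation over pairs (i, j), i < j, with both i and j in `cluster`.
float cluster_coactivation_average(const std::vector<std::vector<float>>& co_matrix,
                                   const std::vector<int>& cluster) {
  float sum = 0.0f;
  float pairs = 0.0f;
  const int n = static_cast<int>(co_matrix.size());
  auto in_cluster = [&](int v) {
    return std::find(cluster.begin(), cluster.end(), v) != cluster.end();
  };
  for (int i = 0; i < n; ++i) {
    for (int j = i + 1; j < n; ++j) {
      if (in_cluster(i) && in_cluster(j)) {
        sum += co_matrix[i][j];
        pairs += 1.0f;
      }
    }
  }
  return pairs != 0.0f ? sum / pairs : 0.0f;   // zero when no pair falls in the cluster
}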
286893bf9afceb8ef74e30a6fa145f9e4c469e8c.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include <map> #include <vector> #include <math.h> #include "base.h" #define BLOCK_DIM 256 // extern "C" // { /** * Compute distances from each B point to each A point * @param ref pointer to 1D feature refence vector * @param size_r size of vector reference * @param query pointer to 1D feature query vector of dimensions * @param size_q size of vector query * @param distance output vector of size_r * size_q * */ __global__ void cuda_compute_distance(float *coords, int size_r, float *newCoords, int size_q, float *distance) { // Gloabal thread ID int tid = blockIdx.x * blockDim.x + threadIdx.x; int query_id = tid; if (tid < size_q) { // Get distances for (int i = 0; i < size_r; i++) { float sum = 0.0; for (int j = 0; j < DIMENSION; j++) { sum += (coords[DIMENSION * i + j] - newCoords[tid * DIMENSION + j]) * (coords[DIMENSION * i + j] - newCoords[tid * DIMENSION + j]); } distance[tid * size_r + i] = sqrt(sum); } } } /** * For each reference point (i.e. each column) finds the k-th smallest distances * of the distance matrix and their respective indexes and gathers them at the top * of the 2 arrays. * * Since we only need to locate the k smallest distances, sorting the entire array * would not be very efficient if k is relatively small. Instead, we perform a * simple insertion sort by eventually inserting a given distance in the first * k values. * * @param dist distance matrix * @param dist_pitch pitch of the distance matrix given in number of columns * @param index index matrix * @param index_pitch pitch of the index matrix given in number of columns * @param width width of the distance matrix and of the index matrix * @param height height of the distance matrix * @param k number of values to find */ __global__ void modified_insertion_sort(float * dist, int * classes, int size_r, int size_q, int k, int num_classes ) { // Column position unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; // For this query point if (xIndex < size_q) { // Pointer shift float *p_dist = dist + xIndex * size_r; // Initialise the top classes array int *top_classes = new int[size_r]; for (int i = 0; i < size_r; i++) top_classes[i] = classes[i]; // Iterate through all points for (int i = 0; i < size_r; ++i) { // Starting from current index int j = i; // Remember class and distance float tmp_d = p_dist[i]; int tmp_c = top_classes[i]; // While have left and left > right while ((j > 0) && (p_dist[j - 1] > tmp_d)) { // Shift left -> right p_dist[j] = p_dist[j - 1]; top_classes[j] = top_classes[j - 1]; --j; } // Write the current distance and index at their position p_dist[j] = tmp_d; top_classes[j] = tmp_c; } // Get class for current point accourding to the top classes // Implement majority vote classes[size_r + xIndex] = top_classes[0]; int classes_sum = 0, classes_max = 1; for (int _class = 0; _class < num_classes; _class++) { classes_sum = 0; for (int j = 0; j < k; j++) if (_class == top_classes[j]) classes_sum += 1; if (classes_sum > classes_max) { classes[size_r + xIndex] = _class; classes_max = classes_sum; } } } } //}
286893bf9afceb8ef74e30a6fa145f9e4c469e8c.cu
#include <cstdio> #include <device_launch_parameters.h> #include <cuda.h> #include <map> #include <vector> #include <math.h> #include "base.h" #define BLOCK_DIM 256 // extern "C" // { /** * Compute distances from each B point to each A point * @param ref pointer to 1D feature refence vector * @param size_r size of vector reference * @param query pointer to 1D feature query vector of dimensions * @param size_q size of vector query * @param distance output vector of size_r * size_q * */ __global__ void cuda_compute_distance(float *coords, int size_r, float *newCoords, int size_q, float *distance) { // Gloabal thread ID int tid = blockIdx.x * blockDim.x + threadIdx.x; int query_id = tid; if (tid < size_q) { // Get distances for (int i = 0; i < size_r; i++) { float sum = 0.0; for (int j = 0; j < DIMENSION; j++) { sum += (coords[DIMENSION * i + j] - newCoords[tid * DIMENSION + j]) * (coords[DIMENSION * i + j] - newCoords[tid * DIMENSION + j]); } distance[tid * size_r + i] = sqrt(sum); } } } /** * For each reference point (i.e. each column) finds the k-th smallest distances * of the distance matrix and their respective indexes and gathers them at the top * of the 2 arrays. * * Since we only need to locate the k smallest distances, sorting the entire array * would not be very efficient if k is relatively small. Instead, we perform a * simple insertion sort by eventually inserting a given distance in the first * k values. * * @param dist distance matrix * @param dist_pitch pitch of the distance matrix given in number of columns * @param index index matrix * @param index_pitch pitch of the index matrix given in number of columns * @param width width of the distance matrix and of the index matrix * @param height height of the distance matrix * @param k number of values to find */ __global__ void modified_insertion_sort(float * dist, int * classes, int size_r, int size_q, int k, int num_classes ) { // Column position unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; // For this query point if (xIndex < size_q) { // Pointer shift float *p_dist = dist + xIndex * size_r; // Initialise the top classes array int *top_classes = new int[size_r]; for (int i = 0; i < size_r; i++) top_classes[i] = classes[i]; // Iterate through all points for (int i = 0; i < size_r; ++i) { // Starting from current index int j = i; // Remember class and distance float tmp_d = p_dist[i]; int tmp_c = top_classes[i]; // While have left and left > right while ((j > 0) && (p_dist[j - 1] > tmp_d)) { // Shift left -> right p_dist[j] = p_dist[j - 1]; top_classes[j] = top_classes[j - 1]; --j; } // Write the current distance and index at their position p_dist[j] = tmp_d; top_classes[j] = tmp_c; } // Get class for current point accourding to the top classes // Implement majority vote classes[size_r + xIndex] = top_classes[0]; int classes_sum = 0, classes_max = 1; for (int _class = 0; _class < num_classes; _class++) { classes_sum = 0; for (int j = 0; j < k; j++) if (_class == top_classes[j]) classes_sum += 1; if (classes_sum > classes_max) { classes[size_r + xIndex] = _class; classes_max = classes_sum; } } } } //}
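The classification step at the end of modified_insertion_sort is a plain majority vote over the k nearest reference points, with the single nearest neighbour used as the default when no class reaches a count above one. A host-side sketch of the same rule; the function name is an assumption and `sorted_classes` stands for the per-query class array after the distance sort.

#include <vector>

// Majority vote among the k nearest neighbours; falls back to the nearest
// neighbour's class when no class occurs more than once, mirroring the
// classes_max = 1 initialisation in the kernel.
int majority_vote(const std::vector<int>& sorted_classes,  // ordered by ascending distance
                  int k, int num_classes) {
  int best_class = sorted_classes[0];   // nearest neighbour wins by default
  int best_count = 1;
  for (int c = 0; c < num_classes; ++c) {
    int count = 0;
    for (int j = 0; j < k; ++j) {
      if (sorted_classes[j] == c) ++count;
    }
    if (count > best_count) {
      best_class = c;
      best_count = count;
    }
  }
  return best_class;
}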
77235f329747d756ae085bc4ab37155a5e6c1865.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" #include "opencv2/core/cuda/limits.hpp" #include "opencv2/core/cuda.hpp" using namespace cv::cuda; using namespace cv::cuda::device; //////////////////////////////////////////////////////////// // centeredGradient namespace tvl1flow { __global__ void centeredGradientKernel(const PtrStepSzf src, PtrStepf dx, PtrStepf dy) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= src.cols || y >= src.rows) return; dx(y, x) = 0.5f * (src(y, ::min(x + 1, src.cols - 1)) - src(y, ::max(x - 1, 0))); dy(y, x) = 0.5f * (src(::min(y + 1, src.rows - 1), x) - src(::max(y - 1, 0), x)); } void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy, hipStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); hipLaunchKernelGGL(( centeredGradientKernel), dim3(grid), dim3(block), 0, stream, src, dx, dy); cudaSafeCall( hipGetLastError() ); if (!stream) cudaSafeCall( hipDeviceSynchronize() ); } } //////////////////////////////////////////////////////////// // warpBackward namespace tvl1flow { static __device__ __forceinline__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } struct SrcTex { virtual ~SrcTex() {} __device__ __forceinline__ virtual float I1(float x, float y) const = 0; __device__ __forceinline__ virtual float I1x(float x, float y) const = 0; __device__ __forceinline__ virtual float I1y(float x, float y) const = 0; }; texture<float, hipTextureType2D, hipReadModeElementType> tex_I1 (false, hipFilterModePoint, hipAddressModeClamp); texture<float, hipTextureType2D, hipReadModeElementType> tex_I1x(false, hipFilterModePoint, hipAddressModeClamp); texture<float, hipTextureType2D, hipReadModeElementType> tex_I1y(false, hipFilterModePoint, hipAddressModeClamp); struct SrcTexRef : SrcTex { __device__ __forceinline__ float I1(float x, float y) const CV_OVERRIDE { return tex2D(tex_I1, x, y); } __device__ __forceinline__ float I1x(float x, float y) const CV_OVERRIDE { return tex2D(tex_I1x, x, y); } __device__ __forceinline__ float I1y(float x, float y) const CV_OVERRIDE { return tex2D(tex_I1y, x, y); } }; struct SrcTexObj : SrcTex { __host__ SrcTexObj(hipTextureObject_t tex_obj_I1_, hipTextureObject_t tex_obj_I1x_, hipTextureObject_t tex_obj_I1y_) : tex_obj_I1(tex_obj_I1_), tex_obj_I1x(tex_obj_I1x_), tex_obj_I1y(tex_obj_I1y_) {} __device__ __forceinline__ float I1(float x, float y) const CV_OVERRIDE { return tex2D<float>(tex_obj_I1, x, y); } __device__ __forceinline__ float I1x(float x, float y) const CV_OVERRIDE { return tex2D<float>(tex_obj_I1x, x, y); } __device__ __forceinline__ float I1y(float x, float y) const CV_OVERRIDE { return tex2D<float>(tex_obj_I1y, x, y); } hipTextureObject_t tex_obj_I1; hipTextureObject_t tex_obj_I1x; hipTextureObject_t tex_obj_I1y; }; template < typename T, typename = typename std::enable_if<std::is_base_of<SrcTex, T>::value>::type > __global__ void warpBackwardKernel( const PtrStepSzf I0, const T src, const PtrStepf u1, const PtrStepf u2, PtrStepf I1w, PtrStepf I1wx, PtrStepf I1wy, PtrStepf grad, PtrStepf rho) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= I0.cols || y >= I0.rows) return; const float 
u1Val = u1(y, x); const float u2Val = u2(y, x); const float wx = x + u1Val; const float wy = y + u2Val; const int xmin = ::ceilf(wx - 2.0f); const int xmax = ::floorf(wx + 2.0f); const int ymin = ::ceilf(wy - 2.0f); const int ymax = ::floorf(wy + 2.0f); float sum = 0.0f; float sumx = 0.0f; float sumy = 0.0f; float wsum = 0.0f; for (int cy = ymin; cy <= ymax; ++cy) { for (int cx = xmin; cx <= xmax; ++cx) { const float w = bicubicCoeff(wx - cx) * bicubicCoeff(wy - cy); sum += w * src.I1(cx, cy); sumx += w * src.I1x(cx, cy); sumy += w * src.I1y(cx, cy); wsum += w; } } const float coeff = 1.0f / wsum; const float I1wVal = sum * coeff; const float I1wxVal = sumx * coeff; const float I1wyVal = sumy * coeff; I1w(y, x) = I1wVal; I1wx(y, x) = I1wxVal; I1wy(y, x) = I1wyVal; const float Ix2 = I1wxVal * I1wxVal; const float Iy2 = I1wyVal * I1wyVal; // store the |Grad(I1)|^2 grad(y, x) = Ix2 + Iy2; // compute the constant part of the rho function const float I0Val = I0(y, x); rho(y, x) = I1wVal - I1wxVal * u1Val - I1wyVal * u2Val - I0Val; } void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho, hipStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(I0.cols, block.x), divUp(I0.rows, block.y)); bool cc30 = deviceSupports(FEATURE_SET_COMPUTE_30); if (cc30) { hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.addressMode[1] = hipAddressModeClamp; texDesc.addressMode[2] = hipAddressModeClamp; hipTextureObject_t texObj_I1 = 0, texObj_I1x = 0, texObj_I1y = 0; createTextureObjectPitch2D(&texObj_I1, I1, texDesc); createTextureObjectPitch2D(&texObj_I1x, I1x, texDesc); createTextureObjectPitch2D(&texObj_I1y, I1y, texDesc); warpBackwardKernel << <grid, block, 0, stream >> > (I0, SrcTexObj(texObj_I1, texObj_I1x, texObj_I1y), u1, u2, I1w, I1wx, I1wy, grad, rho); cudaSafeCall(hipGetLastError()); if (!stream) cudaSafeCall(hipDeviceSynchronize()); else cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipDestroyTextureObject(texObj_I1)); cudaSafeCall(hipDestroyTextureObject(texObj_I1x)); cudaSafeCall(hipDestroyTextureObject(texObj_I1y)); } else { bindTexture(&tex_I1, I1); bindTexture(&tex_I1x, I1x); bindTexture(&tex_I1y, I1y); warpBackwardKernel << <grid, block, 0, stream >> > (I0, SrcTexRef(), u1, u2, I1w, I1wx, I1wy, grad, rho); cudaSafeCall(hipGetLastError()); if (!stream) cudaSafeCall(hipDeviceSynchronize()); } } } //////////////////////////////////////////////////////////// // estimateU namespace tvl1flow { __device__ float divergence(const PtrStepf& v1, const PtrStepf& v2, int y, int x) { if (x > 0 && y > 0) { const float v1x = v1(y, x) - v1(y, x - 1); const float v2y = v2(y, x) - v2(y - 1, x); return v1x + v2y; } else { if (y > 0) return v1(y, 0) + v2(y, 0) - v2(y - 1, 0); else { if (x > 0) return v1(0, x) - v1(0, x - 1) + v2(0, x); else return v1(0, 0) + v2(0, 0); } } } __global__ void estimateUKernel(const PtrStepSzf I1wx, const PtrStepf I1wy, const PtrStepf grad, const PtrStepf rho_c, const PtrStepf p11, const PtrStepf p12, const PtrStepf p21, const PtrStepf p22, const PtrStepf p31, const PtrStepf p32, PtrStepf u1, PtrStepf u2, PtrStepf u3, PtrStepf error, const float l_t, const float theta, const float gamma, const bool calcError) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= I1wx.cols || y >= I1wx.rows) return; const 
float I1wxVal = I1wx(y, x); const float I1wyVal = I1wy(y, x); const float gradVal = grad(y, x); const float u1OldVal = u1(y, x); const float u2OldVal = u2(y, x); const float u3OldVal = gamma ? u3(y, x) : 0; const float rho = rho_c(y, x) + (I1wxVal * u1OldVal + I1wyVal * u2OldVal + gamma * u3OldVal); // estimate the values of the variable (v1, v2) (thresholding operator TH) float d1 = 0.0f; float d2 = 0.0f; float d3 = 0.0f; if (rho < -l_t * gradVal) { d1 = l_t * I1wxVal; d2 = l_t * I1wyVal; if (gamma) d3 = l_t * gamma; } else if (rho > l_t * gradVal) { d1 = -l_t * I1wxVal; d2 = -l_t * I1wyVal; if (gamma) d3 = -l_t * gamma; } else if (gradVal > numeric_limits<float>::epsilon()) { const float fi = -rho / gradVal; d1 = fi * I1wxVal; d2 = fi * I1wyVal; if (gamma) d3 = fi * gamma; } const float v1 = u1OldVal + d1; const float v2 = u2OldVal + d2; const float v3 = u3OldVal + d3; // compute the divergence of the dual variable (p1, p2) const float div_p1 = divergence(p11, p12, y, x); const float div_p2 = divergence(p21, p22, y, x); const float div_p3 = gamma ? divergence(p31, p32, y, x) : 0; // estimate the values of the optical flow (u1, u2) const float u1NewVal = v1 + theta * div_p1; const float u2NewVal = v2 + theta * div_p2; const float u3NewVal = gamma ? v3 + theta * div_p3 : 0; u1(y, x) = u1NewVal; u2(y, x) = u2NewVal; if (gamma) u3(y, x) = u3NewVal; if (calcError) { const float n1 = (u1OldVal - u1NewVal) * (u1OldVal - u1NewVal); const float n2 = (u2OldVal - u2NewVal) * (u2OldVal - u2NewVal); error(y, x) = n1 + n2; } } void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho_c, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf error, float l_t, float theta, float gamma, bool calcError, hipStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(I1wx.cols, block.x), divUp(I1wx.rows, block.y)); hipLaunchKernelGGL(( estimateUKernel), dim3(grid), dim3(block), 0, stream, I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, error, l_t, theta, gamma, calcError); cudaSafeCall( hipGetLastError() ); if (!stream) cudaSafeCall( hipDeviceSynchronize() ); } } //////////////////////////////////////////////////////////// // estimateDualVariables namespace tvl1flow { __global__ void estimateDualVariablesKernel(const PtrStepSzf u1, const PtrStepf u2, const PtrStepSzf u3, PtrStepf p11, PtrStepf p12, PtrStepf p21, PtrStepf p22, PtrStepf p31, PtrStepf p32, const float taut, const float gamma) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= u1.cols || y >= u1.rows) return; const float u1x = u1(y, ::min(x + 1, u1.cols - 1)) - u1(y, x); const float u1y = u1(::min(y + 1, u1.rows - 1), x) - u1(y, x); const float u2x = u2(y, ::min(x + 1, u1.cols - 1)) - u2(y, x); const float u2y = u2(::min(y + 1, u1.rows - 1), x) - u2(y, x); const float u3x = gamma ? u3(y, ::min(x + 1, u1.cols - 1)) - u3(y, x) : 0; const float u3y = gamma ? u3(::min(y + 1, u1.rows - 1), x) - u3(y, x) : 0; const float g1 = ::hypotf(u1x, u1y); const float g2 = ::hypotf(u2x, u2y); const float g3 = gamma ? ::hypotf(u3x, u3y) : 0; const float ng1 = 1.0f + taut * g1; const float ng2 = 1.0f + taut * g2; const float ng3 = gamma ? 
1.0f + taut * g3 : 0; p11(y, x) = (p11(y, x) + taut * u1x) / ng1; p12(y, x) = (p12(y, x) + taut * u1y) / ng1; p21(y, x) = (p21(y, x) + taut * u2x) / ng2; p22(y, x) = (p22(y, x) + taut * u2y) / ng2; if (gamma) { p31(y, x) = (p31(y, x) + taut * u3x) / ng3; p32(y, x) = (p32(y, x) + taut * u3y) / ng3; } } void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, float taut, float gamma, hipStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(u1.cols, block.x), divUp(u1.rows, block.y)); hipLaunchKernelGGL(( estimateDualVariablesKernel), dim3(grid), dim3(block), 0, stream, u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma); cudaSafeCall( hipGetLastError() ); if (!stream) cudaSafeCall( hipDeviceSynchronize() ); } } #endif // !defined CUDA_DISABLER
77235f329747d756ae085bc4ab37155a5e6c1865.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" #include "opencv2/core/cuda/limits.hpp" #include "opencv2/core/cuda.hpp" using namespace cv::cuda; using namespace cv::cuda::device; //////////////////////////////////////////////////////////// // centeredGradient namespace tvl1flow { __global__ void centeredGradientKernel(const PtrStepSzf src, PtrStepf dx, PtrStepf dy) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= src.cols || y >= src.rows) return; dx(y, x) = 0.5f * (src(y, ::min(x + 1, src.cols - 1)) - src(y, ::max(x - 1, 0))); dy(y, x) = 0.5f * (src(::min(y + 1, src.rows - 1), x) - src(::max(y - 1, 0), x)); } void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); centeredGradientKernel<<<grid, block, 0, stream>>>(src, dx, dy); cudaSafeCall( cudaGetLastError() ); if (!stream) cudaSafeCall( cudaDeviceSynchronize() ); } } //////////////////////////////////////////////////////////// // warpBackward namespace tvl1flow { static __device__ __forceinline__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } struct SrcTex { virtual ~SrcTex() {} __device__ __forceinline__ virtual float I1(float x, float y) const = 0; __device__ __forceinline__ virtual float I1x(float x, float y) const = 0; __device__ __forceinline__ virtual float I1y(float x, float y) const = 0; }; texture<float, cudaTextureType2D, cudaReadModeElementType> tex_I1 (false, cudaFilterModePoint, cudaAddressModeClamp); texture<float, cudaTextureType2D, cudaReadModeElementType> tex_I1x(false, cudaFilterModePoint, cudaAddressModeClamp); texture<float, cudaTextureType2D, cudaReadModeElementType> tex_I1y(false, cudaFilterModePoint, cudaAddressModeClamp); struct SrcTexRef : SrcTex { __device__ __forceinline__ float I1(float x, float y) const CV_OVERRIDE { return tex2D(tex_I1, x, y); } __device__ __forceinline__ float I1x(float x, float y) const CV_OVERRIDE { return tex2D(tex_I1x, x, y); } __device__ __forceinline__ float I1y(float x, float y) const CV_OVERRIDE { return tex2D(tex_I1y, x, y); } }; struct SrcTexObj : SrcTex { __host__ SrcTexObj(cudaTextureObject_t tex_obj_I1_, cudaTextureObject_t tex_obj_I1x_, cudaTextureObject_t tex_obj_I1y_) : tex_obj_I1(tex_obj_I1_), tex_obj_I1x(tex_obj_I1x_), tex_obj_I1y(tex_obj_I1y_) {} __device__ __forceinline__ float I1(float x, float y) const CV_OVERRIDE { return tex2D<float>(tex_obj_I1, x, y); } __device__ __forceinline__ float I1x(float x, float y) const CV_OVERRIDE { return tex2D<float>(tex_obj_I1x, x, y); } __device__ __forceinline__ float I1y(float x, float y) const CV_OVERRIDE { return tex2D<float>(tex_obj_I1y, x, y); } cudaTextureObject_t tex_obj_I1; cudaTextureObject_t tex_obj_I1x; cudaTextureObject_t tex_obj_I1y; }; template < typename T, typename = typename std::enable_if<std::is_base_of<SrcTex, T>::value>::type > __global__ void warpBackwardKernel( const PtrStepSzf I0, const T src, const PtrStepf u1, const PtrStepf u2, PtrStepf I1w, PtrStepf I1wx, PtrStepf I1wy, PtrStepf grad, PtrStepf rho) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= I0.cols || y >= I0.rows) return; const float u1Val = u1(y, 
x); const float u2Val = u2(y, x); const float wx = x + u1Val; const float wy = y + u2Val; const int xmin = ::ceilf(wx - 2.0f); const int xmax = ::floorf(wx + 2.0f); const int ymin = ::ceilf(wy - 2.0f); const int ymax = ::floorf(wy + 2.0f); float sum = 0.0f; float sumx = 0.0f; float sumy = 0.0f; float wsum = 0.0f; for (int cy = ymin; cy <= ymax; ++cy) { for (int cx = xmin; cx <= xmax; ++cx) { const float w = bicubicCoeff(wx - cx) * bicubicCoeff(wy - cy); sum += w * src.I1(cx, cy); sumx += w * src.I1x(cx, cy); sumy += w * src.I1y(cx, cy); wsum += w; } } const float coeff = 1.0f / wsum; const float I1wVal = sum * coeff; const float I1wxVal = sumx * coeff; const float I1wyVal = sumy * coeff; I1w(y, x) = I1wVal; I1wx(y, x) = I1wxVal; I1wy(y, x) = I1wyVal; const float Ix2 = I1wxVal * I1wxVal; const float Iy2 = I1wyVal * I1wyVal; // store the |Grad(I1)|^2 grad(y, x) = Ix2 + Iy2; // compute the constant part of the rho function const float I0Val = I0(y, x); rho(y, x) = I1wVal - I1wxVal * u1Val - I1wyVal * u2Val - I0Val; } void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(I0.cols, block.x), divUp(I0.rows, block.y)); bool cc30 = deviceSupports(FEATURE_SET_COMPUTE_30); if (cc30) { cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.addressMode[1] = cudaAddressModeClamp; texDesc.addressMode[2] = cudaAddressModeClamp; cudaTextureObject_t texObj_I1 = 0, texObj_I1x = 0, texObj_I1y = 0; createTextureObjectPitch2D(&texObj_I1, I1, texDesc); createTextureObjectPitch2D(&texObj_I1x, I1x, texDesc); createTextureObjectPitch2D(&texObj_I1y, I1y, texDesc); warpBackwardKernel << <grid, block, 0, stream >> > (I0, SrcTexObj(texObj_I1, texObj_I1x, texObj_I1y), u1, u2, I1w, I1wx, I1wy, grad, rho); cudaSafeCall(cudaGetLastError()); if (!stream) cudaSafeCall(cudaDeviceSynchronize()); else cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaDestroyTextureObject(texObj_I1)); cudaSafeCall(cudaDestroyTextureObject(texObj_I1x)); cudaSafeCall(cudaDestroyTextureObject(texObj_I1y)); } else { bindTexture(&tex_I1, I1); bindTexture(&tex_I1x, I1x); bindTexture(&tex_I1y, I1y); warpBackwardKernel << <grid, block, 0, stream >> > (I0, SrcTexRef(), u1, u2, I1w, I1wx, I1wy, grad, rho); cudaSafeCall(cudaGetLastError()); if (!stream) cudaSafeCall(cudaDeviceSynchronize()); } } } //////////////////////////////////////////////////////////// // estimateU namespace tvl1flow { __device__ float divergence(const PtrStepf& v1, const PtrStepf& v2, int y, int x) { if (x > 0 && y > 0) { const float v1x = v1(y, x) - v1(y, x - 1); const float v2y = v2(y, x) - v2(y - 1, x); return v1x + v2y; } else { if (y > 0) return v1(y, 0) + v2(y, 0) - v2(y - 1, 0); else { if (x > 0) return v1(0, x) - v1(0, x - 1) + v2(0, x); else return v1(0, 0) + v2(0, 0); } } } __global__ void estimateUKernel(const PtrStepSzf I1wx, const PtrStepf I1wy, const PtrStepf grad, const PtrStepf rho_c, const PtrStepf p11, const PtrStepf p12, const PtrStepf p21, const PtrStepf p22, const PtrStepf p31, const PtrStepf p32, PtrStepf u1, PtrStepf u2, PtrStepf u3, PtrStepf error, const float l_t, const float theta, const float gamma, const bool calcError) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= I1wx.cols || y >= I1wx.rows) return; const 
float I1wxVal = I1wx(y, x); const float I1wyVal = I1wy(y, x); const float gradVal = grad(y, x); const float u1OldVal = u1(y, x); const float u2OldVal = u2(y, x); const float u3OldVal = gamma ? u3(y, x) : 0; const float rho = rho_c(y, x) + (I1wxVal * u1OldVal + I1wyVal * u2OldVal + gamma * u3OldVal); // estimate the values of the variable (v1, v2) (thresholding operator TH) float d1 = 0.0f; float d2 = 0.0f; float d3 = 0.0f; if (rho < -l_t * gradVal) { d1 = l_t * I1wxVal; d2 = l_t * I1wyVal; if (gamma) d3 = l_t * gamma; } else if (rho > l_t * gradVal) { d1 = -l_t * I1wxVal; d2 = -l_t * I1wyVal; if (gamma) d3 = -l_t * gamma; } else if (gradVal > numeric_limits<float>::epsilon()) { const float fi = -rho / gradVal; d1 = fi * I1wxVal; d2 = fi * I1wyVal; if (gamma) d3 = fi * gamma; } const float v1 = u1OldVal + d1; const float v2 = u2OldVal + d2; const float v3 = u3OldVal + d3; // compute the divergence of the dual variable (p1, p2) const float div_p1 = divergence(p11, p12, y, x); const float div_p2 = divergence(p21, p22, y, x); const float div_p3 = gamma ? divergence(p31, p32, y, x) : 0; // estimate the values of the optical flow (u1, u2) const float u1NewVal = v1 + theta * div_p1; const float u2NewVal = v2 + theta * div_p2; const float u3NewVal = gamma ? v3 + theta * div_p3 : 0; u1(y, x) = u1NewVal; u2(y, x) = u2NewVal; if (gamma) u3(y, x) = u3NewVal; if (calcError) { const float n1 = (u1OldVal - u1NewVal) * (u1OldVal - u1NewVal); const float n2 = (u2OldVal - u2NewVal) * (u2OldVal - u2NewVal); error(y, x) = n1 + n2; } } void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho_c, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf error, float l_t, float theta, float gamma, bool calcError, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(I1wx.cols, block.x), divUp(I1wx.rows, block.y)); estimateUKernel<<<grid, block, 0, stream>>>(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, p31, p32, u1, u2, u3, error, l_t, theta, gamma, calcError); cudaSafeCall( cudaGetLastError() ); if (!stream) cudaSafeCall( cudaDeviceSynchronize() ); } } //////////////////////////////////////////////////////////// // estimateDualVariables namespace tvl1flow { __global__ void estimateDualVariablesKernel(const PtrStepSzf u1, const PtrStepf u2, const PtrStepSzf u3, PtrStepf p11, PtrStepf p12, PtrStepf p21, PtrStepf p22, PtrStepf p31, PtrStepf p32, const float taut, const float gamma) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= u1.cols || y >= u1.rows) return; const float u1x = u1(y, ::min(x + 1, u1.cols - 1)) - u1(y, x); const float u1y = u1(::min(y + 1, u1.rows - 1), x) - u1(y, x); const float u2x = u2(y, ::min(x + 1, u1.cols - 1)) - u2(y, x); const float u2y = u2(::min(y + 1, u1.rows - 1), x) - u2(y, x); const float u3x = gamma ? u3(y, ::min(x + 1, u1.cols - 1)) - u3(y, x) : 0; const float u3y = gamma ? u3(::min(y + 1, u1.rows - 1), x) - u3(y, x) : 0; const float g1 = ::hypotf(u1x, u1y); const float g2 = ::hypotf(u2x, u2y); const float g3 = gamma ? ::hypotf(u3x, u3y) : 0; const float ng1 = 1.0f + taut * g1; const float ng2 = 1.0f + taut * g2; const float ng3 = gamma ? 
1.0f + taut * g3 : 0; p11(y, x) = (p11(y, x) + taut * u1x) / ng1; p12(y, x) = (p12(y, x) + taut * u1y) / ng1; p21(y, x) = (p21(y, x) + taut * u2x) / ng2; p22(y, x) = (p22(y, x) + taut * u2y) / ng2; if (gamma) { p31(y, x) = (p31(y, x) + taut * u3x) / ng3; p32(y, x) = (p32(y, x) + taut * u3y) / ng3; } } void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf u3, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf p31, PtrStepSzf p32, float taut, float gamma, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(u1.cols, block.x), divUp(u1.rows, block.y)); estimateDualVariablesKernel<<<grid, block, 0, stream>>>(u1, u2, u3, p11, p12, p21, p22, p31, p32, taut, gamma); cudaSafeCall( cudaGetLastError() ); if (!stream) cudaSafeCall( cudaDeviceSynchronize() ); } } #endif // !defined CUDA_DISABLER
44d64e5925476364fd6e02f1e1f290ae7c149946.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ __forceinline__ void copy_c(float const *in, float *out, int slicesizein, int slicesizeout, int C)
{
    // *out = *in;
    for (size_t c(0); c < C; ++c)
        out[c * slicesizeout] = in[c * slicesizein];
}

__device__ __forceinline__ void add_c(float const *in, float *out, int slicesizein, int slicesizeout, int C)
{
    // *out = *in + *out;
    for (size_t c(0); c < C; ++c)
        out[c * slicesizeout] += in[c * slicesizein];
}

__device__ __forceinline__ int get_index(int X, int Y, int Z, int C, int x, int y, int z)
{
    return z * (C * X * Y) + y * X + x;
}

__global__ void unshift_kernel(float const *in, float *out, int X, int Y, int C, int dx, int dy, float const beta)
{
    int x(threadIdx.x + blockDim.x * blockIdx.x);
    int y(x / X);
    x = x % X;
    int x_to(x + dx);
    int y_to(y + dy);
    if (x >= X || y >= Y || x_to >= X || y_to >= Y || x_to < 0 || y_to < 0)
        return;
    if (beta>0)
        add_c(in + get_index(X, Y, 1, C, x_to, y_to, 0), out + get_index(X, Y, 1, C, x, y, 0), X * Y, X * Y, C);
    else
        copy_c(in + get_index(X, Y, 1, C, x_to, y_to, 0), out + get_index(X, Y, 1, C, x, y, 0), X * Y, X * Y, C);
}
44d64e5925476364fd6e02f1e1f290ae7c149946.cu
#include "includes.h" __device__ __forceinline__ void copy_c(float const *in, float *out, int slicesizein, int slicesizeout, int C) { // *out = *in; for (size_t c(0); c < C; ++c) out[c * slicesizeout] = in[c * slicesizein]; } __device__ __forceinline__ void add_c(float const *in, float *out, int slicesizein, int slicesizeout, int C) { // *out = *in + *out; for (size_t c(0); c < C; ++c) out[c * slicesizeout] += in[c * slicesizein]; } __device__ __forceinline__ int get_index(int X, int Y, int Z, int C, int x, int y, int z) { return z * (C * X * Y) + y * X + x; } __global__ void unshift_kernel(float const *in, float *out, int X, int Y, int C, int dx, int dy, float const beta) { int x(threadIdx.x + blockDim.x * blockIdx.x); int y(x / X); x = x % X; int x_to(x + dx); int y_to(y + dy); if (x >= X || y >= Y || x_to >= X || y_to >= Y || x_to < 0 || y_to < 0) return; if (beta>0) add_c(in + get_index(X, Y, 1, C, x_to, y_to, 0), out + get_index(X, Y, 1, C, x, y, 0), X * Y, X * Y, C); else copy_c(in + get_index(X, Y, 1, C, x_to, y_to, 0), out + get_index(X, Y, 1, C, x, y, 0), X * Y, X * Y, C); }
4a2ab268cc5fa9f31551baca8e4a6725528ccb18.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header_hip.cuh"
#include "sort_hip.cuh"
#include "solver.cuh"
#include <helper_cuda.h>

__constant__ float cities[N_CITIES][2];

int get_nb_max_thread(hipDeviceProp_t deviceProp){
    int quantity_in_each_thread = sizeof(Individu) + 10 * sizeof(int);
    int memory_available = deviceProp.sharedMemPerBlock - (N_CITIES * sizeof(bool) + (N_CITIES * sizeof(int)));
    int nbThreads = memory_available / quantity_in_each_thread;
    int maxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
    if(nbThreads > maxThreadsPerBlock)
        nbThreads = maxThreadsPerBlock;
    return nbThreads;
}

void save_to_file(float cpuCities[N_CITIES][2], int *paths){
    FILE *f = fopen("/tmp/Output.json", "w");
    fprintf(f, "{\"nGeneration\":%d, \"cities\":[", N_GENERATION);
    for(int i = 0; i < N_CITIES; ++i) {
        fprintf(f, "\n[%f,%f]%c", cpuCities[i][0], cpuCities[i][1], i == N_CITIES - 1 ? ' ' : ',');
    }
    fprintf(f, "],\"islands\":[");
    for(int i = 0; i < N_ISLAND; ++i){
        fprintf(f, "\n[");
        for(int c = 0; c < N_CITIES; ++c){
            fprintf(f, "%d%c", paths[i * N_ISLAND + c], c == N_CITIES - 1 ? ' ' : ',');
        }
        fprintf(f, "]%c", i == N_ISLAND - 1 ? ' ' : ',');
    }
    fprintf(f, "]}\n");
    fclose(f);
}

int main() {
    // Init CUDA
    hipSetDevice(0);
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, 0);

    srand(0);

    // Init random cities
    float cpuCities[N_CITIES][2];
    for(int i = 0; i < N_CITIES; ++i) {
        cpuCities[i][0] = (float)rand() / RAND_MAX;
        cpuCities[i][1] = (float)rand() / RAND_MAX;
        //printf("(cpu) %f %f\n", cpuCities[i][0], cpuCities[i][1]);
    }

    int *paths = (int *)malloc(sizeof(int) * N_ISLAND * N_CITIES);
    int *g_paths;
    hipMalloc(&g_paths, sizeof(int) * N_ISLAND * N_CITIES);

    checkCudaErrors(hipMemcpyToSymbol(cities, cpuCities, sizeof(float) * N_CITIES * 2));

    // Init gpu migrants
    Individu *gpuMigrants; // Migrants are not in shared memory because they need to be used by all bloc
    checkCudaErrors(hipMalloc(&gpuMigrants, sizeof(Individu) * N_ISLAND));

    // Init threads
    int nbThreads = get_nb_max_thread(deviceProp);
    printf("Launching on %d threads\n", nbThreads);

    hipLaunchKernelGGL(( solve) , dim3(N_ISLAND), dim3(nbThreads), (nbThreads * sizeof(Individu)) + (N_CITIES * sizeof(int)) + (N_CITIES * sizeof(bool)), 0, gpuMigrants, g_paths);
    hipDeviceSynchronize();

    hipMemcpy(paths, g_paths, sizeof(float) * N_ISLAND * N_CITIES, hipMemcpyDeviceToHost);

    save_to_file(cpuCities, paths);

    //frees
    hipFree(gpuMigrants);
    hipFree(g_paths);
    free(paths);

    hipDeviceReset();
    return 0;
}
4a2ab268cc5fa9f31551baca8e4a6725528ccb18.cu
#include "header.cuh" #include "sort.cuh" #include "solver.cuh" #include <helper_cuda.h> __constant__ float cities[N_CITIES][2]; int get_nb_max_thread(cudaDeviceProp deviceProp){ int quantity_in_each_thread = sizeof(Individu) + 10 * sizeof(int); int memory_available = deviceProp.sharedMemPerBlock - (N_CITIES * sizeof(bool) + (N_CITIES * sizeof(int))); int nbThreads = memory_available / quantity_in_each_thread; int maxThreadsPerBlock = deviceProp.maxThreadsPerBlock; if(nbThreads > maxThreadsPerBlock) nbThreads = maxThreadsPerBlock; return nbThreads; } void save_to_file(float cpuCities[N_CITIES][2], int *paths){ FILE *f = fopen("/tmp/Output.json", "w"); fprintf(f, "{\"nGeneration\":%d, \"cities\":[", N_GENERATION); for(int i = 0; i < N_CITIES; ++i) { fprintf(f, "\n[%f,%f]%c", cpuCities[i][0], cpuCities[i][1], i == N_CITIES - 1 ? ' ' : ','); } fprintf(f, "],\"islands\":["); for(int i = 0; i < N_ISLAND; ++i){ fprintf(f, "\n["); for(int c = 0; c < N_CITIES; ++c){ fprintf(f, "%d%c", paths[i * N_ISLAND + c], c == N_CITIES - 1 ? ' ' : ','); } fprintf(f, "]%c", i == N_ISLAND - 1 ? ' ' : ','); } fprintf(f, "]}\n"); fclose(f); } int main() { // Init CUDA cudaSetDevice(0); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); srand(0); // Init random cities float cpuCities[N_CITIES][2]; for(int i = 0; i < N_CITIES; ++i) { cpuCities[i][0] = (float)rand() / RAND_MAX; cpuCities[i][1] = (float)rand() / RAND_MAX; //printf("(cpu) %f %f\n", cpuCities[i][0], cpuCities[i][1]); } int *paths = (int *)malloc(sizeof(int) * N_ISLAND * N_CITIES); int *g_paths; cudaMalloc(&g_paths, sizeof(int) * N_ISLAND * N_CITIES); checkCudaErrors(cudaMemcpyToSymbol(cities, cpuCities, sizeof(float) * N_CITIES * 2)); // Init gpu migrants Individu *gpuMigrants; // Migrants are not in shared memory because they need to be used by all bloc checkCudaErrors(cudaMalloc(&gpuMigrants, sizeof(Individu) * N_ISLAND)); // Init threads int nbThreads = get_nb_max_thread(deviceProp); printf("Launching on %d threads\n", nbThreads); solve <<<N_ISLAND, nbThreads, (nbThreads * sizeof(Individu)) + (N_CITIES * sizeof(int)) + (N_CITIES * sizeof(bool))>>>(gpuMigrants, g_paths); cudaDeviceSynchronize(); cudaMemcpy(paths, g_paths, sizeof(float) * N_ISLAND * N_CITIES, cudaMemcpyDeviceToHost); save_to_file(cpuCities, paths); //frees cudaFree(gpuMigrants); cudaFree(g_paths); free(paths); cudaDeviceReset(); return 0; }
ebcf7126b5950cff1a7115e26d73d73619c7c840.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* compile as: nvcc bindingTexture.cu */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define N 1024

texture<float, 1, hipReadModeElementType> tex; // texture reference name must be known at compile time

__global__ void kernel() {
    int i = blockIdx.x *blockDim.x + threadIdx.x;
    float x = tex1Dfetch(tex, i);
    // do some work using x...
}

void call_kernel(float *buffer) {
    // bind texture to buffer
    hipBindTexture(0, tex, buffer, N*sizeof(float));

    dim3 block(128,1,1);
    dim3 grid(N/block.x,1,1);
    hipLaunchKernelGGL(( kernel) , dim3(grid), dim3(block), 0, 0, );

    // unbind texture from buffer
    hipUnbindTexture(tex);
}

int main() {
    // declare and allocate memory
    float *buffer;
    hipMalloc(&buffer, N*sizeof(float));

    call_kernel(buffer);

    hipFree(buffer);
}
ebcf7126b5950cff1a7115e26d73d73619c7c840.cu
/* compile as: nvcc bindingTexture.cu */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define N 1024

texture<float, 1, cudaReadModeElementType> tex; // texture reference name must be known at compile time

__global__ void kernel() {
    int i = blockIdx.x *blockDim.x + threadIdx.x;
    float x = tex1Dfetch(tex, i);
    // do some work using x...
}

void call_kernel(float *buffer) {
    // bind texture to buffer
    cudaBindTexture(0, tex, buffer, N*sizeof(float));

    dim3 block(128,1,1);
    dim3 grid(N/block.x,1,1);
    kernel <<<grid, block>>>();

    // unbind texture from buffer
    cudaUnbindTexture(tex);
}

int main() {
    // declare and allocate memory
    float *buffer;
    cudaMalloc(&buffer, N*sizeof(float));

    call_kernel(buffer);

    cudaFree(buffer);
}
9ef28f9b74d13651ff519b0ab1df1e46a3e3aeb4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <utility> #include <THHUNN/THHUNN.h> #include <TH/THHalf.h> #include <THH/THHNumerics.cuh> #include <THH/THHApply.cuh> #include <THHUNN/common.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/HIPGraphsUtils.cuh> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_kernel.h> // copied from cutorch/lib/THC/THCTensorRandom.cu #define MAX_NUM_BLOCKS 64 #define BLOCK_SIZE 256 #define NUM_BLOCKS(n) \ (::min((int)THCCeilDiv(n, (ptrdiff_t)BLOCK_SIZE), MAX_NUM_BLOCKS)) template<typename T> inline T __device__ curand_uniform_type(hiprandStatePhilox4_32_10_t *state); template <> inline THHalf __device__ curand_uniform_type<THHalf>(hiprandStatePhilox4_32_10_t *state) { auto rand = hiprand_uniform4(state); return ScalarConvert<float, THHalf>::to(rand.x); } template <> inline float __device__ curand_uniform_type<float>(hiprandStatePhilox4_32_10_t *state) { auto rand = hiprand_uniform4(state); return rand.x; } template <> inline double __device__ curand_uniform_type<double>(hiprandStatePhilox4_32_10_t *state) { auto rand = hiprand_uniform2_double(state); return rand.x; } template <typename T> __global__ void rreluUpdateOutputTrain(int n, at::PhiloxCudaState philox_args, T *input, T* noise, T *output, double a, double b) { auto seeds = at::cuda::philox::unpack(philox_args); int idx = blockIdx.x * blockDim.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init(std::get<0>(seeds), idx, std::get<1>(seeds), &state); CUDA_KERNEL_LOOP(i, n) { if (input[i] <= 0) { T r = curand_uniform_type<T>(&state); r = ScalarConvert<double, T>::to(r * (b-a) + a); output[i] = input[i] * r; noise[i] = r; } else { output[i] = input[i]; noise[i] = ScalarConvert<int, T>::to(1); } } } template <typename T> struct RReLUUpdateOutputEval_functor { const T negSlope_; RReLUUpdateOutputEval_functor(T negSlope) : negSlope_(negSlope) {} __device__ __forceinline__ void operator()(T *out, T *in) { const T x = *in; const T r = x <= 0 ? negSlope_ : ScalarConvert<int, T>::to(1); *out = x * r; } }; template <typename T> struct RReLUUpdateOutputEvalIP_functor { const T negSlope_; RReLUUpdateOutputEvalIP_functor(T negSlope) : negSlope_(negSlope) {} __device__ __forceinline__ void operator()(T *x) { if (*x <= 0) { *x = *x * negSlope_; } } }; template <typename T> struct RReLUupdateGradInputEval_functor { const T negSlope_; RReLUupdateGradInputEval_functor(T negSlope) : negSlope_(negSlope) {} __device__ __forceinline__ void operator()(T *gradIn, T *gradOut, T *in) { *gradIn = (*in) <= 0 ? (*gradOut) * negSlope_ : (*gradOut); } }; template <typename T> struct RReLUupdateGradInputEvalIP_functor { const T negSlope_; RReLUupdateGradInputEvalIP_functor(T negSlope) : negSlope_(negSlope) {} __device__ __forceinline__ void operator()(T *gradOut, T *in) { if (*in <= 0) { *gradOut = (*gradOut) * negSlope_; } } }; #include <THHUNN/generic/RReLU.hip> #include <THH/THHGenerateFloatTypes.h>
9ef28f9b74d13651ff519b0ab1df1e46a3e3aeb4.cu
#include <algorithm> #include <utility> #include <THCUNN/THCUNN.h> #include <TH/THHalf.h> #include <THC/THCNumerics.cuh> #include <THC/THCApply.cuh> #include <THCUNN/common.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/CUDAGraphsUtils.cuh> #include <curand.h> #include <curand_kernel.h> #include <curand_philox4x32_x.h> // copied from cutorch/lib/THC/THCTensorRandom.cu #define MAX_NUM_BLOCKS 64 #define BLOCK_SIZE 256 #define NUM_BLOCKS(n) \ (std::min((int)THCCeilDiv(n, (ptrdiff_t)BLOCK_SIZE), MAX_NUM_BLOCKS)) template<typename T> inline T __device__ curand_uniform_type(curandStatePhilox4_32_10_t *state); template <> inline THHalf __device__ curand_uniform_type<THHalf>(curandStatePhilox4_32_10_t *state) { auto rand = curand_uniform4(state); return ScalarConvert<float, THHalf>::to(rand.x); } template <> inline float __device__ curand_uniform_type<float>(curandStatePhilox4_32_10_t *state) { auto rand = curand_uniform4(state); return rand.x; } template <> inline double __device__ curand_uniform_type<double>(curandStatePhilox4_32_10_t *state) { auto rand = curand_uniform2_double(state); return rand.x; } template <typename T> __global__ void rreluUpdateOutputTrain(int n, at::PhiloxCudaState philox_args, T *input, T* noise, T *output, double a, double b) { auto seeds = at::cuda::philox::unpack(philox_args); int idx = blockIdx.x * blockDim.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init(std::get<0>(seeds), idx, std::get<1>(seeds), &state); CUDA_KERNEL_LOOP(i, n) { if (input[i] <= 0) { T r = curand_uniform_type<T>(&state); r = ScalarConvert<double, T>::to(r * (b-a) + a); output[i] = input[i] * r; noise[i] = r; } else { output[i] = input[i]; noise[i] = ScalarConvert<int, T>::to(1); } } } template <typename T> struct RReLUUpdateOutputEval_functor { const T negSlope_; RReLUUpdateOutputEval_functor(T negSlope) : negSlope_(negSlope) {} __device__ __forceinline__ void operator()(T *out, T *in) { const T x = *in; const T r = x <= 0 ? negSlope_ : ScalarConvert<int, T>::to(1); *out = x * r; } }; template <typename T> struct RReLUUpdateOutputEvalIP_functor { const T negSlope_; RReLUUpdateOutputEvalIP_functor(T negSlope) : negSlope_(negSlope) {} __device__ __forceinline__ void operator()(T *x) { if (*x <= 0) { *x = *x * negSlope_; } } }; template <typename T> struct RReLUupdateGradInputEval_functor { const T negSlope_; RReLUupdateGradInputEval_functor(T negSlope) : negSlope_(negSlope) {} __device__ __forceinline__ void operator()(T *gradIn, T *gradOut, T *in) { *gradIn = (*in) <= 0 ? (*gradOut) * negSlope_ : (*gradOut); } }; template <typename T> struct RReLUupdateGradInputEvalIP_functor { const T negSlope_; RReLUupdateGradInputEvalIP_functor(T negSlope) : negSlope_(negSlope) {} __device__ __forceinline__ void operator()(T *gradOut, T *in) { if (*in <= 0) { *gradOut = (*gradOut) * negSlope_; } } }; #include <THCUNN/generic/RReLU.cu> #include <THC/THCGenerateFloatTypes.h>
d83c7c9dcc9a7511464be30b32c2950add44fbc8.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <iostream>
#include <algorithm>

using thrust::sort;
using thrust::device_ptr;
using std::cout;
using std::endl;

#define CUDA_CHECK(x) \
  { hipError_t cuda_error = x; \
    if (cuda_error != hipSuccess) \
      cout << "hipError_t: " << cuda_error << " != 0 " \
           << hipGetErrorString(cuda_error) << endl; \
  }

#define VECTOR_PRINT(head_str, vec, LEN) \
  cout << head_str << ": {"; \
  for (int i = 0; i < LEN - 1; ++i){ \
    cout << vec[i] << ", "; \
  } \
  cout << vec[LEN - 1] << "}" << endl;

#define LEN 32

int main(){
    // host memory malloc & initial
    float* host_a = new float[LEN];
    float* host_b = new float[LEN];
    for (int i = 0; i < LEN; ++i){
        host_a[i] = LEN - i;
        host_b[i] = LEN - i;
    }

    // GPU device start
    int device_id = 1;
    CUDA_CHECK(hipSetDevice(device_id));
    cout << "Using GPU " << device_id << "." << endl;

    // hipMalloc & hipMemcpy & hipMemset
    float* dev_a;
    CUDA_CHECK(hipMalloc((void**)&dev_a, LEN * sizeof(float)));
    CUDA_CHECK(hipMemcpy(dev_a, host_a, LEN * sizeof(float), hipMemcpyHostToDevice));

    // thrust device sort
    device_ptr<float> dp(dev_a);
    sort(dp, dp + LEN);
    CUDA_CHECK(hipMemcpy(host_a, dev_a, LEN * sizeof(float), hipMemcpyDeviceToHost));
    VECTOR_PRINT("thrust sort", host_a, LEN);

    // std::sort
    for (int i = 0; i < LEN; ++i){
        host_b[i] = float(LEN) - float(i);
    }
    std::sort(host_b, host_b+ LEN);
    VECTOR_PRINT("std sort", host_b, LEN);

    // Free gpu memory & free cpu memory
    CUDA_CHECK(hipFree(dev_a));
    delete[] host_a;
    delete[] host_b;
    return 0;
}
d83c7c9dcc9a7511464be30b32c2950add44fbc8.cu
#include <cuda_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <iostream>
#include <algorithm>

using thrust::sort;
using thrust::device_ptr;
using std::cout;
using std::endl;

#define CUDA_CHECK(x) \
  { cudaError_t cuda_error = x; \
    if (cuda_error != cudaSuccess) \
      cout << "cudaError_t: " << cuda_error << " != 0 " \
           << cudaGetErrorString(cuda_error) << endl; \
  }

#define VECTOR_PRINT(head_str, vec, LEN) \
  cout << head_str << ": {"; \
  for (int i = 0; i < LEN - 1; ++i){ \
    cout << vec[i] << ", "; \
  } \
  cout << vec[LEN - 1] << "}" << endl;

#define LEN 32

int main(){
    // host memory malloc & initial
    float* host_a = new float[LEN];
    float* host_b = new float[LEN];
    for (int i = 0; i < LEN; ++i){
        host_a[i] = LEN - i;
        host_b[i] = LEN - i;
    }

    // GPU device start
    int device_id = 1;
    CUDA_CHECK(cudaSetDevice(device_id));
    cout << "Using GPU " << device_id << "." << endl;

    // cudaMalloc & cudaMemcpy & cudaMemset
    float* dev_a;
    CUDA_CHECK(cudaMalloc((void**)&dev_a, LEN * sizeof(float)));
    CUDA_CHECK(cudaMemcpy(dev_a, host_a, LEN * sizeof(float), cudaMemcpyHostToDevice));

    // thrust device sort
    device_ptr<float> dp(dev_a);
    sort(dp, dp + LEN);
    CUDA_CHECK(cudaMemcpy(host_a, dev_a, LEN * sizeof(float), cudaMemcpyDeviceToHost));
    VECTOR_PRINT("thrust sort", host_a, LEN);

    // std::sort
    for (int i = 0; i < LEN; ++i){
        host_b[i] = float(LEN) - float(i);
    }
    std::sort(host_b, host_b+ LEN);
    VECTOR_PRINT("std sort", host_b, LEN);

    // Free gpu memory & free cpu memory
    CUDA_CHECK(cudaFree(dev_a));
    delete[] host_a;
    delete[] host_b;
    return 0;
}
500dede0559230a5dd6a5a90ed73f64c25f009c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_tea_leaf_init_zero2_kernel; int xdim0_tea_leaf_init_zero2_kernel_h = -1; __constant__ int xdim1_tea_leaf_init_zero2_kernel; int xdim1_tea_leaf_init_zero2_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y) (x + xdim0_tea_leaf_init_zero2_kernel * (y)) #define OPS_ACC1(x, y) (x + xdim1_tea_leaf_init_zero2_kernel * (y)) // user function __device__ void tea_leaf_init_zero2_kernel_gpu(double *p, double *z) { p[OPS_ACC0(0, 0)] = 0.0; z[OPS_ACC1(0, 0)] = 0.0; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_tea_leaf_init_zero2_kernel(double *__restrict arg0, double *__restrict arg1, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_tea_leaf_init_zero2_kernel; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_tea_leaf_init_zero2_kernel; if (idx_x < size0 && idx_y < size1) { tea_leaf_init_zero2_kernel_gpu(arg0, arg1); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_tea_leaf_init_zero2_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_tea_leaf_init_zero2_kernel_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif // Timing double t1, t2, c1, c2; ops_arg args[2] = {arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 2, range, 16)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(16, "tea_leaf_init_zero2_kernel"); OPS_kernels[16].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; if (xdim0 != xdim0_tea_leaf_init_zero2_kernel_h || xdim1 != xdim1_tea_leaf_init_zero2_kernel_h) { hipMemcpyToSymbol(xdim0_tea_leaf_init_zero2_kernel, &xdim0, sizeof(int)); xdim0_tea_leaf_init_zero2_kernel_h = xdim0; hipMemcpyToSymbol(xdim1_tea_leaf_init_zero2_kernel, &xdim1, sizeof(int)); xdim1_tea_leaf_init_zero2_kernel_h = xdim1; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? 
args[1].dat->type_size : args[1].dat->elem_size); char *p_a[2]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args, 2, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[16].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_tea_leaf_init_zero2_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], x_size, y_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[16].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[16].mpi_time += t2 - t1; OPS_kernels[16].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[16].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_tea_leaf_init_zero2_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 16; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 16; for (int i = 0; i < 4; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg *)malloc(2 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->function = ops_par_loop_tea_leaf_init_zero2_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(16, "tea_leaf_init_zero2_kernel"); } ops_enqueue_kernel(desc); } #endif
500dede0559230a5dd6a5a90ed73f64c25f009c4.cu
// // auto-generated by ops.py // __constant__ int xdim0_tea_leaf_init_zero2_kernel; int xdim0_tea_leaf_init_zero2_kernel_h = -1; __constant__ int xdim1_tea_leaf_init_zero2_kernel; int xdim1_tea_leaf_init_zero2_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y) (x + xdim0_tea_leaf_init_zero2_kernel * (y)) #define OPS_ACC1(x, y) (x + xdim1_tea_leaf_init_zero2_kernel * (y)) // user function __device__ void tea_leaf_init_zero2_kernel_gpu(double *p, double *z) { p[OPS_ACC0(0, 0)] = 0.0; z[OPS_ACC1(0, 0)] = 0.0; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_tea_leaf_init_zero2_kernel(double *__restrict arg0, double *__restrict arg1, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_tea_leaf_init_zero2_kernel; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_tea_leaf_init_zero2_kernel; if (idx_x < size0 && idx_y < size1) { tea_leaf_init_zero2_kernel_gpu(arg0, arg1); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_tea_leaf_init_zero2_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_tea_leaf_init_zero2_kernel_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif // Timing double t1, t2, c1, c2; ops_arg args[2] = {arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 2, range, 16)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(16, "tea_leaf_init_zero2_kernel"); OPS_kernels[16].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; if (xdim0 != xdim0_tea_leaf_init_zero2_kernel_h || xdim1 != xdim1_tea_leaf_init_zero2_kernel_h) { cudaMemcpyToSymbol(xdim0_tea_leaf_init_zero2_kernel, &xdim0, sizeof(int)); xdim0_tea_leaf_init_zero2_kernel_h = xdim0; cudaMemcpyToSymbol(xdim1_tea_leaf_init_zero2_kernel, &xdim1, sizeof(int)); xdim1_tea_leaf_init_zero2_kernel_h = xdim1; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? 
args[1].dat->type_size : args[1].dat->elem_size); char *p_a[2]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args, 2, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[16].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_tea_leaf_init_zero2_kernel<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], x_size, y_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[16].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[16].mpi_time += t2 - t1; OPS_kernels[16].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[16].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_tea_leaf_init_zero2_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 16; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 16; for (int i = 0; i < 4; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg *)malloc(2 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->function = ops_par_loop_tea_leaf_init_zero2_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(16, "tea_leaf_init_zero2_kernel"); } ops_enqueue_kernel(desc); } #endif
dcdf69b754875a181c226444ae009374b409332f.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are *permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this *list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this *list of conditions and the following disclaimer in the documentation and/or other *materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors *may be used to endorse or promote products derived from this software without specific *prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY *EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES *OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT *SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) *HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** * \file dnn/src/cuda/matrix_mul/uint4x4x32_wmma/wmma_matrix_mul_u4.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
*/ #include "src/cuda/utils.cuh" #include <hip/hip_runtime.h> #if TORCH_HIP_VERSION >= 10000 #if __CUDA_ARCH__ >= 730 #include <mma.h> using namespace nvcuda; using namespace wmma::experimental::precision; #endif namespace wmma_matrix_mul_u4 { constexpr uint32_t WMMA_M = 8, WMMA_N = 8, WMMA_K = 32, WARP_SIZE = 32; template <size_t WARP_X_, size_t WARP_Y_, size_t ROW_PER_WARP_, size_t COL_PER_WARP_> struct BlockConfig { static const size_t WARP_X = WARP_X_; static const size_t WARP_Y = WARP_Y_; static const size_t ROW_PER_WARP = ROW_PER_WARP_; static const size_t COL_PER_WARP = COL_PER_WARP_; static const size_t BK = 256; static const size_t BM = (WARP_Y * WMMA_M * ROW_PER_WARP); static const size_t BN = (WARP_X * WMMA_N * COL_PER_WARP); static const size_t WARPS_PER_BLOCK = WARP_X * WARP_Y; }; template <size_t BlockSize_, typename BlockConfig_> struct GlobalToShareMemStreamConfig { static const size_t BlockSize = BlockSize_; static const size_t CACHE_SIZE = (BlockSize + BlockConfig_::WARPS_PER_BLOCK - 1) / BlockConfig_::WARPS_PER_BLOCK; static const size_t SMEM_ROW = BlockSize; static const size_t SMEM_COL = BlockConfig_::BK; static const size_t SMEM_SKEW = WMMA_K * ((BlockConfig_::BK / WMMA_K) % 2 == 0); static const size_t SMEM_STRIDE = SMEM_COL + SMEM_SKEW; }; #if __CUDA_ARCH__ >= 730 template <typename BlockConfig_, typename GlobalToShareMemStreamConfig_> struct GlobalToShareMemStream { MEGDNN_STATIC_ASSERT( GlobalToShareMemStreamConfig_::BlockSize == GlobalToShareMemStreamConfig_::CACHE_SIZE * BlockConfig_::WARPS_PER_BLOCK, "Block size mismatch"); uint8_t* smem; const uint8_t* g_ptr; int ld; int row_remain; int k_base; int K; const int warp_x = threadIdx.x / WARP_SIZE; const int warp_y = threadIdx.y; const int idx_in_warp = threadIdx.x % WARP_SIZE; const int warp_id = warp_y * BlockConfig_::WARP_X + warp_x; typedef int32_t copy_t; copy_t reg_cache[GlobalToShareMemStreamConfig_::CACHE_SIZE]; __device__ GlobalToShareMemStream( uint8_t* smem, const uint8_t* g_ptr, int ld, int row_remain, int K) : smem{smem}, g_ptr{g_ptr}, ld{ld}, row_remain{row_remain}, K{K} { k_base = 0; } __device__ __forceinline__ void copy() { int col = k_base + idx_in_warp * 8; #pragma unroll for (int i = 0; i < GlobalToShareMemStreamConfig_::CACHE_SIZE; i++) { int row = i * BlockConfig_::WARPS_PER_BLOCK + warp_id; bool cond = row < row_remain && col < K; if (cond) { copy_t val = *(copy_t*)(&g_ptr[(row * ld + col) / 2]); reg_cache[i] = val; } else { reg_cache[i] = 0; } } } __device__ __forceinline__ void commit() { int col = idx_in_warp * 8; #pragma unroll for (int i = 0; i < GlobalToShareMemStreamConfig_::CACHE_SIZE; i++) { int row = i * BlockConfig_::WARPS_PER_BLOCK + warp_id; *(copy_t*)(get_smem_ptr(row, col)) = reg_cache[i]; } } __device__ __forceinline__ uint8_t* get_smem_ptr(int y, int x) { return &smem[(y * GlobalToShareMemStreamConfig_::SMEM_STRIDE + x) / 2]; } __device__ __forceinline__ void inc_stage() { k_base += GlobalToShareMemStreamConfig_::SMEM_COL; } }; template <typename BlockConfig_> __device__ inline void load_share_mem( wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major> a_frag[BlockConfig_::ROW_PER_WARP], wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major> b_frag[BlockConfig_::COL_PER_WARP], GlobalToShareMemStream< BlockConfig_, GlobalToShareMemStreamConfig<BlockConfig_::BM, BlockConfig_>>& gbl2smem_a, GlobalToShareMemStream< BlockConfig_, GlobalToShareMemStreamConfig<BlockConfig_::BN, BlockConfig_>>& gbl2smem_b, int warp_k) { typedef 
GlobalToShareMemStreamConfig<BlockConfig_::BM, BlockConfig_> Config_A; typedef GlobalToShareMemStreamConfig<BlockConfig_::BN, BlockConfig_> Config_B; const int warp_x = threadIdx.x / WARP_SIZE; const int warp_y = threadIdx.y; uint8_t* __restrict__ s_ptr_a = gbl2smem_a.get_smem_ptr(warp_y * WMMA_M, warp_k * WMMA_K); uint8_t* __restrict__ s_ptr_b = gbl2smem_b.get_smem_ptr(warp_x * WMMA_N, warp_k * WMMA_K); const int stride_a = BlockConfig_::WARP_Y * WMMA_M; const int stride_b = BlockConfig_::WARP_X * WMMA_N; #pragma unroll for (int i = 0; i < BlockConfig_::ROW_PER_WARP; ++i) { wmma::load_matrix_sync( a_frag[i], s_ptr_a + i * stride_a * Config_A::SMEM_STRIDE / 2, Config_A::SMEM_STRIDE); } #pragma unroll for (int j = 0; j < BlockConfig_::COL_PER_WARP; ++j) { wmma::load_matrix_sync( b_frag[j], s_ptr_b + j * stride_b * Config_B::SMEM_STRIDE / 2, Config_B::SMEM_STRIDE); } } template <size_t ROW_PER_WARP, size_t COL_PER_WARP> __device__ inline void calc( wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major> a_frag[ROW_PER_WARP], wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major> b_frag[COL_PER_WARP], wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int32_t> acc_frag[ROW_PER_WARP][COL_PER_WARP]) { #pragma unroll for (int i = 0; i < ROW_PER_WARP; ++i) { #pragma unroll for (int j = 0; j < COL_PER_WARP; ++j) { wmma::mma_sync(acc_frag[i][j], a_frag[i], b_frag[j], acc_frag[i][j]); } } } template <bool last_block, typename BlockConfig_> __device__ void inline consume_tile( GlobalToShareMemStream< BlockConfig_, GlobalToShareMemStreamConfig<BlockConfig_::BM, BlockConfig_>>& gbl2smem_a, GlobalToShareMemStream< BlockConfig_, GlobalToShareMemStreamConfig<BlockConfig_::BN, BlockConfig_>>& gbl2smem_b, wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major> a_frag[2][BlockConfig_::ROW_PER_WARP], wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major> b_frag[2][BlockConfig_::COL_PER_WARP], wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int32_t> acc_frag[BlockConfig_::ROW_PER_WARP][BlockConfig_::COL_PER_WARP]) { if (!last_block) { gbl2smem_a.inc_stage(); gbl2smem_b.inc_stage(); gbl2smem_a.copy(); gbl2smem_b.copy(); } int warp_k = 0; #pragma unroll for (warp_k = 0; warp_k < BlockConfig_::BK / WMMA_K - 1; ++warp_k) { load_share_mem<BlockConfig_>( a_frag[(warp_k + 1) % 2], b_frag[(warp_k + 1) % 2], gbl2smem_a, gbl2smem_b, warp_k + 1); calc<BlockConfig_::ROW_PER_WARP, BlockConfig_::COL_PER_WARP>( a_frag[warp_k % 2], b_frag[warp_k % 2], acc_frag); } calc<BlockConfig_::ROW_PER_WARP, BlockConfig_::COL_PER_WARP>( a_frag[warp_k % 2], b_frag[warp_k % 2], acc_frag); if (!last_block) { __syncthreads(); gbl2smem_a.commit(); gbl2smem_b.commit(); __syncthreads(); load_share_mem<BlockConfig_>(a_frag[0], b_frag[0], gbl2smem_a, gbl2smem_b, 0); } } template <typename BlockConfig_> __global__ void u4_gemm_template_device_nt( const uint8_t* A, const uint8_t* B, int32_t* C, int M, int N, int K, int lda, int ldb, int ldc) { typedef GlobalToShareMemStreamConfig<BlockConfig_::BM, BlockConfig_> Config_A; typedef GlobalToShareMemStreamConfig<BlockConfig_::BN, BlockConfig_> Config_B; __shared__ uint8_t smem_a[BlockConfig_::BM][Config_A::SMEM_STRIDE / 2]; __shared__ uint8_t smem_b[BlockConfig_::BN][Config_B::SMEM_STRIDE / 2]; const int bidx = blockIdx.x; const int bidy = blockIdx.y; const uint8_t* g_ptr_a = A + bidy * BlockConfig_::BM * lda / 2; const uint8_t* g_ptr_b = B + bidx * BlockConfig_::BN * ldb / 2; const int warp_x = 
threadIdx.x / WARP_SIZE; const int warp_y = threadIdx.y; const int warp_row_start = bidy * BlockConfig_::BM + warp_y * WMMA_M; const int warp_col_start = bidx * BlockConfig_::BN + warp_x * WMMA_N; int32_t* g_ptr_c = C + warp_row_start * ldc + warp_col_start; GlobalToShareMemStream<BlockConfig_, Config_A> gbl2smem_a( &smem_a[0][0], g_ptr_a, lda, M - bidy, K); GlobalToShareMemStream<BlockConfig_, Config_B> gbl2smem_b( &smem_b[0][0], g_ptr_b, ldb, N - bidx, K); wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int32_t> acc_frag[BlockConfig_::ROW_PER_WARP][BlockConfig_::COL_PER_WARP]; wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major> a_frag[2][BlockConfig_::ROW_PER_WARP]; wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major> b_frag[2][BlockConfig_::COL_PER_WARP]; #pragma unroll for (int i = 0; i < BlockConfig_::ROW_PER_WARP; ++i) { #pragma unroll for (int j = 0; j < BlockConfig_::COL_PER_WARP; ++j) { wmma::fill_fragment(acc_frag[i][j], 0); } } gbl2smem_a.copy(); gbl2smem_b.copy(); gbl2smem_a.commit(); gbl2smem_b.commit(); __syncthreads(); load_share_mem(a_frag[0], b_frag[0], gbl2smem_a, gbl2smem_b, 0); const int BLK_K = (K + BlockConfig_::BK - 1) / BlockConfig_::BK; #pragma unroll 1 for (int blk_k = 0; blk_k < BLK_K - 1; ++blk_k) { consume_tile<false, BlockConfig_>( gbl2smem_a, gbl2smem_b, a_frag, b_frag, acc_frag); } consume_tile<true, BlockConfig_>(gbl2smem_a, gbl2smem_b, a_frag, b_frag, acc_frag); #pragma unroll for (int i = 0; i < BlockConfig_::ROW_PER_WARP; ++i) { #pragma unroll for (int j = 0; j < BlockConfig_::COL_PER_WARP; ++j) { if (warp_row_start + i * BlockConfig_::WARP_Y * WMMA_M <= M - WMMA_M && warp_col_start + j * BlockConfig_::WARP_X * WMMA_N <= N - WMMA_N) { wmma::store_matrix_sync( &g_ptr_c [(i * BlockConfig_::WARP_Y * WMMA_M) * ldc + (j * BlockConfig_::WARP_X * WMMA_N)], acc_frag[i][j], ldc, wmma::mem_row_major); } } } } #else template <typename BlockConfig_> __global__ void u4_gemm_template_device_nt( const uint8_t* /*A*/, const uint8_t* /*B*/, int32_t* /*C*/, int /*M*/, int /*N*/, int /*K*/, int /*lda*/, int /*ldb*/, int /*ldc*/) {} #endif void _do_dispatch_wmma_matrix_mul_u4( const uint8_t* A, const uint8_t* B, int32_t* C, int M, int N, int K, int lda, int ldb, int ldc, hipStream_t stream) { constexpr uint32_t warp_x = 4, warp_y = 4, row_per_warp = 4, col_per_warp = 4; typedef BlockConfig<warp_x, warp_y, row_per_warp, col_per_warp> BlockConfig_; dim3 block{warp_x * WARP_SIZE, warp_y}; dim3 grid{ static_cast<unsigned int>(DIVUP(N, BlockConfig_::BN)), static_cast<unsigned int>(DIVUP(M, BlockConfig_::BM))}; hipLaunchKernelGGL(( u4_gemm_template_device_nt<BlockConfig_>) , dim3(grid), dim3(block), 0, stream, A, B, C, M, N, K, lda, ldb, ldc); after_kernel_launch(); } } // namespace wmma_matrix_mul_u4 namespace megdnn { namespace cuda { void exec_wmma_gemm_u4( const uint8_t* A, const uint8_t* B, int32_t* C, int M, int N, int K, int lda, int ldb, int ldc, hipStream_t stream) { wmma_matrix_mul_u4::_do_dispatch_wmma_matrix_mul_u4( A, B, C, M, N, K, lda, ldb, ldc, stream); } } // namespace cuda } // namespace megdnn #endif // vim: syntax=cpp.doxygen
dcdf69b754875a181c226444ae009374b409332f.cu
/*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are *permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this *list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this *list of conditions and the following disclaimer in the documentation and/or other *materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors *may be used to endorse or promote products derived from this software without specific *prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY *EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES *OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT *SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) *HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** * \file dnn/src/cuda/matrix_mul/uint4x4x32_wmma/wmma_matrix_mul_u4.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
*/ #include "src/cuda/utils.cuh" #include <cuda.h> #if CUDA_VERSION >= 10000 #if __CUDA_ARCH__ >= 730 #include <mma.h> using namespace nvcuda; using namespace wmma::experimental::precision; #endif namespace wmma_matrix_mul_u4 { constexpr uint32_t WMMA_M = 8, WMMA_N = 8, WMMA_K = 32, WARP_SIZE = 32; template <size_t WARP_X_, size_t WARP_Y_, size_t ROW_PER_WARP_, size_t COL_PER_WARP_> struct BlockConfig { static const size_t WARP_X = WARP_X_; static const size_t WARP_Y = WARP_Y_; static const size_t ROW_PER_WARP = ROW_PER_WARP_; static const size_t COL_PER_WARP = COL_PER_WARP_; static const size_t BK = 256; static const size_t BM = (WARP_Y * WMMA_M * ROW_PER_WARP); static const size_t BN = (WARP_X * WMMA_N * COL_PER_WARP); static const size_t WARPS_PER_BLOCK = WARP_X * WARP_Y; }; template <size_t BlockSize_, typename BlockConfig_> struct GlobalToShareMemStreamConfig { static const size_t BlockSize = BlockSize_; static const size_t CACHE_SIZE = (BlockSize + BlockConfig_::WARPS_PER_BLOCK - 1) / BlockConfig_::WARPS_PER_BLOCK; static const size_t SMEM_ROW = BlockSize; static const size_t SMEM_COL = BlockConfig_::BK; static const size_t SMEM_SKEW = WMMA_K * ((BlockConfig_::BK / WMMA_K) % 2 == 0); static const size_t SMEM_STRIDE = SMEM_COL + SMEM_SKEW; }; #if __CUDA_ARCH__ >= 730 template <typename BlockConfig_, typename GlobalToShareMemStreamConfig_> struct GlobalToShareMemStream { MEGDNN_STATIC_ASSERT( GlobalToShareMemStreamConfig_::BlockSize == GlobalToShareMemStreamConfig_::CACHE_SIZE * BlockConfig_::WARPS_PER_BLOCK, "Block size mismatch"); uint8_t* smem; const uint8_t* g_ptr; int ld; int row_remain; int k_base; int K; const int warp_x = threadIdx.x / WARP_SIZE; const int warp_y = threadIdx.y; const int idx_in_warp = threadIdx.x % WARP_SIZE; const int warp_id = warp_y * BlockConfig_::WARP_X + warp_x; typedef int32_t copy_t; copy_t reg_cache[GlobalToShareMemStreamConfig_::CACHE_SIZE]; __device__ GlobalToShareMemStream( uint8_t* smem, const uint8_t* g_ptr, int ld, int row_remain, int K) : smem{smem}, g_ptr{g_ptr}, ld{ld}, row_remain{row_remain}, K{K} { k_base = 0; } __device__ __forceinline__ void copy() { int col = k_base + idx_in_warp * 8; #pragma unroll for (int i = 0; i < GlobalToShareMemStreamConfig_::CACHE_SIZE; i++) { int row = i * BlockConfig_::WARPS_PER_BLOCK + warp_id; bool cond = row < row_remain && col < K; if (cond) { copy_t val = *(copy_t*)(&g_ptr[(row * ld + col) / 2]); reg_cache[i] = val; } else { reg_cache[i] = 0; } } } __device__ __forceinline__ void commit() { int col = idx_in_warp * 8; #pragma unroll for (int i = 0; i < GlobalToShareMemStreamConfig_::CACHE_SIZE; i++) { int row = i * BlockConfig_::WARPS_PER_BLOCK + warp_id; *(copy_t*)(get_smem_ptr(row, col)) = reg_cache[i]; } } __device__ __forceinline__ uint8_t* get_smem_ptr(int y, int x) { return &smem[(y * GlobalToShareMemStreamConfig_::SMEM_STRIDE + x) / 2]; } __device__ __forceinline__ void inc_stage() { k_base += GlobalToShareMemStreamConfig_::SMEM_COL; } }; template <typename BlockConfig_> __device__ inline void load_share_mem( wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major> a_frag[BlockConfig_::ROW_PER_WARP], wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major> b_frag[BlockConfig_::COL_PER_WARP], GlobalToShareMemStream< BlockConfig_, GlobalToShareMemStreamConfig<BlockConfig_::BM, BlockConfig_>>& gbl2smem_a, GlobalToShareMemStream< BlockConfig_, GlobalToShareMemStreamConfig<BlockConfig_::BN, BlockConfig_>>& gbl2smem_b, int warp_k) { typedef 
GlobalToShareMemStreamConfig<BlockConfig_::BM, BlockConfig_> Config_A; typedef GlobalToShareMemStreamConfig<BlockConfig_::BN, BlockConfig_> Config_B; const int warp_x = threadIdx.x / WARP_SIZE; const int warp_y = threadIdx.y; uint8_t* __restrict__ s_ptr_a = gbl2smem_a.get_smem_ptr(warp_y * WMMA_M, warp_k * WMMA_K); uint8_t* __restrict__ s_ptr_b = gbl2smem_b.get_smem_ptr(warp_x * WMMA_N, warp_k * WMMA_K); const int stride_a = BlockConfig_::WARP_Y * WMMA_M; const int stride_b = BlockConfig_::WARP_X * WMMA_N; #pragma unroll for (int i = 0; i < BlockConfig_::ROW_PER_WARP; ++i) { wmma::load_matrix_sync( a_frag[i], s_ptr_a + i * stride_a * Config_A::SMEM_STRIDE / 2, Config_A::SMEM_STRIDE); } #pragma unroll for (int j = 0; j < BlockConfig_::COL_PER_WARP; ++j) { wmma::load_matrix_sync( b_frag[j], s_ptr_b + j * stride_b * Config_B::SMEM_STRIDE / 2, Config_B::SMEM_STRIDE); } } template <size_t ROW_PER_WARP, size_t COL_PER_WARP> __device__ inline void calc( wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major> a_frag[ROW_PER_WARP], wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major> b_frag[COL_PER_WARP], wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int32_t> acc_frag[ROW_PER_WARP][COL_PER_WARP]) { #pragma unroll for (int i = 0; i < ROW_PER_WARP; ++i) { #pragma unroll for (int j = 0; j < COL_PER_WARP; ++j) { wmma::mma_sync(acc_frag[i][j], a_frag[i], b_frag[j], acc_frag[i][j]); } } } template <bool last_block, typename BlockConfig_> __device__ void inline consume_tile( GlobalToShareMemStream< BlockConfig_, GlobalToShareMemStreamConfig<BlockConfig_::BM, BlockConfig_>>& gbl2smem_a, GlobalToShareMemStream< BlockConfig_, GlobalToShareMemStreamConfig<BlockConfig_::BN, BlockConfig_>>& gbl2smem_b, wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major> a_frag[2][BlockConfig_::ROW_PER_WARP], wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major> b_frag[2][BlockConfig_::COL_PER_WARP], wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int32_t> acc_frag[BlockConfig_::ROW_PER_WARP][BlockConfig_::COL_PER_WARP]) { if (!last_block) { gbl2smem_a.inc_stage(); gbl2smem_b.inc_stage(); gbl2smem_a.copy(); gbl2smem_b.copy(); } int warp_k = 0; #pragma unroll for (warp_k = 0; warp_k < BlockConfig_::BK / WMMA_K - 1; ++warp_k) { load_share_mem<BlockConfig_>( a_frag[(warp_k + 1) % 2], b_frag[(warp_k + 1) % 2], gbl2smem_a, gbl2smem_b, warp_k + 1); calc<BlockConfig_::ROW_PER_WARP, BlockConfig_::COL_PER_WARP>( a_frag[warp_k % 2], b_frag[warp_k % 2], acc_frag); } calc<BlockConfig_::ROW_PER_WARP, BlockConfig_::COL_PER_WARP>( a_frag[warp_k % 2], b_frag[warp_k % 2], acc_frag); if (!last_block) { __syncthreads(); gbl2smem_a.commit(); gbl2smem_b.commit(); __syncthreads(); load_share_mem<BlockConfig_>(a_frag[0], b_frag[0], gbl2smem_a, gbl2smem_b, 0); } } template <typename BlockConfig_> __global__ void u4_gemm_template_device_nt( const uint8_t* A, const uint8_t* B, int32_t* C, int M, int N, int K, int lda, int ldb, int ldc) { typedef GlobalToShareMemStreamConfig<BlockConfig_::BM, BlockConfig_> Config_A; typedef GlobalToShareMemStreamConfig<BlockConfig_::BN, BlockConfig_> Config_B; __shared__ uint8_t smem_a[BlockConfig_::BM][Config_A::SMEM_STRIDE / 2]; __shared__ uint8_t smem_b[BlockConfig_::BN][Config_B::SMEM_STRIDE / 2]; const int bidx = blockIdx.x; const int bidy = blockIdx.y; const uint8_t* g_ptr_a = A + bidy * BlockConfig_::BM * lda / 2; const uint8_t* g_ptr_b = B + bidx * BlockConfig_::BN * ldb / 2; const int warp_x = 
threadIdx.x / WARP_SIZE; const int warp_y = threadIdx.y; const int warp_row_start = bidy * BlockConfig_::BM + warp_y * WMMA_M; const int warp_col_start = bidx * BlockConfig_::BN + warp_x * WMMA_N; int32_t* g_ptr_c = C + warp_row_start * ldc + warp_col_start; GlobalToShareMemStream<BlockConfig_, Config_A> gbl2smem_a( &smem_a[0][0], g_ptr_a, lda, M - bidy, K); GlobalToShareMemStream<BlockConfig_, Config_B> gbl2smem_b( &smem_b[0][0], g_ptr_b, ldb, N - bidx, K); wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int32_t> acc_frag[BlockConfig_::ROW_PER_WARP][BlockConfig_::COL_PER_WARP]; wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major> a_frag[2][BlockConfig_::ROW_PER_WARP]; wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major> b_frag[2][BlockConfig_::COL_PER_WARP]; #pragma unroll for (int i = 0; i < BlockConfig_::ROW_PER_WARP; ++i) { #pragma unroll for (int j = 0; j < BlockConfig_::COL_PER_WARP; ++j) { wmma::fill_fragment(acc_frag[i][j], 0); } } gbl2smem_a.copy(); gbl2smem_b.copy(); gbl2smem_a.commit(); gbl2smem_b.commit(); __syncthreads(); load_share_mem(a_frag[0], b_frag[0], gbl2smem_a, gbl2smem_b, 0); const int BLK_K = (K + BlockConfig_::BK - 1) / BlockConfig_::BK; #pragma unroll 1 for (int blk_k = 0; blk_k < BLK_K - 1; ++blk_k) { consume_tile<false, BlockConfig_>( gbl2smem_a, gbl2smem_b, a_frag, b_frag, acc_frag); } consume_tile<true, BlockConfig_>(gbl2smem_a, gbl2smem_b, a_frag, b_frag, acc_frag); #pragma unroll for (int i = 0; i < BlockConfig_::ROW_PER_WARP; ++i) { #pragma unroll for (int j = 0; j < BlockConfig_::COL_PER_WARP; ++j) { if (warp_row_start + i * BlockConfig_::WARP_Y * WMMA_M <= M - WMMA_M && warp_col_start + j * BlockConfig_::WARP_X * WMMA_N <= N - WMMA_N) { wmma::store_matrix_sync( &g_ptr_c [(i * BlockConfig_::WARP_Y * WMMA_M) * ldc + (j * BlockConfig_::WARP_X * WMMA_N)], acc_frag[i][j], ldc, wmma::mem_row_major); } } } } #else template <typename BlockConfig_> __global__ void u4_gemm_template_device_nt( const uint8_t* /*A*/, const uint8_t* /*B*/, int32_t* /*C*/, int /*M*/, int /*N*/, int /*K*/, int /*lda*/, int /*ldb*/, int /*ldc*/) {} #endif void _do_dispatch_wmma_matrix_mul_u4( const uint8_t* A, const uint8_t* B, int32_t* C, int M, int N, int K, int lda, int ldb, int ldc, cudaStream_t stream) { constexpr uint32_t warp_x = 4, warp_y = 4, row_per_warp = 4, col_per_warp = 4; typedef BlockConfig<warp_x, warp_y, row_per_warp, col_per_warp> BlockConfig_; dim3 block{warp_x * WARP_SIZE, warp_y}; dim3 grid{ static_cast<unsigned int>(DIVUP(N, BlockConfig_::BN)), static_cast<unsigned int>(DIVUP(M, BlockConfig_::BM))}; u4_gemm_template_device_nt<BlockConfig_> <<<grid, block, 0, stream>>>(A, B, C, M, N, K, lda, ldb, ldc); after_kernel_launch(); } } // namespace wmma_matrix_mul_u4 namespace megdnn { namespace cuda { void exec_wmma_gemm_u4( const uint8_t* A, const uint8_t* B, int32_t* C, int M, int N, int K, int lda, int ldb, int ldc, cudaStream_t stream) { wmma_matrix_mul_u4::_do_dispatch_wmma_matrix_mul_u4( A, B, C, M, N, K, lda, ldb, ldc, stream); } } // namespace cuda } // namespace megdnn #endif // vim: syntax=cpp.doxygen
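Note on this pair: the differences between the .cu and .hip files above reduce to mechanical renames (hipStream_t for cudaStream_t, TORCH_HIP_VERSION for CUDA_VERSION, the hip runtime header) plus the kernel launch itself — the chevron launch of u4_gemm_template_device_nt<BlockConfig_> becomes a hipLaunchKernelGGL call with the template instantiation parenthesised and the geometry passed as explicit dim3 arguments. The sketch below is a minimal illustration of that mapping using a hypothetical fill kernel; none of these names come from the row above.

#include <cuda_runtime.h>

// Hypothetical templated kernel, standing in for u4_gemm_template_device_nt above.
template <int BLOCK>
__global__ void fill(int* p, int n, int value) {
    int i = blockIdx.x * BLOCK + threadIdx.x;
    if (i < n) p[i] = value;
}

void launch_fill(int* d_p, int n, int value, cudaStream_t stream) {
    dim3 block(128);
    dim3 grid((n + 127) / 128);
    // CUDA chevron launch, as in the *.cu column:
    fill<128><<<grid, block, 0, stream>>>(d_p, n, value);
    // The hipified file expresses the same statement as:
    //   hipLaunchKernelGGL((fill<128>), dim3(grid), dim3(block), 0, stream, d_p, n, value);
}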
bbb5755ee9d63d199ee35f26f27fe2d2a869f93c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "hl_base.h" #include "hl_matrix.h" #include "hl_matrix_ops.cuh" #include "hl_matrix_apply.cuh" #include "hl_sequence.h" #include "paddle/utils/Logging.h" #include "hl_device_functions.cuh" DEFINE_MATRIX_UNARY_OP(Zero, a = 0); DEFINE_MATRIX_TERNARY_PARAMETER_OP(_add, TWO_PARAMETER, c = p1*a + p2*b); void hl_matrix_add(real *A_d, real *B_d, real *C_d, int dimM, int dimN, real alpha, real beta) { CHECK_NOTNULL(A_d); CHECK_NOTNULL(B_d); CHECK_NOTNULL(C_d); hl_gpu_apply_ternary_op <real, ternary::_add<real>, 0, 0>(ternary::_add<real>(alpha, beta), A_d, B_d, C_d, dimM, dimN, dimN, dimN, dimN); CHECK_SYNC("hl_matrix_add failed"); } #ifdef HPPL_TYPE_DOUBLE #define THRESHOLD 128 #else #define THRESHOLD 64 #endif __device__ __forceinline__ void findMax(real* I, real* dfMax_s, int blockSize, int base, int curIdx, int nextIdx, int dimN, real* max) { dfMax_s[base] = -1.0e20; while (curIdx < dimN) { if (dfMax_s[base] < I[nextIdx]) { dfMax_s[base] = I[nextIdx]; } nextIdx += blockSize; curIdx += blockSize; } __syncthreads(); for (int stride = blockSize >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (base < stride) { nextIdx = base + stride; if (dfMax_s[base] < dfMax_s[nextIdx]) { dfMax_s[base] = dfMax_s[nextIdx]; } } } if (0 == base) { max[0] = dfMax_s[0]; } __syncthreads(); } __device__ __forceinline__ void subMaxAndExp(real* I, real* O, int curIdx, int nextIdx, int blockSize, int dimN, real max) { real val; while (curIdx < dimN) { val = I[nextIdx] - max; if (val < -THRESHOLD) { val = -THRESHOLD; } I[nextIdx] = val; #ifndef HPPL_TYPE_DOUBLE O[nextIdx] = __expf(val); #else O[nextIdx] = exp(val); #endif nextIdx += blockSize; curIdx += blockSize; } __syncthreads(); } __device__ __forceinline__ void valueSum(real* O, real* dfMax_s, int blockSize, int base, int curIdx, int nextIdx, int dimN) { dfMax_s[base] = 0; while (curIdx < dimN) { dfMax_s[base] += O[nextIdx]; nextIdx += blockSize; curIdx += blockSize; } __syncthreads(); for (int stride = blockSize >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (base < stride) { nextIdx = base + stride; dfMax_s[base] += dfMax_s[nextIdx]; } } __syncthreads(); } __device__ __forceinline__ void divSum(real* O, real sum, int curIdx, int nextIdx, int blockSize, int dimN) { while (curIdx < dimN) { O[nextIdx] /= sum; nextIdx += blockSize; curIdx += blockSize; } } __device__ __forceinline__ void softmax(real* I, real* O, real* dfMax_s, int blockSize, int base, int curIdx, int nextIdx, int dimN) { __shared__ real max; // find the max number findMax(I, dfMax_s, blockSize, base, curIdx, nextIdx, dimN, &max); // sub max Value and do Exp operation subMaxAndExp(I, O, base, nextIdx, blockSize, dimN, max); // add dimN values into blockDim.x buffer // sum is in dfMax_s[0] valueSum(O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN); // divided by sum divSum(O, dfMax_s[0], curIdx, nextIdx, blockSize, dimN); } 
template<int blockSize> __global__ void KeMatrixSoftMax(real *O, real *I, int dimN) { int base = threadIdx.x; __shared__ real dfMax_s[blockSize]; int nextIdx = blockIdx.x * dimN + base; int curIdx = base; softmax(I, O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN); } void hl_matrix_softmax(real *A_d, real *C_d, int dimM, int dimN) { CHECK_NOTNULL(A_d); CHECK_NOTNULL(C_d); dim3 block(512, 1); dim3 grid(dimM, 1); hipLaunchKernelGGL(( KeMatrixSoftMax<512>) , dim3(grid), dim3(block), 0, STREAM_DEFAULT, C_d, A_d, dimN); CHECK_SYNC("hl_matrix_softmax failed"); } template<int blockSize> __global__ void KeSequenceSoftMax(real *O, real *I, const int* index) { int base = threadIdx.x; int bid = blockIdx.x; __shared__ real dfMax_s[blockSize]; int start = index[bid]; int dimN = index[bid + 1] - start; int nextIdx = start + base; int curIdx = base; softmax(I, O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN); } void hl_sequence_softmax_forward(real *A_d, real *C_d, const int* index, int numSequence) { CHECK_NOTNULL(A_d); CHECK_NOTNULL(C_d); dim3 block(512, 1); dim3 grid(numSequence, 1); hipLaunchKernelGGL(( KeSequenceSoftMax<512>) , dim3(grid), dim3(block), 0, STREAM_DEFAULT, C_d, A_d, index); CHECK_SYNC("hl_sequence_softmax_forward failed"); } __global__ void KeMatrixDerivative(real *grad_d, real *output_d, real *sftmaxSum_d, int dimM, int dimN) { int rowIdx = blockIdx.x*blockDim.x + threadIdx.x; int colIdx = blockIdx.y*blockDim.y + threadIdx.y; int index; if (rowIdx < dimM && colIdx < dimN) { index = rowIdx*dimN + colIdx; grad_d[index] = output_d[index] * (grad_d[index] - sftmaxSum_d[rowIdx]); } } void hl_matrix_softmax_derivative(real *grad_d, real *output_d, real *sftmaxSum_d, int dimM, int dimN) { CHECK_NOTNULL(grad_d); CHECK_NOTNULL(output_d); CHECK_NOTNULL(sftmaxSum_d); int blocksX = (dimM + 0) / 1; int blocksY = (dimN + 1024 -1) / 1024; dim3 threads(1, 1024); dim3 grid(blocksX, blocksY); hipLaunchKernelGGL(( KeMatrixDerivative), dim3(grid), dim3(threads), 0, STREAM_DEFAULT , grad_d, output_d, sftmaxSum_d, dimM, dimN); CHECK_SYNC("hl_matrix_softmax_derivative failed"); } template<int blockSize> __global__ void KeMatrixClassificationError(real* in_A, int* in_B, real* out_C, int dimN) { __shared__ real max_s[blockSize]; __shared__ int max_l[blockSize]; const int tid = threadIdx.x; const int rowId = blockIdx.x; max_s[tid] = -1e30f; in_A += rowId * dimN; real tmp; for (int colId = tid; colId < dimN; colId += blockSize) { tmp = in_A[colId]; if (max_s[tid] < tmp) { max_s[tid] = tmp; max_l[tid] = colId; } } __syncthreads(); for (int stride = blockSize/2; stride > 0; stride = stride/2) { if (tid < stride) { if (max_s[tid] < max_s[tid + stride]) { max_s[tid] = max_s[tid + stride]; max_l[tid] = max_l[tid + stride]; } } __syncthreads(); } __syncthreads(); if (tid == 0) { out_C[rowId] = (max_l[0] == in_B[rowId] ? 
0 : 1.0f); } } void hl_matrix_classification_error(real* A_d, int* B_d, real* C_d, int dimM, int dimN) { CHECK_NOTNULL(A_d); CHECK_NOTNULL(B_d); CHECK_NOTNULL(C_d); // each sample is calculated by one block hipLaunchKernelGGL(( KeMatrixClassificationError<1024>), dim3(dimM), dim3(1024), 0, STREAM_DEFAULT , A_d, B_d, C_d, dimN); CHECK_SYNC("hl_matrix_classification_error"); } __global__ void KeMatrixCrossEntropy(real* O, real* E, int* label, int dimM, int dimN) { int index = blockIdx.x * blockDim.x + threadIdx.x; int newBase; if (index < dimM) { newBase = label[index]; newBase = newBase % dimN; E[index] = -log(O[index * dimN + newBase]); } } void hl_matrix_cross_entropy(real* A_d, real* C_d, int* label_d, int dimM, int dimN) { CHECK_NOTNULL(A_d); CHECK_NOTNULL(C_d); int blocks = (dimM + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KeMatrixCrossEntropy), dim3(grid), dim3(threads), 0, STREAM_DEFAULT , A_d, C_d, label_d, dimM, dimN); CHECK_SYNC("hl_matrix_cross_entropy failed"); } __global__ void KeMatrixCrossEntropyBp(real* grad_d, real* output_d, int* label_d, int dimM, int dimN) { int rowIdx = blockIdx.x*blockDim.x + threadIdx.x; int colIdx = blockIdx.y*blockDim.y + threadIdx.y; int index; if (rowIdx < dimM && colIdx < dimN) { index = rowIdx*dimN + colIdx; if (label_d[rowIdx] == colIdx) { grad_d[index] -= 1.0f / output_d[index]; } } } void hl_matrix_cross_entropy_bp(real* grad_d, real* output_d, int* label_d, int dimM, int dimN) { CHECK_NOTNULL(grad_d); CHECK_NOTNULL(output_d); CHECK_NOTNULL(label_d); int blocksX = (dimM + 0)/1; int blocksY = (dimN + 1024 -1) / 1024; dim3 threads(1, 1024); dim3 grid(blocksX, blocksY); hipLaunchKernelGGL(( KeMatrixCrossEntropyBp), dim3(grid), dim3(threads), 0, STREAM_DEFAULT , grad_d, output_d, label_d, dimM, dimN); CHECK_SYNC("hl_matrix_cross_entropy_bp failed"); } void hl_matrix_zero_mem(real* data, int num) { hl_gpu_apply_unary_op( unary::Zero<real>(), data, 1, num, num); } __global__ void KeParamReluForward(real* output, real* input, real* w, int width, int height, int partial_sum) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx < width && ty < height) { int index = ty * width + tx; output[index] = input[index] > 0 ? 
input[index] : input[index] * w[tx / partial_sum]; } } void hl_param_relu_forward(real* output, real* input, real* w, int width, int height, int partial_sum) { CHECK_NOTNULL(output); CHECK_NOTNULL(input); CHECK_NOTNULL(w); dim3 threads(16, 16); int blockX = (width + 16 - 1) / 16; int blockY = (height + 16 -1) / 16; dim3 grid(blockX, blockY); hipLaunchKernelGGL(( KeParamReluForward), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, output, input, w, width, height, partial_sum); CHECK_SYNC("hl_param_relu_forward failed"); } template<int blockSize> __global__ void KeParamReluBackWardW(real* grad_w, real* grad_o, real* input, int width, int height, int partial_sum) { const int tid = threadIdx.x; __shared__ real temp[blockSize]; grad_o += partial_sum * blockIdx.x; input += partial_sum * blockIdx.x; real tmp = 0.0; for (int index = tid; index < partial_sum * height; index += blockSize) { int row = index / partial_sum; int offset = row * width + (index - row * partial_sum); if (input[offset] < 0) { tmp += grad_o[offset] * input[offset]; } } temp[tid] = tmp; __syncthreads(); for (int s = blockSize / 2; s > 0; s >>= 1) { if (tid < s) { temp[tid] += temp[tid + s]; } __syncthreads(); } if (tid == 0) { grad_w[blockIdx.x] += temp[0]; } } void hl_param_relu_backward_w(real* grad_w, real* grad_o, real* input, int width, int height, int partial_sum) { CHECK_NOTNULL(grad_w); CHECK_NOTNULL(grad_o); CHECK_NOTNULL(input); const int blockSize = 1024; int grid_num = width / partial_sum; dim3 threads(blockSize, 1); dim3 grid(grid_num, 1); hipLaunchKernelGGL(( KeParamReluBackWardW<blockSize>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, grad_w, grad_o, input, width, height, partial_sum); CHECK_SYNC("hl_param_relu_backward_w failed"); } __global__ void KeParamReluBackwardDiff(real* grad_o, real* input, real* w, real* diff, int width, int height, int partial_sum) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx < width && ty < height) { int index = ty * width + tx; diff[index] += grad_o[index] * (input[index] > 0 ? 
1 : w[tx / partial_sum]); } } void hl_param_relu_backward_diff(real* grad_o, real* data, real* w, real* diff, int width, int height, int partial_sum) { CHECK_NOTNULL(grad_o); CHECK_NOTNULL(data); CHECK_NOTNULL(w); CHECK_NOTNULL(diff); dim3 threads(16, 16); int blockX = (width + 16 - 1) / 16; int blockY = (height + 16 -1) / 16; dim3 grid(blockX, blockY); hipLaunchKernelGGL(( KeParamReluBackwardDiff), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, grad_o, data, w, diff, width, height, partial_sum); CHECK_SYNC("hl_param_relu_backward_diff failed"); } template<int blockSize> __global__ void KeCosSim(real* output, real* input1, real* input2, int width, int input1_height, int input2_height, real scale) { const int ty = blockIdx.y; int tid = threadIdx.x; __shared__ real xx[blockSize]; __shared__ real yy[blockSize]; __shared__ real xy[blockSize]; xx[tid] = 0.0; yy[tid] = 0.0; xy[tid] = 0.0; __syncthreads(); input1 += ty * width; if (input2_height > 1) { input2 += ty * width; } for (int index = tid; index < width; index += blockSize) { real x = input1[index]; real y = input2[index]; xx[tid] += x * x; yy[tid] += y * y; xy[tid] += x * y; } __syncthreads(); for (int s = blockSize / 2; s > 0; s >>= 1) { if (tid < s) { xx[tid] += xx[tid + s]; yy[tid] += yy[tid + s]; xy[tid] += xy[tid + s]; } __syncthreads(); } if (tid == 0) { output[ty] = scale * xy[0] / (sqrt(xx[0]) * sqrt(yy[0])); } } void hl_cossim(real* output, real* input1, real* input2, int width, int input1_height, int input2_height, real scale) { CHECK_NOTNULL(output); CHECK_NOTNULL(input1); CHECK_NOTNULL(input2); const int blockSize = 256; dim3 threads(blockSize, 1); dim3 grid(1, input1_height); hipLaunchKernelGGL(( KeCosSim<blockSize>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, output, input1, input2, width, input1_height, input2_height, scale); CHECK_SYNC("hl_cossim failed"); } template<int blockSize> __global__ void KeCosSimDerivative(real* grad, real* output, real* prevOutX, real* prevOutY, real* prevGradX, real* prevGradY, int width, int input1_height, int input2_height, real scale) { const int ty = blockIdx.y; int tid = threadIdx.x; __shared__ real xx[blockSize]; __shared__ real yy[blockSize]; __shared__ real xy[blockSize]; xx[tid] = 0.0; yy[tid] = 0.0; xy[tid] = 0.0; __syncthreads(); prevOutX += ty * width; prevGradX += ty * width; if (input2_height > 1) { prevOutY += ty * width; prevGradY += ty * width; } for (int index = tid; index < width; index += blockSize) { real x = prevOutX[index]; real y = prevOutY[index]; xx[tid] += x * x; yy[tid] += y * y; xy[tid] += x * y; } __syncthreads(); for (int s = blockSize / 2; s > 0; s >>= 1) { if (tid < s) { xx[tid] += xx[tid + s]; yy[tid] += yy[tid + s]; xy[tid] += xy[tid + s]; } __syncthreads(); } if (xy[0] == 0) { real reciprocal = 1.0 / (sqrt(xx[0]) * sqrt(yy[0])); for (int index = tid; index < width; index += blockSize) { prevGradX[index] += scale * grad[ty] * prevOutY[index] * reciprocal; if (input2_height > 1) { prevGradY[index] += scale * grad[ty] * prevOutX[index] * reciprocal; } else { paddle::paddleAtomicAdd(prevGradY + index, scale * grad[ty] * prevOutX[index] * reciprocal); } } } else { real reciprocalXY = 1.0 / xy[0]; real reciprocalSquareSumX = 1.0 / xx[0]; real reciprocalSquareSumY = 1.0 / yy[0]; for (int index = tid; index < width; index += blockSize) { prevGradX[index] += output[ty] * grad[ty] * (prevOutY[index] * reciprocalXY - prevOutX[index] * reciprocalSquareSumX); if (input2_height > 1) { prevGradY[index] += output[ty] * grad[ty] * (prevOutX[index] * reciprocalXY - 
prevOutY[index] * reciprocalSquareSumY); } else { paddle::paddleAtomicAdd(prevGradY + index, output[ty] * grad[ty] * (prevOutX[index] * reciprocalXY - prevOutY[index] * reciprocalSquareSumY)); } } } } void hl_cossim_derivative(real* grad, real* output, real* prevOutX, real* prevOutY, real* prevGradX, real* prevGradY, int width, int input1_height, int input2_height, real scale) { CHECK_NOTNULL(grad); CHECK_NOTNULL(output); CHECK_NOTNULL(prevOutX); CHECK_NOTNULL(prevOutY); CHECK_NOTNULL(prevGradX); CHECK_NOTNULL(prevGradY); const int blockSize = 256; dim3 threads(blockSize, 1); dim3 grid(1, input1_height); hipLaunchKernelGGL(( KeCosSimDerivative<blockSize>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, grad, output, prevOutX, prevOutY, prevGradX, prevGradY, width, input1_height, input2_height, scale); CHECK_SYNC("hl_cossim_derivate failed"); }
bbb5755ee9d63d199ee35f26f27fe2d2a869f93c.cu
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "hl_base.h" #include "hl_matrix.h" #include "hl_matrix_ops.cuh" #include "hl_matrix_apply.cuh" #include "hl_sequence.h" #include "paddle/utils/Logging.h" #include "hl_device_functions.cuh" DEFINE_MATRIX_UNARY_OP(Zero, a = 0); DEFINE_MATRIX_TERNARY_PARAMETER_OP(_add, TWO_PARAMETER, c = p1*a + p2*b); void hl_matrix_add(real *A_d, real *B_d, real *C_d, int dimM, int dimN, real alpha, real beta) { CHECK_NOTNULL(A_d); CHECK_NOTNULL(B_d); CHECK_NOTNULL(C_d); hl_gpu_apply_ternary_op <real, ternary::_add<real>, 0, 0>(ternary::_add<real>(alpha, beta), A_d, B_d, C_d, dimM, dimN, dimN, dimN, dimN); CHECK_SYNC("hl_matrix_add failed"); } #ifdef HPPL_TYPE_DOUBLE #define THRESHOLD 128 #else #define THRESHOLD 64 #endif __device__ __forceinline__ void findMax(real* I, real* dfMax_s, int blockSize, int base, int curIdx, int nextIdx, int dimN, real* max) { dfMax_s[base] = -1.0e20; while (curIdx < dimN) { if (dfMax_s[base] < I[nextIdx]) { dfMax_s[base] = I[nextIdx]; } nextIdx += blockSize; curIdx += blockSize; } __syncthreads(); for (int stride = blockSize >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (base < stride) { nextIdx = base + stride; if (dfMax_s[base] < dfMax_s[nextIdx]) { dfMax_s[base] = dfMax_s[nextIdx]; } } } if (0 == base) { max[0] = dfMax_s[0]; } __syncthreads(); } __device__ __forceinline__ void subMaxAndExp(real* I, real* O, int curIdx, int nextIdx, int blockSize, int dimN, real max) { real val; while (curIdx < dimN) { val = I[nextIdx] - max; if (val < -THRESHOLD) { val = -THRESHOLD; } I[nextIdx] = val; #ifndef HPPL_TYPE_DOUBLE O[nextIdx] = __expf(val); #else O[nextIdx] = exp(val); #endif nextIdx += blockSize; curIdx += blockSize; } __syncthreads(); } __device__ __forceinline__ void valueSum(real* O, real* dfMax_s, int blockSize, int base, int curIdx, int nextIdx, int dimN) { dfMax_s[base] = 0; while (curIdx < dimN) { dfMax_s[base] += O[nextIdx]; nextIdx += blockSize; curIdx += blockSize; } __syncthreads(); for (int stride = blockSize >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (base < stride) { nextIdx = base + stride; dfMax_s[base] += dfMax_s[nextIdx]; } } __syncthreads(); } __device__ __forceinline__ void divSum(real* O, real sum, int curIdx, int nextIdx, int blockSize, int dimN) { while (curIdx < dimN) { O[nextIdx] /= sum; nextIdx += blockSize; curIdx += blockSize; } } __device__ __forceinline__ void softmax(real* I, real* O, real* dfMax_s, int blockSize, int base, int curIdx, int nextIdx, int dimN) { __shared__ real max; // find the max number findMax(I, dfMax_s, blockSize, base, curIdx, nextIdx, dimN, &max); // sub max Value and do Exp operation subMaxAndExp(I, O, base, nextIdx, blockSize, dimN, max); // add dimN values into blockDim.x buffer // sum is in dfMax_s[0] valueSum(O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN); // divided by sum divSum(O, dfMax_s[0], curIdx, nextIdx, blockSize, dimN); } template<int blockSize> __global__ void KeMatrixSoftMax(real *O, real *I, int dimN) { int base 
= threadIdx.x; __shared__ real dfMax_s[blockSize]; int nextIdx = blockIdx.x * dimN + base; int curIdx = base; softmax(I, O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN); } void hl_matrix_softmax(real *A_d, real *C_d, int dimM, int dimN) { CHECK_NOTNULL(A_d); CHECK_NOTNULL(C_d); dim3 block(512, 1); dim3 grid(dimM, 1); KeMatrixSoftMax<512> <<<grid, block, 0, STREAM_DEFAULT>>>(C_d, A_d, dimN); CHECK_SYNC("hl_matrix_softmax failed"); } template<int blockSize> __global__ void KeSequenceSoftMax(real *O, real *I, const int* index) { int base = threadIdx.x; int bid = blockIdx.x; __shared__ real dfMax_s[blockSize]; int start = index[bid]; int dimN = index[bid + 1] - start; int nextIdx = start + base; int curIdx = base; softmax(I, O, dfMax_s, blockSize, base, curIdx, nextIdx, dimN); } void hl_sequence_softmax_forward(real *A_d, real *C_d, const int* index, int numSequence) { CHECK_NOTNULL(A_d); CHECK_NOTNULL(C_d); dim3 block(512, 1); dim3 grid(numSequence, 1); KeSequenceSoftMax<512> <<<grid, block, 0, STREAM_DEFAULT>>>(C_d, A_d, index); CHECK_SYNC("hl_sequence_softmax_forward failed"); } __global__ void KeMatrixDerivative(real *grad_d, real *output_d, real *sftmaxSum_d, int dimM, int dimN) { int rowIdx = blockIdx.x*blockDim.x + threadIdx.x; int colIdx = blockIdx.y*blockDim.y + threadIdx.y; int index; if (rowIdx < dimM && colIdx < dimN) { index = rowIdx*dimN + colIdx; grad_d[index] = output_d[index] * (grad_d[index] - sftmaxSum_d[rowIdx]); } } void hl_matrix_softmax_derivative(real *grad_d, real *output_d, real *sftmaxSum_d, int dimM, int dimN) { CHECK_NOTNULL(grad_d); CHECK_NOTNULL(output_d); CHECK_NOTNULL(sftmaxSum_d); int blocksX = (dimM + 0) / 1; int blocksY = (dimN + 1024 -1) / 1024; dim3 threads(1, 1024); dim3 grid(blocksX, blocksY); KeMatrixDerivative<<< grid, threads, 0, STREAM_DEFAULT >>> (grad_d, output_d, sftmaxSum_d, dimM, dimN); CHECK_SYNC("hl_matrix_softmax_derivative failed"); } template<int blockSize> __global__ void KeMatrixClassificationError(real* in_A, int* in_B, real* out_C, int dimN) { __shared__ real max_s[blockSize]; __shared__ int max_l[blockSize]; const int tid = threadIdx.x; const int rowId = blockIdx.x; max_s[tid] = -1e30f; in_A += rowId * dimN; real tmp; for (int colId = tid; colId < dimN; colId += blockSize) { tmp = in_A[colId]; if (max_s[tid] < tmp) { max_s[tid] = tmp; max_l[tid] = colId; } } __syncthreads(); for (int stride = blockSize/2; stride > 0; stride = stride/2) { if (tid < stride) { if (max_s[tid] < max_s[tid + stride]) { max_s[tid] = max_s[tid + stride]; max_l[tid] = max_l[tid + stride]; } } __syncthreads(); } __syncthreads(); if (tid == 0) { out_C[rowId] = (max_l[0] == in_B[rowId] ? 
0 : 1.0f); } } void hl_matrix_classification_error(real* A_d, int* B_d, real* C_d, int dimM, int dimN) { CHECK_NOTNULL(A_d); CHECK_NOTNULL(B_d); CHECK_NOTNULL(C_d); // each sample is calculated by one block KeMatrixClassificationError<1024><<< dimM, 1024, 0, STREAM_DEFAULT >>> (A_d, B_d, C_d, dimN); CHECK_SYNC("hl_matrix_classification_error"); } __global__ void KeMatrixCrossEntropy(real* O, real* E, int* label, int dimM, int dimN) { int index = blockIdx.x * blockDim.x + threadIdx.x; int newBase; if (index < dimM) { newBase = label[index]; newBase = newBase % dimN; E[index] = -log(O[index * dimN + newBase]); } } void hl_matrix_cross_entropy(real* A_d, real* C_d, int* label_d, int dimM, int dimN) { CHECK_NOTNULL(A_d); CHECK_NOTNULL(C_d); int blocks = (dimM + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KeMatrixCrossEntropy<<< grid, threads, 0, STREAM_DEFAULT >>> (A_d, C_d, label_d, dimM, dimN); CHECK_SYNC("hl_matrix_cross_entropy failed"); } __global__ void KeMatrixCrossEntropyBp(real* grad_d, real* output_d, int* label_d, int dimM, int dimN) { int rowIdx = blockIdx.x*blockDim.x + threadIdx.x; int colIdx = blockIdx.y*blockDim.y + threadIdx.y; int index; if (rowIdx < dimM && colIdx < dimN) { index = rowIdx*dimN + colIdx; if (label_d[rowIdx] == colIdx) { grad_d[index] -= 1.0f / output_d[index]; } } } void hl_matrix_cross_entropy_bp(real* grad_d, real* output_d, int* label_d, int dimM, int dimN) { CHECK_NOTNULL(grad_d); CHECK_NOTNULL(output_d); CHECK_NOTNULL(label_d); int blocksX = (dimM + 0)/1; int blocksY = (dimN + 1024 -1) / 1024; dim3 threads(1, 1024); dim3 grid(blocksX, blocksY); KeMatrixCrossEntropyBp<<< grid, threads, 0, STREAM_DEFAULT >>> (grad_d, output_d, label_d, dimM, dimN); CHECK_SYNC("hl_matrix_cross_entropy_bp failed"); } void hl_matrix_zero_mem(real* data, int num) { hl_gpu_apply_unary_op( unary::Zero<real>(), data, 1, num, num); } __global__ void KeParamReluForward(real* output, real* input, real* w, int width, int height, int partial_sum) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx < width && ty < height) { int index = ty * width + tx; output[index] = input[index] > 0 ? 
input[index] : input[index] * w[tx / partial_sum]; } } void hl_param_relu_forward(real* output, real* input, real* w, int width, int height, int partial_sum) { CHECK_NOTNULL(output); CHECK_NOTNULL(input); CHECK_NOTNULL(w); dim3 threads(16, 16); int blockX = (width + 16 - 1) / 16; int blockY = (height + 16 -1) / 16; dim3 grid(blockX, blockY); KeParamReluForward<<<grid, threads, 0, STREAM_DEFAULT>>> (output, input, w, width, height, partial_sum); CHECK_SYNC("hl_param_relu_forward failed"); } template<int blockSize> __global__ void KeParamReluBackWardW(real* grad_w, real* grad_o, real* input, int width, int height, int partial_sum) { const int tid = threadIdx.x; __shared__ real temp[blockSize]; grad_o += partial_sum * blockIdx.x; input += partial_sum * blockIdx.x; real tmp = 0.0; for (int index = tid; index < partial_sum * height; index += blockSize) { int row = index / partial_sum; int offset = row * width + (index - row * partial_sum); if (input[offset] < 0) { tmp += grad_o[offset] * input[offset]; } } temp[tid] = tmp; __syncthreads(); for (int s = blockSize / 2; s > 0; s >>= 1) { if (tid < s) { temp[tid] += temp[tid + s]; } __syncthreads(); } if (tid == 0) { grad_w[blockIdx.x] += temp[0]; } } void hl_param_relu_backward_w(real* grad_w, real* grad_o, real* input, int width, int height, int partial_sum) { CHECK_NOTNULL(grad_w); CHECK_NOTNULL(grad_o); CHECK_NOTNULL(input); const int blockSize = 1024; int grid_num = width / partial_sum; dim3 threads(blockSize, 1); dim3 grid(grid_num, 1); KeParamReluBackWardW<blockSize><<<grid, threads, 0, STREAM_DEFAULT>>> (grad_w, grad_o, input, width, height, partial_sum); CHECK_SYNC("hl_param_relu_backward_w failed"); } __global__ void KeParamReluBackwardDiff(real* grad_o, real* input, real* w, real* diff, int width, int height, int partial_sum) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx < width && ty < height) { int index = ty * width + tx; diff[index] += grad_o[index] * (input[index] > 0 ? 
1 : w[tx / partial_sum]); } } void hl_param_relu_backward_diff(real* grad_o, real* data, real* w, real* diff, int width, int height, int partial_sum) { CHECK_NOTNULL(grad_o); CHECK_NOTNULL(data); CHECK_NOTNULL(w); CHECK_NOTNULL(diff); dim3 threads(16, 16); int blockX = (width + 16 - 1) / 16; int blockY = (height + 16 -1) / 16; dim3 grid(blockX, blockY); KeParamReluBackwardDiff<<<grid, threads, 0, STREAM_DEFAULT>>> (grad_o, data, w, diff, width, height, partial_sum); CHECK_SYNC("hl_param_relu_backward_diff failed"); } template<int blockSize> __global__ void KeCosSim(real* output, real* input1, real* input2, int width, int input1_height, int input2_height, real scale) { const int ty = blockIdx.y; int tid = threadIdx.x; __shared__ real xx[blockSize]; __shared__ real yy[blockSize]; __shared__ real xy[blockSize]; xx[tid] = 0.0; yy[tid] = 0.0; xy[tid] = 0.0; __syncthreads(); input1 += ty * width; if (input2_height > 1) { input2 += ty * width; } for (int index = tid; index < width; index += blockSize) { real x = input1[index]; real y = input2[index]; xx[tid] += x * x; yy[tid] += y * y; xy[tid] += x * y; } __syncthreads(); for (int s = blockSize / 2; s > 0; s >>= 1) { if (tid < s) { xx[tid] += xx[tid + s]; yy[tid] += yy[tid + s]; xy[tid] += xy[tid + s]; } __syncthreads(); } if (tid == 0) { output[ty] = scale * xy[0] / (sqrt(xx[0]) * sqrt(yy[0])); } } void hl_cossim(real* output, real* input1, real* input2, int width, int input1_height, int input2_height, real scale) { CHECK_NOTNULL(output); CHECK_NOTNULL(input1); CHECK_NOTNULL(input2); const int blockSize = 256; dim3 threads(blockSize, 1); dim3 grid(1, input1_height); KeCosSim<blockSize><<<grid, threads, 0, STREAM_DEFAULT>>> (output, input1, input2, width, input1_height, input2_height, scale); CHECK_SYNC("hl_cossim failed"); } template<int blockSize> __global__ void KeCosSimDerivative(real* grad, real* output, real* prevOutX, real* prevOutY, real* prevGradX, real* prevGradY, int width, int input1_height, int input2_height, real scale) { const int ty = blockIdx.y; int tid = threadIdx.x; __shared__ real xx[blockSize]; __shared__ real yy[blockSize]; __shared__ real xy[blockSize]; xx[tid] = 0.0; yy[tid] = 0.0; xy[tid] = 0.0; __syncthreads(); prevOutX += ty * width; prevGradX += ty * width; if (input2_height > 1) { prevOutY += ty * width; prevGradY += ty * width; } for (int index = tid; index < width; index += blockSize) { real x = prevOutX[index]; real y = prevOutY[index]; xx[tid] += x * x; yy[tid] += y * y; xy[tid] += x * y; } __syncthreads(); for (int s = blockSize / 2; s > 0; s >>= 1) { if (tid < s) { xx[tid] += xx[tid + s]; yy[tid] += yy[tid + s]; xy[tid] += xy[tid + s]; } __syncthreads(); } if (xy[0] == 0) { real reciprocal = 1.0 / (sqrt(xx[0]) * sqrt(yy[0])); for (int index = tid; index < width; index += blockSize) { prevGradX[index] += scale * grad[ty] * prevOutY[index] * reciprocal; if (input2_height > 1) { prevGradY[index] += scale * grad[ty] * prevOutX[index] * reciprocal; } else { paddle::paddleAtomicAdd(prevGradY + index, scale * grad[ty] * prevOutX[index] * reciprocal); } } } else { real reciprocalXY = 1.0 / xy[0]; real reciprocalSquareSumX = 1.0 / xx[0]; real reciprocalSquareSumY = 1.0 / yy[0]; for (int index = tid; index < width; index += blockSize) { prevGradX[index] += output[ty] * grad[ty] * (prevOutY[index] * reciprocalXY - prevOutX[index] * reciprocalSquareSumX); if (input2_height > 1) { prevGradY[index] += output[ty] * grad[ty] * (prevOutX[index] * reciprocalXY - prevOutY[index] * reciprocalSquareSumY); } else { 
paddle::paddleAtomicAdd(prevGradY + index, output[ty] * grad[ty] * (prevOutX[index] * reciprocalXY - prevOutY[index] * reciprocalSquareSumY)); } } } } void hl_cossim_derivative(real* grad, real* output, real* prevOutX, real* prevOutY, real* prevGradX, real* prevGradY, int width, int input1_height, int input2_height, real scale) { CHECK_NOTNULL(grad); CHECK_NOTNULL(output); CHECK_NOTNULL(prevOutX); CHECK_NOTNULL(prevOutY); CHECK_NOTNULL(prevGradX); CHECK_NOTNULL(prevGradY); const int blockSize = 256; dim3 threads(blockSize, 1); dim3 grid(1, input1_height); KeCosSimDerivative<blockSize><<<grid, threads, 0, STREAM_DEFAULT>>> (grad, output, prevOutX, prevOutY, prevGradX, prevGradY, width, input1_height, input2_height, scale); CHECK_SYNC("hl_cossim_derivate failed"); }
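Several kernels in the pair above (KeMatrixClassificationError, KeParamReluBackWardW, KeCosSim, KeCosSimDerivative) use the same shared-memory tree reduction: each thread first accumulates a strided slice, then the block halves the number of active lanes per step. The stand-alone sketch below shows just that pattern with a hypothetical block_sum kernel; it assumes blockSize is a power of two and is not taken from the file above.

#include <cuda_runtime.h>

// Hypothetical kernel: per-block partial sums using the same reduction shape as KeCosSim.
template <int blockSize>
__global__ void block_sum(const float* in, float* out, int n) {
    __shared__ float buf[blockSize];
    const int tid = threadIdx.x;

    // Phase 1: every thread accumulates a strided slice of the input.
    float acc = 0.f;
    for (int i = blockIdx.x * blockSize + tid; i < n; i += blockSize * gridDim.x) {
        acc += in[i];
    }
    buf[tid] = acc;
    __syncthreads();

    // Phase 2: tree reduction, halving the active threads each step.
    for (int s = blockSize / 2; s > 0; s >>= 1) {
        if (tid < s) {
            buf[tid] += buf[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0) {
        out[blockIdx.x] = buf[0];
    }
}
// Typical launch: block_sum<256><<<grid, 256, 0, stream>>>(d_in, d_out, n);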
e56e39aa62d9ae525cbbc9451299709cce54293f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include"/usr/include/opencv/highgui.h" #include"cuda_runtime.h" #include<math.h> #include<malloc.h> //define the texture that we use in the function later texture<char,2,hipReadModeElementType> teximg; __global__ void wavelet_decom_nlevel(float* img_out , float*img_in,int nlevel,int width); __global__ void wavelet_decom(char* img,int img_width); /* in the this function , img represents for the array of img,img_width is used for change from 2D img into 1D form. img must be char type for there are some negative values after the wavelet decomposition ilevel is the maximum level of waveet_transform you want to do with the image. ilevel should not be larger than log_2(width),usually we choose 2 or 3 */ // __device__ float* gtemp; int main(){ //firstly load the image you want to do the wavelet transform IplImage* img = cvLoadImage("shepp1024.jpg",0); // uchar* tmp ={0}; cvNamedWindow("Image_show",CV_WINDOW_AUTOSIZE); cvShowImage("Image_show",img); //to compare with the image after wavelet transform,show the image before the transform //test the type of the image printf("width = %d\n height = %d\n",img->width,img->height); //the results shows that the img have a very different type of data // printf("%d",img.at<double>(1,1)); printf("%f\n",(img->imageData[1]+img->imageData[2])/2.0); //as I turn the image number from char to unchar,it can be printed well on the screen //to store the image which we can used to do the wavelet transform we choose a double type array /* char big[img->width][img->height]; int a = 0; //give the value to tmp for(int i = 0; i< img->height;i++) { for(int j = 0; j <img->width;j++) { big[i][j] = img->imageData[i*img->width + j]; if(big[i][j]>127) {printf("%d\n",(int)big[i][j]);a++;} } } */ printf("the size of float: %lu\n",sizeof(float)); printf("the depth : %d\n",img->depth); printf("the channels : %d\n",img->nChannels); printf("type of store: %d\n",img->dataOrder); printf("origin: %d\n",img->origin); printf("imageSize %d\n",img->imageSize); printf("widthStep: %d\n",img->widthStep); // printf("the length of data : %lu\n",sizeof(img->imageData)/sizeof(char)); //3channellDesc3 //CUDA,, //unsigned // hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8,0,0,0,hipChannelFormatKindSigned); hipArray *imgOnMemory; hipMallocArray(&imgOnMemory,&channelDesc,img->height,img->width); //,Step hipMemcpyToArray(imgOnMemory,0,0,img->imageData,img->height*img->width,hipMemcpyHostToDevice); // hipBindTextureToArray(&teximg,imgOnMemory,&channelDesc); float l = 66.6; printf("%c\n",(char)l); //img_new_texture IplImage* img_new_texture = cvCreateImage(cvSize(img->width,img->height),IPL_DEPTH_8S,1); char * temp; hipMalloc((char**)&temp,img->width*img->height*sizeof(char)); IplImage* img_float = cvCreateImage(cvSize(img->width,img->height),IPL_DEPTH_32F,1); float *temp_float; hipMalloc((float**)&temp_float,img->width*img->height*sizeof(float)); float *odd; hipMalloc((void**)&odd,img->width*img->height*sizeof(float)); hipMemcpyToSymbol(gtemp,&odd,sizeof(float*),size_t(0),hipMemcpyHostToDevice); //cudaimg->imageData float *hostimg; hostimg = (float *) malloc(sizeof(float)*img->height*img->width); for(int j = 0 ;j<img->width*img->height;j++) { hostimg[j] = (uchar)img->imageData[j]/1.0; // if(hostimg[j]>0) printf("%lf\n",hostimg[j]); }//printf("%lf\n",hostimg[5]); float *datatran; hipMalloc((float**)&datatran,img->width*img->height*sizeof(float)); 
hipMemcpy(datatran,hostimg,img->width*img->height*sizeof(float),hipMemcpyHostToDevice); dim3 dimBlock(32,32); // dim3 dimGrid(8,8); dim3 dimGrid((img->widthStep + dimBlock.x -1)/dimBlock.x,(img->height + dimBlock.y -1)/dimBlock.y); // hipLaunchKernelGGL(( wavelet_decom), dim3(dimGrid),dim3(dimBlock), 0, 0, temp,img->width); hipDeviceSynchronize(); // hipMemcpy(img_new_texture->imageData,temp,img->widthStep*img->height*sizeof(char),hipMemcpyDeviceToHost); // hipLaunchKernelGGL(( wavelet_decom_nlevel), dim3(dimGrid),dim3(dimBlock), 0, 0, temp_float,datatran,1,img->width); // hipDeviceSynchronize(); hipMemcpy(img_float->imageData,temp_float,img->width*img->height*sizeof(float),hipMemcpyDeviceToHost); printf("value : %d\n",img_float->imageData[1]); // // for(int flag = 0;flag<img->height*img->width;flag++) img_new_texture->imageData[flag] = img->imageData[flag]-128; cvNamedWindow("wavelet",CV_WINDOW_AUTOSIZE); cvShowImage("wavelet",img_new_texture); cvNamedWindow("odinary-way",CV_WINDOW_AUTOSIZE); cvShowImage("odinary-way",img_float); // cvSaveImage("./wavelet.jpg",img_float); cvSaveImage("./texture.jpg",img_new_texture); hipUnbindTexture(&teximg); hipFreeArray(imgOnMemory); hipFree(temp); hipFree(temp_float); hipFree(datatran); hipFree(odd); free(hostimg); cvWaitKey(0); cvReleaseImage(&img); cvReleaseImage(&img_new_texture); cvReleaseImage(&img_float); cvDestroyWindow("Image_show"); cvDestroyWindow("wavelet"); return 0; } //, // __global__ void wavelet_decom(char* img,int img_width) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; /* if(x <img_width/2) img[y*img_width + x] = 15; else img[y*img_width + x] = -100; */ if(x<img_width/2&&y<img_width/2){ img[y*img_width + x] = ((tex2D(teximg,2*x+1,2*y+1)+ tex2D(teximg,2*x,2*y+1))+tex2D(teximg,2*x+1,2*y)+tex2D(teximg,2*x,2*y))/4 - 127; img[y*img_width + x + img_width/2] = ((tex2D(teximg,2*x+1,2*y+1)- tex2D(teximg,2*x,2*y+1))+tex2D(teximg,2*x+1,2*y)-tex2D(teximg,2*x,2*y))/2 ; //y img[(y+img_width/2)*img_width + x] = ((tex2D(teximg,2*x+1,2*y+1)+ tex2D(teximg,2*x,2*y+1))-tex2D(teximg,2*x+1,2*y)-tex2D(teximg,2*x,2*y))/2 ; img[(y+img_width/2)*img_width + x + img_width/2] = ((tex2D(teximg,2*x+1,2*y+1)- tex2D(teximg,2*x,2*y+1))-tex2D(teximg,2*x+1,2*y)+tex2D(teximg,2*x,2*y)) ; } if(img[y*img_width+x]<5&&img[y*img_width+x]>-5) img[y*img_width + x] = 0; } //texture, __global__ void wavelet_decom_nlevel(float* img_out , float*img_in,int nlevel,int width){ unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; int widthtemp = width; gtemp[y*width + x] = (uchar) img_in[y*width+x]/256.0; __syncthreads(); // img_out[y*width + x] = img_out[y*width +x ] +0.5; // for(int i = 0; i<nlevel ; i++){ widthtemp = widthtemp/2; //x if(x<widthtemp&&y<widthtemp){ img_out[y*width+x] =1/256.0*((img_in[2*y*width+2*x] + img_in[2*y*width+2*x+1] + img_in[(2*y+1)*width+2*x] + img_in[(2*y+1)*width+2*x+1])/4.0); // __syncthreads(); img_out[y*width+x+widthtemp] =1/256.0* (-img_in[2*y*width+2*x] + img_in[2*y*width+2*x+1] - img_in[(2*y+1)*width+2*x] + img_in[(2*y+1)*width+2*x+1])/2.0; // __syncthreads(); img_out[(y+widthtemp)*width+x] = 1/256.0*(-img_in[2*y*width+2*x] - img_in[2*y*width+2*x+1] + img_in[(2*y+1)*width+2*x] + img_in[(2*y+1)*width+2*x+1])/2.0; // __syncthreads(); img_out[(y+widthtemp)*width+x+widthtemp] =1/256.0* (img_in[2*y*width+2*x] - img_in[2*y*width+2*x+1] - img_in[(2*y+1)*width+2*x] + img_in[(2*y+1)*width+2*x+1])/1.0; // __syncthreads(); } /* 
if(x<widthtemp&&y<widthtemp){ temp[y*width+x] = img_out[2*y*width+2*x]; __syncthreads(); temp[y*width+x+widthtemp] = img_out[2*y*width+2*x+1]; __syncthreads(); temp[(y+widthtemp)*width+x] = img_out[(2*y+1)*width+2*x]; __syncthreads(); temp[(y+widthtemp)*width+x+widthtemp] = img_out[(2*y+1)*width+2*x+1]; __syncthreads(); } */ // for testing whether the global variable works // img_out[y*width+x]=gtemp[y*width+x]; __syncthreads(); }
e56e39aa62d9ae525cbbc9451299709cce54293f.cu
#include<stdio.h>
#include"/usr/include/opencv/highgui.h"
#include"cuda_runtime.h"
#include<math.h>
#include<malloc.h>

//define the texture that we use in the function later
texture<char,2,cudaReadModeElementType> teximg;

__global__ void wavelet_decom_nlevel(float* img_out, float* img_in, int nlevel, int width);
__global__ void wavelet_decom(char* img, int img_width);

/* In this function, img is the image array and img_width is used to map the 2D image onto a
   1D layout. img must be char type because some values become negative after the wavelet
   decomposition. nlevel is the maximum number of wavelet-transform levels to apply; it should
   not exceed log_2(width), and usually 2 or 3 is chosen. */

// global variable (device-side pointer used by wavelet_decom_nlevel)
__device__ float* gtemp;

int main(){
    // first load the image you want to wavelet-transform
    IplImage* img = cvLoadImage("shepp1024.jpg",0);
    // uchar* tmp ={0};
    cvNamedWindow("Image_show",CV_WINDOW_AUTOSIZE);
    cvShowImage("Image_show",img);
    // show the image before the transform so it can be compared with the transformed result
    // check the type of the image
    printf("width = %d\n height = %d\n",img->width,img->height);
    // the result shows that the image stores its data in a rather different type
    // printf("%d",img.at<double>(1,1));
    printf("%f\n",(img->imageData[1]+img->imageData[2])/2.0);
    // after converting the pixel values from char to uchar they print correctly on the screen
    // to store the image used for the wavelet transform, a double-type array could be chosen
    /*
    char big[img->width][img->height];
    int a = 0;
    // copy the values into tmp
    for(int i = 0; i< img->height;i++) {
        for(int j = 0; j <img->width;j++) {
            big[i][j] = img->imageData[i*img->width + j];
            if(big[i][j]>127) {printf("%d\n",(int)big[i][j]);a++;}
        }
    }
    */
    printf("the size of float: %lu\n",sizeof(float));
    printf("the depth : %d\n",img->depth);
    printf("the channels : %d\n",img->nChannels);
    printf("type of store: %d\n",img->dataOrder);
    printf("origin: %d\n",img->origin);
    printf("imageSize %d\n",img->imageSize);
    printf("widthStep: %d\n",img->widthStep);
    // printf("the length of data : %lu\n",sizeof(img->imageData)/sizeof(char));
    // The checks above showed a 3-channel image at one point, so the channelDesc below would
    // have to be changed accordingly; the line below was adjusted for that case.
    // Allocate a CUDA array whose height equals the image height and whose width equals the
    // image width, and define the element type at the same time. Using an unsigned type keeps
    // the data format unchanged through the wavelet transform, but then the fractional part is
    // lost during the computation, which affects the reconstruction to some extent.
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8,0,0,0,cudaChannelFormatKindSigned);
    cudaArray *imgOnMemory;
    cudaMallocArray(&imgOnMemory,&channelDesc,img->height,img->width);
    // copy the image into the CUDA array; whether widthStep should be honoured here is still in doubt
    cudaMemcpyToArray(imgOnMemory,0,0,img->imageData,img->height*img->width,cudaMemcpyHostToDevice);
    // bind the texture reference
    cudaBindTextureToArray(&teximg,imgOnMemory,&channelDesc);

    float l = 66.6;
    printf("%c\n",(char)l);

    // create a new image img_new_texture for the texture-based result
    IplImage* img_new_texture = cvCreateImage(cvSize(img->width,img->height),IPL_DEPTH_8S,1);
    char * temp;
    cudaMalloc((char**)&temp,img->width*img->height*sizeof(char));

    IplImage* img_float = cvCreateImage(cvSize(img->width,img->height),IPL_DEPTH_32F,1);
    float *temp_float;
    cudaMalloc((float**)&temp_float,img->width*img->height*sizeof(float));

    float *odd;
    cudaMalloc((void**)&odd,img->width*img->height*sizeof(float));
    cudaMemcpyToSymbol(gtemp,&odd,sizeof(float*),size_t(0),cudaMemcpyHostToDevice);

    // build a host array used to pass img->imageData to the device as floats
    float *hostimg;
    hostimg = (float *) malloc(sizeof(float)*img->height*img->width);
    for(int j = 0 ;j<img->width*img->height;j++) {
        hostimg[j] = (uchar)img->imageData[j]/1.0;
        // if(hostimg[j]>0) printf("%lf\n",hostimg[j]);
    }
    //printf("%lf\n",hostimg[5]);

    float *datatran;
    cudaMalloc((float**)&datatran,img->width*img->height*sizeof(float));
    cudaMemcpy(datatran,hostimg,img->width*img->height*sizeof(float),cudaMemcpyHostToDevice);

    dim3 dimBlock(32,32);
    // dim3 dimGrid(8,8);
    dim3 dimGrid((img->widthStep + dimBlock.x -1)/dimBlock.x,(img->height + dimBlock.y -1)/dimBlock.y);

    // compute with the texture-memory version
    wavelet_decom<<<dimGrid,dimBlock>>>(temp,img->width);
    cudaThreadSynchronize();
    // copy the data back
    cudaMemcpy(img_new_texture->imageData,temp,img->widthStep*img->height*sizeof(char),cudaMemcpyDeviceToHost);

    // compute with the ordinary (non-texture) version
    wavelet_decom_nlevel<<<dimGrid,dimBlock>>>(temp_float,datatran,1,img->width);
    // cudaDeviceSynchronize();
    cudaMemcpy(img_float->imageData,temp_float,img->width*img->height*sizeof(float),cudaMemcpyDeviceToHost);
    printf("value : %d\n",img_float->imageData[1]);

    // display the images
    // for(int flag = 0;flag<img->height*img->width;flag++) img_new_texture->imageData[flag] = img->imageData[flag]-128;
    cvNamedWindow("wavelet",CV_WINDOW_AUTOSIZE);
    cvShowImage("wavelet",img_new_texture);
    cvNamedWindow("odinary-way",CV_WINDOW_AUTOSIZE);
    cvShowImage("odinary-way",img_float);
    // save the images to local disk
    cvSaveImage("./wavelet.jpg",img_float);
    cvSaveImage("./texture.jpg",img_new_texture);

    cudaUnbindTexture(&teximg);
    cudaFreeArray(imgOnMemory);
    cudaFree(temp);
    cudaFree(temp_float);
    cudaFree(datatran);
    cudaFree(odd);
    free(hostimg);

    cvWaitKey(0);
    cvReleaseImage(&img);
    cvReleaseImage(&img_new_texture);
    cvReleaseImage(&img_float);
    cvDestroyWindow("Image_show");
    cvDestroyWindow("wavelet");
    return 0;
}

// This kernel does the image wavelet decomposition using texture memory. Texture memory turned
// out to be read-only, so this kernel can only perform a one-level decomposition.
// It is basically not very useful and is kept for reference.
__global__ void wavelet_decom(char* img,int img_width)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    /*
    if(x <img_width/2) img[y*img_width + x] = 15;
    else img[y*img_width + x] = -100;
    */
    if(x<img_width/2&&y<img_width/2){
        img[y*img_width + x] = ((tex2D(teximg,2*x+1,2*y+1)+ tex2D(teximg,2*x,2*y+1))+tex2D(teximg,2*x+1,2*y)+tex2D(teximg,2*x,2*y))/4 - 127;
        img[y*img_width + x + img_width/2] = ((tex2D(teximg,2*x+1,2*y+1)- tex2D(teximg,2*x,2*y+1))+tex2D(teximg,2*x+1,2*y)-tex2D(teximg,2*x,2*y))/2 ;
        // apply the same treatment along the y direction
        img[(y+img_width/2)*img_width + x] = ((tex2D(teximg,2*x+1,2*y+1)+ tex2D(teximg,2*x,2*y+1))-tex2D(teximg,2*x+1,2*y)-tex2D(teximg,2*x,2*y))/2 ;
        img[(y+img_width/2)*img_width + x + img_width/2] = ((tex2D(teximg,2*x+1,2*y+1)- tex2D(teximg,2*x,2*y+1))-tex2D(teximg,2*x+1,2*y)+tex2D(teximg,2*x,2*y)) ;
    }
    if(img[y*img_width+x]<5&&img[y*img_width+x]>-5) img[y*img_width + x] = 0;
}

// This version does not use texture memory and can therefore be applied for multiple levels.
__global__ void wavelet_decom_nlevel(float* img_out , float*img_in,int nlevel,int width){
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    int widthtemp = width;
    gtemp[y*width + x] = (uchar) img_in[y*width+x]/256.0;
    __syncthreads();
    // img_out[y*width + x] = img_out[y*width +x ] +0.5;
    // for(int i = 0; i<nlevel ; i++){
    widthtemp = widthtemp/2;
    // handle the x direction first
    if(x<widthtemp&&y<widthtemp){
        img_out[y*width+x] = 1/256.0*((img_in[2*y*width+2*x] + img_in[2*y*width+2*x+1] + img_in[(2*y+1)*width+2*x] + img_in[(2*y+1)*width+2*x+1])/4.0);
        // __syncthreads();
        img_out[y*width+x+widthtemp] = 1/256.0*(-img_in[2*y*width+2*x] + img_in[2*y*width+2*x+1] - img_in[(2*y+1)*width+2*x] + img_in[(2*y+1)*width+2*x+1])/2.0;
        // __syncthreads();
        img_out[(y+widthtemp)*width+x] = 1/256.0*(-img_in[2*y*width+2*x] - img_in[2*y*width+2*x+1] + img_in[(2*y+1)*width+2*x] + img_in[(2*y+1)*width+2*x+1])/2.0;
        // __syncthreads();
        img_out[(y+widthtemp)*width+x+widthtemp] = 1/256.0*(img_in[2*y*width+2*x] - img_in[2*y*width+2*x+1] - img_in[(2*y+1)*width+2*x] + img_in[(2*y+1)*width+2*x+1])/1.0;
        // __syncthreads();
    }
    /*
    if(x<widthtemp&&y<widthtemp){
        temp[y*width+x] = img_out[2*y*width+2*x];
        __syncthreads();
        temp[y*width+x+widthtemp] = img_out[2*y*width+2*x+1];
        __syncthreads();
        temp[(y+widthtemp)*width+x] = img_out[(2*y+1)*width+2*x];
        __syncthreads();
        temp[(y+widthtemp)*width+x+widthtemp] = img_out[(2*y+1)*width+2*x+1];
        __syncthreads();
    }
    */
    // used to test whether the global variable works
    // img_out[y*width+x]=gtemp[y*width+x];
    __syncthreads();
}
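The float-path result produced by wavelet_decom_nlevel is easiest to check against a small host-side reference of the same one-level average/difference split. The sketch below assumes row-major float buffers holding 0..255 pixel values and uses the helper name haar_level_host; neither appears in the original file, and the 1/256 factor simply mirrors the kernel's rescaling of 8-bit pixels into the float image.

#include <vector>

// Host reference for one level of the average/difference split computed on the GPU above.
static void haar_level_host(const std::vector<float>& in, std::vector<float>& out, int width)
{
    const int half = width / 2;
    for (int y = 0; y < half; ++y) {
        for (int x = 0; x < half; ++x) {
            const float a = in[(2 * y) * width + 2 * x];
            const float b = in[(2 * y) * width + 2 * x + 1];
            const float c = in[(2 * y + 1) * width + 2 * x];
            const float d = in[(2 * y + 1) * width + 2 * x + 1];
            out[y * width + x]                 = (a + b + c + d) / 4.0f / 256.0f;  // approximation (LL)
            out[y * width + x + half]          = (-a + b - c + d) / 2.0f / 256.0f; // horizontal detail
            out[(y + half) * width + x]        = (-a - b + c + d) / 2.0f / 256.0f; // vertical detail
            out[(y + half) * width + x + half] = (a - b - c + d) / 256.0f;         // diagonal detail
        }
    }
}

Comparing this output element by element with the buffer copied back into img_float is a quick way to confirm the kernel's index arithmetic before moving on to multiple levels.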
b4751ef840537f54a9763dfc3f510467421f0f15.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #define mod(x,y) ( (x) & (y-1)) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, double * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X); double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X); double * __tilevar_4__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X); double * __tilevar_5__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X); double t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f; double b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f; int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8); int __iter_y__ = FORMA_MAX((int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y)-4, 0); // Initialize the values int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) { __tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*__iter_y__]; t2 = input[__iter_3__+M*(__iter_y__+1)]; } // Initial computation for (int __iter_1__ = FORMA_MAX(1,__iter_y__+1); __iter_1__ < FORMA_MIN(N-1,__iter_y__+7); __iter_1__++) { if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){ b2 = __tilevar_2__[__iter_3__-__iter_0__]; __tilevar_2__[__iter_3__-__iter_0__] = t2; t2 = input[__iter_3__+M*(__iter_1__+1)]; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ double __temp_2__ = b2; double __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); double __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]); double __temp_10__ = (__temp_6__ + 15 * __temp_9__); double __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); double __temp_14__ = (__temp_10__ + 12 * __temp_13__); double __temp_17__ = t2; double __temp_18__ = (__temp_14__ + 5 * __temp_17__); double __temp_19__ = (__temp_18__ / 118); b3 = __tilevar_3__[__iter_3__-__iter_0__]; __tilevar_3__[__iter_3__-__iter_0__] = t3; t3 = __temp_19__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ double __temp_32__ = b3; double __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]); double __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); double __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]); double __temp_40__ = (__temp_36__ 
+ 15 * __temp_39__); double __temp_43__ = (__tilevar_3__[__iter_3__+1-__iter_0__]); double __temp_44__ = (__temp_40__ + 12 * __temp_43__); double __temp_47__ = t3; double __temp_48__ = (__temp_44__ + 5 * __temp_47__); double __temp_49__ = (__temp_48__ / 118); b4 = __tilevar_4__[__iter_3__-__iter_0__]; __tilevar_4__[__iter_3__-__iter_0__] = t4; t4 = __temp_49__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ double __temp_60__ = b4; double __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]); double __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); double __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]); double __temp_64__ = (__temp_62__ + 15 * __temp_63__); double __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]); double __temp_66__ = (__temp_64__ + 12 * __temp_65__); double __temp_67__ = t4; double __temp_68__ = (__temp_66__ + 5 * __temp_67__); double __temp_69__ = (__temp_68__ / 118); b5 = __tilevar_5__[__iter_3__-__iter_0__]; __tilevar_5__[__iter_3__-__iter_0__] = t5; t5 = __temp_69__; } } // Rest of the computation for (int __iter_1__ = FORMA_MAX(1,__iter_y__+7); __iter_1__ < FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+8); __iter_1__++) { if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){ b2 = __tilevar_2__[__iter_3__-__iter_0__]; __tilevar_2__[__iter_3__-__iter_0__] = t2; t2 = input[__iter_3__+M*(__iter_1__+1)]; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ double __temp_2__ = b2; double __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); double __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]); double __temp_10__ = (__temp_6__ + 15 * __temp_9__); double __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); double __temp_14__ = (__temp_10__ + 12 * __temp_13__); double __temp_17__ = t2; double __temp_18__ = (__temp_14__ + 5 * __temp_17__); double __temp_19__ = (__temp_18__ / 118); b3 = __tilevar_3__[__iter_3__-__iter_0__]; __tilevar_3__[__iter_3__-__iter_0__] = t3; t3 = __temp_19__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ double __temp_32__ = b3; double __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]); double __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); double __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]); double __temp_40__ = (__temp_36__ + 15 * __temp_39__); double __temp_43__ = (__tilevar_3__[__iter_3__+1-__iter_0__]); double __temp_44__ = (__temp_40__ + 12 * __temp_43__); double __temp_47__ = t3; double __temp_48__ = (__temp_44__ + 5 * __temp_47__); double __temp_49__ = (__temp_48__ / 118); b4 = __tilevar_4__[__iter_3__-__iter_0__]; __tilevar_4__[__iter_3__-__iter_0__] = t4; t4 = __temp_49__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ double __temp_60__ = b4; double __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]); double __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); double __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]); double __temp_64__ = (__temp_62__ + 15 * __temp_63__); double __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]); double __temp_66__ = (__temp_64__ + 12 * __temp_65__); double __temp_67__ = t4; double __temp_68__ = (__temp_66__ + 5 * __temp_67__); double __temp_69__ = 
(__temp_68__ / 118); b5 = __tilevar_5__[__iter_3__-__iter_0__]; __tilevar_5__[__iter_3__-__iter_0__] = t5; t5 = __temp_69__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ double __temp_80__ = b5; double __temp_81__ = (__tilevar_5__[__iter_3__+(-1)-__iter_0__]); double __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); double __temp_83__ = (__tilevar_5__[__iter_3__-__iter_0__]); double __temp_84__ = (__temp_82__ + 15 * __temp_83__); double __temp_85__ = (__tilevar_5__[__iter_3__+1-__iter_0__]); double __temp_86__ = (__temp_84__ + 12 * __temp_85__); double __temp_87__ = t5; double __temp_88__ = (__temp_86__ + 5 * __temp_87__); double __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_89__; } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(double * h_input, int N, int M, double * __var_0__){ /* Host allocation Begin */ double * input; hipMalloc(&input,sizeof(double)*((N)*(M))); Check_CUDA_Error("Allocation Error!! : input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input); } double * __var_1__; hipMalloc(&__var_1__,sizeof(double)*((N)*(M))); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0); int __size_0___kernel___forma_kernel__0__ = M; int __size_1___kernel___forma_kernel__0__ = N; int __block_0___kernel___forma_kernel__0__ = 128; int __block_1___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__, 128); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); for (int i = 0; i < 125; i++) { hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input); } Check_CUDA_Error("Kernel Launch Error!! 
: __kernel___forma_kernel__0__\n"); for (int n = 0 ; n < 5; n++) { #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif for (int i = 0; i < 125; i++) { hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input); } #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif } hipPointerAttribute_t ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__); /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); } /*Host Free End*/
b4751ef840537f54a9763dfc3f510467421f0f15.cu
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #define mod(x,y) ( (x) & (y-1)) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, double * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X); double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X); double * __tilevar_4__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X); double * __tilevar_5__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X); double t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f; double b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f; int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8); int __iter_y__ = FORMA_MAX((int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y)-4, 0); // Initialize the values int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) { __tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*__iter_y__]; t2 = input[__iter_3__+M*(__iter_y__+1)]; } // Initial computation for (int __iter_1__ = FORMA_MAX(1,__iter_y__+1); __iter_1__ < FORMA_MIN(N-1,__iter_y__+7); __iter_1__++) { if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){ b2 = __tilevar_2__[__iter_3__-__iter_0__]; __tilevar_2__[__iter_3__-__iter_0__] = t2; t2 = input[__iter_3__+M*(__iter_1__+1)]; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ double __temp_2__ = b2; double __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); double __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]); double __temp_10__ = (__temp_6__ + 15 * __temp_9__); double __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); double __temp_14__ = (__temp_10__ + 12 * __temp_13__); double __temp_17__ = t2; double __temp_18__ = (__temp_14__ + 5 * __temp_17__); double __temp_19__ = (__temp_18__ / 118); b3 = __tilevar_3__[__iter_3__-__iter_0__]; __tilevar_3__[__iter_3__-__iter_0__] = t3; t3 = __temp_19__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ double __temp_32__ = b3; double __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]); double __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); double __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]); double __temp_40__ = (__temp_36__ + 15 * __temp_39__); double __temp_43__ = 
(__tilevar_3__[__iter_3__+1-__iter_0__]); double __temp_44__ = (__temp_40__ + 12 * __temp_43__); double __temp_47__ = t3; double __temp_48__ = (__temp_44__ + 5 * __temp_47__); double __temp_49__ = (__temp_48__ / 118); b4 = __tilevar_4__[__iter_3__-__iter_0__]; __tilevar_4__[__iter_3__-__iter_0__] = t4; t4 = __temp_49__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ double __temp_60__ = b4; double __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]); double __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); double __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]); double __temp_64__ = (__temp_62__ + 15 * __temp_63__); double __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]); double __temp_66__ = (__temp_64__ + 12 * __temp_65__); double __temp_67__ = t4; double __temp_68__ = (__temp_66__ + 5 * __temp_67__); double __temp_69__ = (__temp_68__ / 118); b5 = __tilevar_5__[__iter_3__-__iter_0__]; __tilevar_5__[__iter_3__-__iter_0__] = t5; t5 = __temp_69__; } } // Rest of the computation for (int __iter_1__ = FORMA_MAX(1,__iter_y__+7); __iter_1__ < FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+8); __iter_1__++) { if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){ b2 = __tilevar_2__[__iter_3__-__iter_0__]; __tilevar_2__[__iter_3__-__iter_0__] = t2; t2 = input[__iter_3__+M*(__iter_1__+1)]; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ double __temp_2__ = b2; double __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); double __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]); double __temp_10__ = (__temp_6__ + 15 * __temp_9__); double __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); double __temp_14__ = (__temp_10__ + 12 * __temp_13__); double __temp_17__ = t2; double __temp_18__ = (__temp_14__ + 5 * __temp_17__); double __temp_19__ = (__temp_18__ / 118); b3 = __tilevar_3__[__iter_3__-__iter_0__]; __tilevar_3__[__iter_3__-__iter_0__] = t3; t3 = __temp_19__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ double __temp_32__ = b3; double __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]); double __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); double __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]); double __temp_40__ = (__temp_36__ + 15 * __temp_39__); double __temp_43__ = (__tilevar_3__[__iter_3__+1-__iter_0__]); double __temp_44__ = (__temp_40__ + 12 * __temp_43__); double __temp_47__ = t3; double __temp_48__ = (__temp_44__ + 5 * __temp_47__); double __temp_49__ = (__temp_48__ / 118); b4 = __tilevar_4__[__iter_3__-__iter_0__]; __tilevar_4__[__iter_3__-__iter_0__] = t4; t4 = __temp_49__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ double __temp_60__ = b4; double __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]); double __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); double __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]); double __temp_64__ = (__temp_62__ + 15 * __temp_63__); double __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]); double __temp_66__ = (__temp_64__ + 12 * __temp_65__); double __temp_67__ = t4; double __temp_68__ = (__temp_66__ + 5 * __temp_67__); double __temp_69__ = (__temp_68__ / 118); b5 = 
__tilevar_5__[__iter_3__-__iter_0__]; __tilevar_5__[__iter_3__-__iter_0__] = t5; t5 = __temp_69__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ double __temp_80__ = b5; double __temp_81__ = (__tilevar_5__[__iter_3__+(-1)-__iter_0__]); double __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); double __temp_83__ = (__tilevar_5__[__iter_3__-__iter_0__]); double __temp_84__ = (__temp_82__ + 15 * __temp_83__); double __temp_85__ = (__tilevar_5__[__iter_3__+1-__iter_0__]); double __temp_86__ = (__temp_84__ + 12 * __temp_85__); double __temp_87__ = t5; double __temp_88__ = (__temp_86__ + 5 * __temp_87__); double __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_89__; } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(double * h_input, int N, int M, double * __var_0__){ /* Host allocation Begin */ double * input; cudaMalloc(&input,sizeof(double)*((N)*(M))); Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input); } double * __var_1__; cudaMalloc(&__var_1__,sizeof(double)*((N)*(M))); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); int __size_0___kernel___forma_kernel__0__ = M; int __size_1___kernel___forma_kernel__0__ = N; int __block_0___kernel___forma_kernel__0__ = 128; int __block_1___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__, 128); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); for (int i = 0; i < 125; i++) { __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input); } Check_CUDA_Error("Kernel Launch Error!! 
: __kernel___forma_kernel__0__\n"); for (int n = 0 ; n < 5; n++) { #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif for (int i = 0; i < 125; i++) { __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input); } #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif } cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__); /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); } /*Host Free End*/
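Both the .hip and .cu versions of this generated code pipeline the same weighted cross stencil through four shared-memory tiles before a row of output is written, and the host wrapper then launches that kernel repeatedly. A minimal host-side sketch of a single sweep of that stencil, (5*up + 12*left + 15*center + 12*right + 5*down) / 118, is given below for checking the device result; the helper name stencil_step_host, the row-major layout, and leaving the boundary rows and columns untouched are assumptions of this sketch, not part of the generated code.

// One sweep of the 5-point weighted cross stencil used inside the generated kernel.
// N rows, M columns, row-major storage; boundaries are skipped (assumed unchanged).
static void stencil_step_host(const double* in, double* out, int N, int M)
{
    for (int i = 1; i < N - 1; ++i) {
        for (int j = 1; j < M - 1; ++j) {
            out[i * M + j] = (5.0  * in[(i - 1) * M + j] +
                              12.0 * in[i * M + j - 1] +
                              15.0 * in[i * M + j] +
                              12.0 * in[i * M + j + 1] +
                              5.0  * in[(i + 1) * M + j]) / 118.0;
        }
    }
}

Applying this sweep four times per simulated launch, alternating between two host buffers, should track what one pipelined kernel call produces for the interior points.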
d681083c80d004bf71d3240319767c124b087793.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
#include <iostream>

#define checkCUDAErrorWithLine(msg) checkCUDAErrorFn(msg, __FILE__, __LINE__)
#define blockSize 512

namespace StreamCompaction {
    namespace Naive {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }

        // TODO: __global__
        __global__ void kernNaiveScan(int N, int d, int* odata, const int* idata) {
            int index = (blockIdx.x * blockDim.x) + threadIdx.x;
            if (index >= N) return;

            // int power = 1;
            // if(d>1){
            //     for(int i=0; i<d-1; i++){
            //         power *= 2;
            //     }
            // }
            int power = (int) powf(2.0f, (float) d-1);

            if (index >= power){
                odata[index] = idata[index-power] + idata[index];
            }else{
                odata[index] = idata[index];
            }
        }

        int getNextPower(int _N){
            int N = 1;
            while(N < _N){
                N *= 2;
            }
            return N;
        }

        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int _N, int *odata, const int *idata) {
            int N = getNextPower(_N);
            dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);

            // copy data to gpu buffer
            int* dev_odata;
            int* dev_idata;
            hipMalloc((void**)&dev_odata, N * sizeof(int));
            checkCUDAErrorWithLine("hipMalloc dev_odata failed!");
            hipMalloc((void**)&dev_idata, N * sizeof(int));
            checkCUDAErrorWithLine("hipMalloc dev_idata failed!");
            hipMemcpy(dev_idata, idata, sizeof(int) * _N, hipMemcpyHostToDevice);
            hipMemcpy(dev_odata, idata, sizeof(int) * _N, hipMemcpyHostToDevice);

            timer().startGpuTimer();
            for(int d=1;d<=ilog2ceil(N);d++){
                int* tmp = dev_idata;
                dev_idata = dev_odata;
                dev_odata = tmp;
                hipLaunchKernelGGL(( kernNaiveScan), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, N, d, dev_odata, dev_idata);
                hipDeviceSynchronize();
            }
            hipDeviceSynchronize();
            timer().endGpuTimer();

            hipMemcpy(odata, dev_odata, sizeof(int) * _N, hipMemcpyDeviceToHost);
            hipDeviceSynchronize();
            hipFree(dev_odata);
            hipFree(dev_idata);
        }
    }
}
d681083c80d004bf71d3240319767c124b087793.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
#include <iostream>

#define checkCUDAErrorWithLine(msg) checkCUDAErrorFn(msg, __FILE__, __LINE__)
#define blockSize 512

namespace StreamCompaction {
    namespace Naive {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }

        // TODO: __global__
        __global__ void kernNaiveScan(int N, int d, int* odata, const int* idata) {
            int index = (blockIdx.x * blockDim.x) + threadIdx.x;
            if (index >= N) return;

            // int power = 1;
            // if(d>1){
            //     for(int i=0; i<d-1; i++){
            //         power *= 2;
            //     }
            // }
            int power = (int) powf(2.0f, (float) d-1);

            if (index >= power){
                odata[index] = idata[index-power] + idata[index];
            }else{
                odata[index] = idata[index];
            }
        }

        int getNextPower(int _N){
            int N = 1;
            while(N < _N){
                N *= 2;
            }
            return N;
        }

        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int _N, int *odata, const int *idata) {
            int N = getNextPower(_N);
            dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);

            // copy data to gpu buffer
            int* dev_odata;
            int* dev_idata;
            cudaMalloc((void**)&dev_odata, N * sizeof(int));
            checkCUDAErrorWithLine("cudaMalloc dev_odata failed!");
            cudaMalloc((void**)&dev_idata, N * sizeof(int));
            checkCUDAErrorWithLine("cudaMalloc dev_idata failed!");
            cudaMemcpy(dev_idata, idata, sizeof(int) * _N, cudaMemcpyHostToDevice);
            cudaMemcpy(dev_odata, idata, sizeof(int) * _N, cudaMemcpyHostToDevice);

            timer().startGpuTimer();
            for(int d=1;d<=ilog2ceil(N);d++){
                int* tmp = dev_idata;
                dev_idata = dev_odata;
                dev_odata = tmp;
                kernNaiveScan<<<fullBlocksPerGrid, blockSize>>>(N, d, dev_odata, dev_idata);
                cudaDeviceSynchronize();
            }
            cudaDeviceSynchronize();
            timer().endGpuTimer();

            cudaMemcpy(odata, dev_odata, sizeof(int) * _N, cudaMemcpyDeviceToHost);
            cudaDeviceSynchronize();
            cudaFree(dev_odata);
            cudaFree(dev_idata);
        }
    }
}
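Read side by side, the .hip and .cu versions of this scan differ only in the runtime API spelling; the kernel itself is a double-buffered Hillis-Steele pass, so after ilog2ceil(N) iterations dev_odata holds an inclusive prefix sum. A short CPU reference for checking that result is sketched below; the function name scan_inclusive_cpu is an assumption and does not come from the project.

// CPU reference: inclusive prefix sum over n elements, matching what the
// double-buffered kernel above leaves in dev_odata.
static void scan_inclusive_cpu(int n, int* odata, const int* idata)
{
    int running = 0;
    for (int i = 0; i < n; ++i) {
        running += idata[i];  // include the current element -> inclusive scan
        odata[i] = running;
    }
}

Since d - 1 never reaches 31 here, the per-iteration offset could equally be computed with an integer shift, 1 << (d - 1), instead of powf; whether the surrounding test harness expects an inclusive or an exclusive scan is not visible from this file alone.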
a8ed6391e42d54d7231a3d9af76c66a404558930.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file mpcd/SlitGeometryFillerGPU.cu * \brief Defines GPU functions and kernels used by mpcd::SlitGeometryFillerGPU */ #include "ParticleDataUtilities.h" #include "SlitPoreGeometryFillerGPU.cuh" #include "hoomd/RNGIdentifiers.h" #include "hoomd/RandomNumbers.h" namespace mpcd { namespace gpu { namespace kernel { /*! * \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param box Local simulation box * \param d_boxes List of 2d bounding boxes for filling * \param d_ranges Particle ranges for each box * \param num_boxes Number of bounding boxes to fill * \param N_tot Total number of particles * \param type Type of fill particles * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param vel_factor Scale factor for uniform normal velocities consistent with particle mass / * temperature \param timestep Current timestep \param seed User seed to PRNG for drawing velocities * * \b Implementation: * * Using one thread per particle, the thread is assigned to a fill range matching a 2d bounding box, * which defines a cuboid of volume to fill. The thread index is translated into a particle tag * and local particle index. A random position is drawn within the cuboid. A random velocity * is drawn consistent with the speed of the moving wall. */ __global__ void slit_pore_draw_particles(Scalar4* d_pos, Scalar4* d_vel, unsigned int* d_tag, const BoxDim box, const Scalar4* d_boxes, const uint2* d_ranges, const unsigned int num_boxes, const unsigned int N_tot, const unsigned int type, const unsigned int first_tag, const unsigned int first_idx, const Scalar vel_factor, const uint64_t timestep, const uint16_t seed) { // num_boxes should be 6, so this will all fit in shmem extern __shared__ char s_data[]; Scalar4* s_boxes = (Scalar4*)(&s_data[0]); uint2* s_ranges = (uint2*)(&s_data[sizeof(Scalar4) * num_boxes]); for (unsigned int offset = 0; offset < num_boxes; offset += blockDim.x) { if (offset + threadIdx.x < num_boxes) { const unsigned int boxid = offset + threadIdx.x; s_boxes[boxid] = d_boxes[boxid]; s_ranges[boxid] = d_ranges[boxid]; } } __syncthreads(); // one thread per particle const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N_tot) return; // linear search for box matching thread (num_boxes is small) Scalar3 lo = box.getLo(); Scalar3 hi = box.getHi(); for (unsigned int boxid = 0; boxid < num_boxes; ++boxid) { const uint2 range = s_ranges[boxid]; if (idx >= range.x && idx < range.y) { const Scalar4 fillbox = s_boxes[boxid]; lo.x = fillbox.x; hi.x = fillbox.y; lo.z = fillbox.z; hi.z = fillbox.w; break; } } // particle tag and index const unsigned int tag = first_tag + idx; const unsigned int pidx = first_idx + idx; d_tag[pidx] = tag; // initialize random number generator for positions and velocity hoomd::RandomGenerator rng( hoomd::Seed(hoomd::RNGIdentifier::SlitPoreGeometryFiller, timestep, seed), hoomd::Counter(tag)); d_pos[pidx] = make_scalar4(hoomd::UniformDistribution<Scalar>(lo.x, hi.x)(rng), hoomd::UniformDistribution<Scalar>(lo.y, hi.y)(rng), hoomd::UniformDistribution<Scalar>(lo.z, hi.z)(rng), __int_as_scalar(type)); hoomd::NormalDistribution<Scalar> gen(vel_factor, 0.0); Scalar3 vel; 
gen(vel.x, vel.y, rng); vel.z = gen(rng); // TODO: should these be given zero net-momentum contribution (relative to the frame of // reference?) d_vel[pidx] = make_scalar4(vel.x, vel.y, vel.z, __int_as_scalar(mpcd::detail::NO_CELL)); } } // end namespace kernel /*! * \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param box Local simulation box * \param d_boxes List of 2d bounding boxes for filling * \param d_ranges Particle ranges for each box * \param num_boxes Number of bounding boxes to fill * \param N_tot Total number of particles * \param mass Mass of fill particles * \param type Type of fill particles * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param kT Temperature for fill particles * \param timestep Current timestep * \param seed User seed to PRNG for drawing velocities * \param block_size Number of threads per block * * \sa kernel::slit_pore_draw_particles */ hipError_t slit_pore_draw_particles(Scalar4* d_pos, Scalar4* d_vel, unsigned int* d_tag, const BoxDim& box, const Scalar4* d_boxes, const uint2* d_ranges, const unsigned int num_boxes, const unsigned int N_tot, const Scalar mass, const unsigned int type, const unsigned int first_tag, const unsigned int first_idx, const Scalar kT, const uint64_t timestep, const uint16_t seed, const unsigned int block_size) { if (N_tot == 0) return hipSuccess; static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)kernel::slit_pore_draw_particles); max_block_size = attr.maxThreadsPerBlock; } // precompute factor for rescaling the velocities since it is the same for all particles const Scalar vel_factor = fast::sqrt(kT / mass); unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N_tot / run_block_size + 1); const size_t shared_bytes = num_boxes * (sizeof(Scalar4) + sizeof(uint2)); hipLaunchKernelGGL(( kernel::slit_pore_draw_particles), dim3(grid), dim3(run_block_size), shared_bytes, 0, d_pos, d_vel, d_tag, box, d_boxes, d_ranges, num_boxes, N_tot, type, first_tag, first_idx, vel_factor, timestep, seed); return hipSuccess; } } // end namespace gpu } // end namespace mpcd
a8ed6391e42d54d7231a3d9af76c66a404558930.cu
// Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file mpcd/SlitGeometryFillerGPU.cu * \brief Defines GPU functions and kernels used by mpcd::SlitGeometryFillerGPU */ #include "ParticleDataUtilities.h" #include "SlitPoreGeometryFillerGPU.cuh" #include "hoomd/RNGIdentifiers.h" #include "hoomd/RandomNumbers.h" namespace mpcd { namespace gpu { namespace kernel { /*! * \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param box Local simulation box * \param d_boxes List of 2d bounding boxes for filling * \param d_ranges Particle ranges for each box * \param num_boxes Number of bounding boxes to fill * \param N_tot Total number of particles * \param type Type of fill particles * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param vel_factor Scale factor for uniform normal velocities consistent with particle mass / * temperature \param timestep Current timestep \param seed User seed to PRNG for drawing velocities * * \b Implementation: * * Using one thread per particle, the thread is assigned to a fill range matching a 2d bounding box, * which defines a cuboid of volume to fill. The thread index is translated into a particle tag * and local particle index. A random position is drawn within the cuboid. A random velocity * is drawn consistent with the speed of the moving wall. */ __global__ void slit_pore_draw_particles(Scalar4* d_pos, Scalar4* d_vel, unsigned int* d_tag, const BoxDim box, const Scalar4* d_boxes, const uint2* d_ranges, const unsigned int num_boxes, const unsigned int N_tot, const unsigned int type, const unsigned int first_tag, const unsigned int first_idx, const Scalar vel_factor, const uint64_t timestep, const uint16_t seed) { // num_boxes should be 6, so this will all fit in shmem extern __shared__ char s_data[]; Scalar4* s_boxes = (Scalar4*)(&s_data[0]); uint2* s_ranges = (uint2*)(&s_data[sizeof(Scalar4) * num_boxes]); for (unsigned int offset = 0; offset < num_boxes; offset += blockDim.x) { if (offset + threadIdx.x < num_boxes) { const unsigned int boxid = offset + threadIdx.x; s_boxes[boxid] = d_boxes[boxid]; s_ranges[boxid] = d_ranges[boxid]; } } __syncthreads(); // one thread per particle const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N_tot) return; // linear search for box matching thread (num_boxes is small) Scalar3 lo = box.getLo(); Scalar3 hi = box.getHi(); for (unsigned int boxid = 0; boxid < num_boxes; ++boxid) { const uint2 range = s_ranges[boxid]; if (idx >= range.x && idx < range.y) { const Scalar4 fillbox = s_boxes[boxid]; lo.x = fillbox.x; hi.x = fillbox.y; lo.z = fillbox.z; hi.z = fillbox.w; break; } } // particle tag and index const unsigned int tag = first_tag + idx; const unsigned int pidx = first_idx + idx; d_tag[pidx] = tag; // initialize random number generator for positions and velocity hoomd::RandomGenerator rng( hoomd::Seed(hoomd::RNGIdentifier::SlitPoreGeometryFiller, timestep, seed), hoomd::Counter(tag)); d_pos[pidx] = make_scalar4(hoomd::UniformDistribution<Scalar>(lo.x, hi.x)(rng), hoomd::UniformDistribution<Scalar>(lo.y, hi.y)(rng), hoomd::UniformDistribution<Scalar>(lo.z, hi.z)(rng), __int_as_scalar(type)); hoomd::NormalDistribution<Scalar> gen(vel_factor, 0.0); Scalar3 vel; gen(vel.x, vel.y, rng); vel.z = gen(rng); // TODO: should these be given zero net-momentum 
contribution (relative to the frame of // reference?) d_vel[pidx] = make_scalar4(vel.x, vel.y, vel.z, __int_as_scalar(mpcd::detail::NO_CELL)); } } // end namespace kernel /*! * \param d_pos Particle positions * \param d_vel Particle velocities * \param d_tag Particle tags * \param box Local simulation box * \param d_boxes List of 2d bounding boxes for filling * \param d_ranges Particle ranges for each box * \param num_boxes Number of bounding boxes to fill * \param N_tot Total number of particles * \param mass Mass of fill particles * \param type Type of fill particles * \param first_tag First tag of filled particles * \param first_idx First (local) particle index of filled particles * \param kT Temperature for fill particles * \param timestep Current timestep * \param seed User seed to PRNG for drawing velocities * \param block_size Number of threads per block * * \sa kernel::slit_pore_draw_particles */ cudaError_t slit_pore_draw_particles(Scalar4* d_pos, Scalar4* d_vel, unsigned int* d_tag, const BoxDim& box, const Scalar4* d_boxes, const uint2* d_ranges, const unsigned int num_boxes, const unsigned int N_tot, const Scalar mass, const unsigned int type, const unsigned int first_tag, const unsigned int first_idx, const Scalar kT, const uint64_t timestep, const uint16_t seed, const unsigned int block_size) { if (N_tot == 0) return cudaSuccess; static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)kernel::slit_pore_draw_particles); max_block_size = attr.maxThreadsPerBlock; } // precompute factor for rescaling the velocities since it is the same for all particles const Scalar vel_factor = fast::sqrt(kT / mass); unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N_tot / run_block_size + 1); const size_t shared_bytes = num_boxes * (sizeof(Scalar4) + sizeof(uint2)); kernel::slit_pore_draw_particles<<<grid, run_block_size, shared_bytes>>>(d_pos, d_vel, d_tag, box, d_boxes, d_ranges, num_boxes, N_tot, type, first_tag, first_idx, vel_factor, timestep, seed); return cudaSuccess; } } // end namespace gpu } // end namespace mpcd
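When tuning block_size for this filler, it can help to write out the per-launch resource arithmetic the host wrapper performs: one grid of N_tot / block_size + 1 blocks, num_boxes * (sizeof(Scalar4) + sizeof(uint2)) bytes of dynamic shared memory, and a Gaussian velocity scale of sqrt(kT / mass). The stand-alone sketch below uses hypothetical values for N_tot, block_size, kT, and mass, and substitutes double for Scalar; none of these stand-ins come from the HOOMD sources.

#include <cstdio>
#include <cmath>

int main()
{
    const unsigned int num_boxes  = 6;       // the kernel comment above assumes six fill boxes
    const unsigned int N_tot      = 100000;  // hypothetical number of fill particles
    const unsigned int block_size = 256;     // hypothetical run_block_size
    const double kT = 1.0, mass = 1.0;       // hypothetical fill temperature and particle mass

    // Scalar4 is four Scalars and uint2 is two 32-bit ints; with double-precision
    // Scalars that is 32 + 8 = 40 bytes of shared memory per bounding box.
    const size_t shared_bytes = num_boxes * (4 * sizeof(double) + 2 * sizeof(unsigned int));
    const unsigned int grid   = N_tot / block_size + 1;
    const double vel_factor   = std::sqrt(kT / mass);  // scale of the drawn Gaussian velocities

    std::printf("grid = %u blocks, shared memory = %zu bytes, vel_factor = %g\n",
                grid, shared_bytes, vel_factor);
    return 0;
}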