hip_filename (string, lengths 5-84) | hip_content (string, lengths 79-9.69M) | cuda_filename (string, lengths 4-83) | cuda_content (string, lengths 19-9.69M)
---|---|---|---|
c6dcb4286f00d44bbaab36fad5e20103960b88c6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//By: Goodwyn, Zach & Mueller, Jerome
//This program uses a CUDA-enabled GPU to brute-force solve the travelling salesman problem
#include<stdio.h>
#include"math.h"
#include<stdlib.h>
#include<time.h>
#include<sys/time.h> // Used for timing this.
#include<unistd.h> // Used for timing this.
#include<algorithm>//needed for next_permutation
#include<climits>//needed for MAX_INT
#include<assert.h>
#include<time.h>
#include<sys/time.h> // Used for timing this.
#include<unistd.h> // Used for timing this.
#define MAX_PATH 1000
//#define NUM_CORES 96
__host__ __device__ void convertFact(unsigned long long, int*);
__host__ __device__ void orderPermutation(int*, unsigned long long, int);
unsigned long long factorial(unsigned long long);
int* generateArray(int num_Cities);
int charToInt(char* temp_Input);
void checkCuda(hipError_t problem, int id);
__global__ void permute(int* device_Matrix, short* device_Cities, long long* device_Perm_Start_Indecies, int* device_Least_Cost_Array,long long* device_Least_Path_Array, int num_Cities, int threads_Per_Block);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<class _Ty1, class _Ty2>
__device__ inline bool prayer(const _Ty1& _Left, const _Ty2& _Right)
{ // test if _Left < _Right and operator< is strict weak ordering
// cout <<"!!" << endl;
if (!(_Left < _Right))
return (false);
else if (_Right < _Left);
//cout << "help us" << endl;
//_DEBUG_ERROR2("invalid operator<", _File, _Line);
return (true);
}
__device__ inline void swap(short* a, short* b)
{
//cout <<"swap!!" << endl;
int temp = *a;
*a = *b;
*b = temp;
}
__device__ inline void reverse(short* a, short* b)
{
//cout <<"reverse!!" << endl;
b--;
while(a < b)
{
swap(a,b);
a++;
b--;
//cout << "swapping: " << *a << " with " << *b << endl;
}
}
__device__ inline bool nextPerm(short* _First, short* _Last)
{ // permute and test for pure ascending, using operator<
short* _Next = _Last;
if (_First == _Last || _First == --_Next)
return (false);
for (; ; )
{ // find rightmost element smaller than successor
// cout <<"!!" << endl;
short* _Next1 = _Next;
if (prayer(*--_Next, *_Next1))
{ // swap with rightmost element that's smaller, flip suffix
short* _Mid = _Last;
for (; !prayer(*_Next, *--_Mid); )
;
swap(_Next, _Mid);
reverse(_Next1, _Last);
return (true);
}
if (_Next == _First)
{ // pure descending, flip all
reverse(_First, _Last);
return (false);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
//returns the factorial for the given number
//x: the index for which factorial number you want.
//returns: x!
unsigned long long factorial(unsigned long long x)
{
if(x <= 1)//also covers x == 0, which would otherwise recurse indefinitely on an unsigned argument
return 1;
else
return x * factorial(x-1);
}
//converts a number into base factorial
//num: the number to convert to base factorial
//digits: a storage array to store the digits of the base factorial number
//digits are stored in reverse order (so the 2nd digit is in position 1, the third in position 2, etc.)
//digits[0] will contain the length of the number in digits, since the first digit of a base factorial number is always 0
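//Worked example (illustrative, not part of the original comments): convertFact(10, d) leaves d[1] = 0, d[2] = 2,
//d[3] = 1 and d[0] = 4 (the digit count), since 10 = 1*3! + 2*2! + 0*1! in base factorial.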
__host__ __device__ void convertFact(unsigned long long num, int* digits)
{
int numDigits = 1;//there is always a spare 0 in these numbers
while(num > 0)
{
digits[numDigits] = num % (numDigits + 1);
numDigits++;
num /= numDigits;
}
digits[0] = numDigits;//because the first digit is always zero, we will store the length of the array in the 0th slot
}
//rearranges the array into the nth permutation after the given ordering
//toOrder: the set for which you would like to define the nth permutation
//n: the number of the permutation to be stored in toOrder
//size: the size of the array toOrder
//warning: gives unpredictable behavior if n is > the number of unique permutations for toOrder
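//Worked example (illustrative, not part of the original comments): starting from {0,1,2,3},
//orderPermutation(a, 10, 4) rearranges a into {1,3,0,2}, the 10th (0-indexed) permutation in lexicographic order.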
__host__ __device__ void orderPermutation(short* toOrder, unsigned long long n, int size)
{
int swaps[100];
convertFact(n, swaps);
int location = size - swaps[0];//accounts for leading zeros
int loc = swaps[0] - 1;//used to iterate through the digits of the factoradic number
while(loc > 0)
{
int temp = toOrder[location + swaps[loc]];
for(int x = location+swaps[loc]; x > location; x--)
{
toOrder[x] = toOrder[x - 1];
}
toOrder[location] = temp;
location++;
loc--;
}
}
//rearranges the array into the nth permutation after the given ordering
//toOrder: the set for which you would like to define the nth permutation
//n: the number of the permutation to be stored in toOrder
//size: the size of the array toOrder
//warning: gives unpredictable behavior if n is > the number of unique permutations for toOrder
__host__ __device__ void orderPermutation(int* toOrder, unsigned long long n, int size)
{
int swaps[100];
convertFact(n, swaps);
int location = size - swaps[0];//accounts for leading zeros
int loc = swaps[0] - 1;//used to iterate through the digits of the factoradic number
while(loc > 0)
{
int temp = toOrder[location + swaps[loc]];
for(int x = location+swaps[loc]; x > location; x--)
{
toOrder[x] = toOrder[x - 1];
}
toOrder[location] = temp;
location++;
loc--;
}
}
//returns a pointer to a "two" dimensional randomly generated symmetrical adjacency matrix
//num_Cities: used to decide how much memory should be allocated
int* generateArray(int num_Cities)
{
int* temp_Matrix = (int*)malloc(num_Cities*num_Cities*sizeof(int));//one flat num_Cities x num_Cities block of ints
/*for(int index = 0; index < num_Cities; index++)
{
temp_Matrix[index] = (int*)malloc(num_Cities*sizeof(int));
}*/
for(int outer = 0; outer < num_Cities; outer++)
{
for(int inner = 0; inner <= outer; inner++)
{
temp_Matrix[outer * num_Cities + inner] = 1 + rand() % MAX_PATH;
temp_Matrix[inner * num_Cities + outer] = temp_Matrix[outer * num_Cities + inner];
}
}
return temp_Matrix;
}
//Converts a character string to an integer
//temp_Input: A cstring (char*) containing a number to translate
//Gives unpredictable, but not undefined behavior if temp_Input contains non-numeric characters,
//or if temp_Input is too large to be held in an int. Does not support negatives or decimals
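//Example (illustrative, not part of the original comments): charToInt("240") returns 240.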
int charToInt(char* temp_Input)
{
int num_Digit = 0;
int number = 0;
while(temp_Input[num_Digit] != '\0')//loops until the end of the string
{
number = number * 10;//accounts for the place-value of the digits
if(temp_Input[num_Digit] != '0')
{
//in character sets, the character one minus '1' yields zero, '2' minus '1' yields 1, etc.
number += temp_Input[num_Digit] + 1 - '1';
}
num_Digit++;
}
return number;
}
//shorthand to check if a CUDA error occurred
//problem: hipError_t storing the result of a hipMalloc
//id: an integer id that gets printed out with the message, default = -1
void checkCuda(hipError_t problem, int id = -1)
{
if(problem != hipSuccess)
{
printf("%s\n", hipGetErrorString(problem));
printf("Terminating process with id = %d\n", id);
abort();
}
}
//Kernel Function!
//Each thread calculates its starting and ending index, then calculates the total costs of the paths assigned to it, then stores its best result in the device_Least_Cost_Array, and the index of that path in the device_Least_Path_Array
//ALL ARGUMENTS POINT TO THE GPU'S COPY OF THE GIVEN DATA STRUCTURE
//device_Matrix: The adjacency matrix representing the different costs of getting from one city to another
//device_Cities: The array containing EVERY thread's representation of the cities (i.e. each thread gets an array of num_Cities elements containing the numbers 0 - [num_Cities-1]). See comments in the method body for further details
//device_Perm_Start_Indecies: The starting locations for a given Block of threads. It is up to the individual thread to calculate which paths to run based on this array and its thread ID
//device_Least_Cost_Array: When a thread has finished its permutations, it writes the cost of the shortest path it found into a unique slot in this array
//device_Least_Path_Array: The corresponding permutation number for the shortest calculated path. Used to retrieve the city-ordering for the best solution
//num_Cities: The number of cities in this instance of the travelling salesman problem
//threads_Per_Block: The number of threads in a given block.
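//Worked example (illustrative, not part of the original comments): with num_Cities = 4 (4! = 24 permutations),
//2 blocks and 3 threads per block, the host builds device_Perm_Start_Indecies = {0, 12, 24}. Block 0 owns
//permutations [0, 12); within it, thread 0 computes the offsets start_Perm = 0 and end_Perm = 4, thread 1
//covers [4, 8), and thread 2 covers [8, 12), so the block's 12 permutations are split evenly across its threads.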
__global__ void permute(int* device_Matrix, short* device_Cities, long long* device_Perm_Start_Indecies, int* device_Least_Cost_Array,long long* device_Least_Path_Array, int num_Cities, int threads_Per_Block)
{
int id = blockIdx.x * threads_Per_Block + threadIdx.x;//this id is unique for each thread
//each thread works with a subset of the device_Cities array, this next statement procures the starting address of this thread's subset
short* cities = device_Cities + (id * num_Cities);
long long index;//which path permutation the thread is calculating
int tot_Sum;//the running cost of the permutation this thread is calculating
long long least_Path = 0;//the permutation id# of the shortest path this thread has found
int least_Cost = 999999;//the least cost solution this thread has so far found
float sectionRatio = (float)1.0/threads_Per_Block;//calculates what portion of the thread's block's workload needs to be completed by this thread
long long block_Perms = device_Perm_Start_Indecies[blockIdx.x + 1] - device_Perm_Start_Indecies[blockIdx.x];//the total number of permutations in this thread's block's workload
long long start_Perm = (sectionRatio * threadIdx.x) * block_Perms;//An offset denoting which path permutation number this thread should start to calculate
long long end_Perm = (sectionRatio * (threadIdx.x + 1)) * block_Perms;//An offset denoting the one permutation beyond what this thread should calculate
orderPermutation(cities, device_Perm_Start_Indecies[blockIdx.x] + start_Perm, num_Cities);//initializes this thread's cities array to the appropriate ordering
//loops through all the permutations assigned to this thread
for(index = device_Perm_Start_Indecies[blockIdx.x] + start_Perm ; index < device_Perm_Start_Indecies[blockIdx.x] + end_Perm ; index++)
{
tot_Sum = 0;
int inner;
for(inner = 0; inner < num_Cities; inner++)//for each city, looks up the distance to the next city and adds it to a running sum
{
tot_Sum += device_Matrix[cities[inner] * num_Cities + cities[(inner + 1) % num_Cities]];
}
if(tot_Sum < least_Cost)//updates if the solution is the best so far
{
least_Cost = tot_Sum;
least_Path = index;
}
/*for(inner = 0; inner < num_Cities; inner++)//resets the cities array for use in orderPermutation
{
cities[inner] = inner;
}
orderPermutation(cities, index + 1, num_Cities);//sets the cities array to the next permutation
*/
nextPerm(cities,cities+num_Cities);
}
//writes this thread's best solutions to the two arrays for transfer back to the host
device_Least_Cost_Array[id] = least_Cost;
device_Least_Path_Array[id] = least_Path;
}
int main(int argc, char* argv[])
{
//initialize timer
struct timeval startTime, endTime;
gettimeofday(&startTime, NULL);
//variables corresponding to the arguments
unsigned int seeder;
int num_Threads;//(threads per block)
int num_Blocks;
int num_Cities;
int total_Threads;
if(argc != 5)//if an improper number of parameters were passed
{
printf("Error: improper number of arguments\n");
printf("usage: #cities seed #blocks #threadsPerBlock\n");
fflush(stdout);
return 1;//bail out instead of reading argv entries that were never supplied
}
num_Cities = charToInt(argv[1]);
seeder = charToInt(argv[2]);
srand(seeder);
num_Blocks = charToInt(argv[3]);
num_Threads = charToInt(argv[4]);
total_Threads = num_Blocks * num_Threads;
//calculates the starting index for each block
double section_Ratio = double(1)/num_Blocks;
long long total_Perms = factorial(num_Cities);
long long* perm_Start_Indecies = (long long*)malloc((num_Blocks + 1) * sizeof(long long));
int index;
for(index = 0; index < num_Blocks + 1; index++)
{
perm_Start_Indecies[index] = total_Perms * (section_Ratio * index);
// printf("%d index %lld\n", index, perm_Start_Indecies[index]);
// fflush(stdout);
}
//Following section allocates memory on the host and on the device, and transfers the adjacency matrix, the cities array, and the starting index array to the device
hipError_t problemo;
long long* device_Perm_Start_Indecies;
problemo = hipMalloc((void**)&device_Perm_Start_Indecies, sizeof(long long) * (num_Blocks + 1));
checkCuda(problemo,1);
problemo = hipMemcpy(device_Perm_Start_Indecies, perm_Start_Indecies, (sizeof(long long) * (num_Blocks + 1)), hipMemcpyHostToDevice);
checkCuda(problemo,2);
int* adjacency_Matrix = generateArray(num_Cities);
/* int foo = 0;
for(foo; foo < (num_Cities * num_Cities); foo++)
{
printf("%d\t",adjacency_Matrix[foo]);
if((foo + 1) % num_Cities == 0)
{
printf("\n");
}
}
*/
int* device_Matrix;
problemo = hipMalloc((void**)&device_Matrix, num_Cities*num_Cities*sizeof(int));
checkCuda(problemo,3);
problemo = hipMemcpy(device_Matrix, adjacency_Matrix,num_Cities*num_Cities*sizeof(int),hipMemcpyHostToDevice);
checkCuda(problemo,4);
int* device_Least_Cost_Array;
problemo = hipMalloc((void**)&device_Least_Cost_Array, total_Threads * sizeof(int));
checkCuda(problemo,5);
long long* device_Least_Path_Array;
problemo = hipMalloc((void**)&device_Least_Path_Array, total_Threads * sizeof(long long));
checkCuda(problemo,6);
short* cities = (short*)malloc(num_Cities * total_Threads * sizeof(short));
for(index = 0; index < total_Threads; index++)//initializes the cities array with the appropriate values
{
int inner = 0;
for(inner = 0; inner < num_Cities; inner++)
{
cities[index * num_Cities + inner] = inner;
}
}
short* device_Cities;
problemo = hipMalloc((void**)&device_Cities, num_Cities * total_Threads * sizeof(short));
checkCuda(problemo,7);
problemo = hipMemcpy(device_Cities, cities, num_Cities * total_Threads * sizeof(short), hipMemcpyHostToDevice);
checkCuda(problemo,8);
int* least_Cost_Array = (int*)malloc(total_Threads * sizeof(int));
long long* least_Path_Array = (long long*)malloc(total_Threads * sizeof(long long));
for(index = 0; index < total_Threads; index++)
{
least_Cost_Array[index] = 2;
least_Path_Array[index] = 2;
}
//printf("fertig!1\n");
//fflush(stdout);
//kernel call//////////////////////////////////////////////////////////////////////////////////
hipLaunchKernelGGL(( permute), dim3(num_Blocks),dim3(num_Threads), 0, 0, device_Matrix, device_Cities, device_Perm_Start_Indecies, device_Least_Cost_Array, device_Least_Path_Array, num_Cities, num_Threads);
checkCuda(hipGetLastError(),13);
//printf("fertig!2\n");
//fflush(stdout);
//retrieves the arrays storing the best results from each thread
problemo = hipMemcpy(least_Cost_Array, device_Least_Cost_Array,total_Threads * sizeof(int), hipMemcpyDeviceToHost);
checkCuda(problemo,9);
problemo = hipMemcpy(least_Path_Array, device_Least_Path_Array,total_Threads * sizeof(long long), hipMemcpyDeviceToHost);
checkCuda(problemo,10);
//initializes an int[] to store the cities of the best path
int* true_Cities = (int*)malloc(num_Cities*sizeof(int));
index = 0;
for(index = 0; index < num_Cities; index++)
{
true_Cities[index] = index;
}
int block_Index;
int temp_Best = 99999999;
int best_Index = 0;
//calculates the best path of those returned by the GPU
for(block_Index = 0; block_Index < total_Threads; block_Index++)
{
//printf("%d << leastCost element %d\n",least_Cost_Array[block_Index], block_Index);
if(least_Cost_Array[block_Index] < temp_Best)
{
best_Index = block_Index;
temp_Best = least_Cost_Array[block_Index];
}
}
//displays the results
//printf("%d << best! from thread %d\n",temp_Best, best_Index);
orderPermutation(true_Cities, least_Path_Array[best_Index], num_Cities);
index = 0;
for(index = 0; index < num_Cities; index++)
{
printf("%d\t", true_Cities[index]);
}
//printf("\nFinished!\n");
//system("PAUSE");
// Timing code adapted from: http://stackoverflow.com/questions/588307/c-obtaining-milliseconds-time-on-linux-clock-doesnt-seem-to-work-properl
gettimeofday(&endTime, NULL);
long timeDelta, startSeconds, startUSeconds, stopSeconds, stopUSeconds, startTotal, stopTotal;
startSeconds = startTime.tv_sec;
stopSeconds = endTime.tv_sec;
startUSeconds = startTime.tv_usec;
stopUSeconds = endTime.tv_usec;
startTotal = (startSeconds * 1000) + (startUSeconds / 1000);
stopTotal = (stopSeconds * 1000) + (stopUSeconds / 1000);
timeDelta = stopTotal - startTotal;
printf(/*"Time: */%d /*milliseconds*/\n",timeDelta);
}
|
c6dcb4286f00d44bbaab36fad5e20103960b88c6.cu
|
//By: Goodwyn, Zach & Mueller, Jerome
//This program uses a CUDA-enabled GPU to brute-force solve the travelling salesman problem
#include<stdio.h>
#include"math.h"
#include<stdlib.h>
#include<time.h>
#include<sys/time.h> // Used for timing this.
#include<unistd.h> // Used for timing this.
#include<algorithm>//needed for next_permutation
#include<climits>//needed for MAX_INT
#include<assert.h>
#include<time.h>
#include<sys/time.h> // Used for timing this.
#include<unistd.h> // Used for timing this.
#define MAX_PATH 1000
//#define NUM_CORES 96
__host__ __device__ void convertFact(unsigned long long, int*);
__host__ __device__ void orderPermutation(int*, unsigned long long, int);
unsigned long long factorial(unsigned long long);
int* generateArray(int num_Cities);
int charToInt(char* temp_Input);
void checkCuda(cudaError_t problem, int id);
__global__ void permute(int* device_Matrix, short* device_Cities, long long* device_Perm_Start_Indecies, int* device_Least_Cost_Array,long long* device_Least_Path_Array, int num_Cities, int threads_Per_Block);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<class _Ty1, class _Ty2>
__device__ inline bool prayer(const _Ty1& _Left, const _Ty2& _Right)
{ // test if _Left < _Right and operator< is strict weak ordering
// cout <<"!!" << endl;
if (!(_Left < _Right))
return (false);
else if (_Right < _Left);
//cout << "help us" << endl;
//_DEBUG_ERROR2("invalid operator<", _File, _Line);
return (true);
}
__device__ inline void swap(short* a, short* b)
{
//cout <<"swap!!" << endl;
int temp = *a;
*a = *b;
*b = temp;
}
__device__ inline void reverse(short* a, short* b)
{
//cout <<"reverse!!" << endl;
b--;
while(a < b)
{
swap(a,b);
a++;
b--;
//cout << "swapping: " << *a << " with " << *b << endl;
}
}
__device__ inline bool nextPerm(short* _First, short* _Last)
{ // permute and test for pure ascending, using operator<
short* _Next = _Last;
if (_First == _Last || _First == --_Next)
return (false);
for (; ; )
{ // find rightmost element smaller than successor
// cout <<"!!" << endl;
short* _Next1 = _Next;
if (prayer(*--_Next, *_Next1))
{ // swap with rightmost element that's smaller, flip suffix
short* _Mid = _Last;
for (; !prayer(*_Next, *--_Mid); )
;
swap(_Next, _Mid);
reverse(_Next1, _Last);
return (true);
}
if (_Next == _First)
{ // pure descending, flip all
reverse(_First, _Last);
return (false);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
//returns the factorial for the given number
//x: the index for which factorial number you want.
//returns: x!
unsigned long long factorial(unsigned long long x)
{
if(x <= 1)//also covers x == 0, which would otherwise recurse indefinitely on an unsigned argument
return 1;
else
return x * factorial(x-1);
}
//converts a number into base factorial
//num: the number to convert to base factorial
//digits: a storage array to store the digits of the base factorial number
//digits are stored in reverse order (so the 2nd digit is in position 1, the third in position 2, etc.)
//digits[0] will contain the length of the number in digits, since the first digit of a base factorial number is always 0
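//Worked example (illustrative, not part of the original comments): convertFact(10, d) leaves d[1] = 0, d[2] = 2,
//d[3] = 1 and d[0] = 4 (the digit count), since 10 = 1*3! + 2*2! + 0*1! in base factorial.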
__host__ __device__ void convertFact(unsigned long long num, int* digits)
{
int numDigits = 1;//there is always a spare 0 in these numbers
while(num > 0)
{
digits[numDigits] = num % (numDigits + 1);
numDigits++;
num /= numDigits;
}
digits[0] = numDigits;//because the first digit is always zero, we will store the length of the array in the 0th slot
}
//rearranges the array into the nth permutation after the given ordering
//toOrder: the set for which you would like to define the nth permutation
//n: the number of the permutation to be stored in toOrder
//size: the size of the array toOrder
//warning: gives unpredictable behavior if n is > the number of unique permutations for toOrder
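//Worked example (illustrative, not part of the original comments): starting from {0,1,2,3},
//orderPermutation(a, 10, 4) rearranges a into {1,3,0,2}, the 10th (0-indexed) permutation in lexicographic order.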
__host__ __device__ void orderPermutation(short* toOrder, unsigned long long n, int size)
{
int swaps[100];
convertFact(n, swaps);
int location = size - swaps[0];//accounts for leading zeros
int loc = swaps[0] - 1;//used to iterate through the digits of the factoradic number
while(loc > 0)
{
int temp = toOrder[location + swaps[loc]];
for(int x = location+swaps[loc]; x > location; x--)
{
toOrder[x] = toOrder[x - 1];
}
toOrder[location] = temp;
location++;
loc--;
}
}
//rearranges the array into the nth permutation after the given ordering
//toOrder: the set for which you would like to define the nth permutation
//n: the number of the permutation to be stored in toOrder
//size: the size of the array toOrder
//warning: gives unpredictable behavior if n is > the number of unique permutations for toOrder
__host__ __device__ void orderPermutation(int* toOrder, unsigned long long n, int size)
{
int swaps[100];
convertFact(n, swaps);
int location = size - swaps[0];//accounts for leading zeros
int loc = swaps[0] - 1;//used to iterate through the digits of the factoradic number
while(loc > 0)
{
int temp = toOrder[location + swaps[loc]];
for(int x = location+swaps[loc]; x > location; x--)
{
toOrder[x] = toOrder[x - 1];
}
toOrder[location] = temp;
location++;
loc--;
}
}
//returns a pointer to a "two" dimensional randomly generated symmetrical adjacency matrix
//num_Cities: used to decide how much memory should be allocated
int* generateArray(int num_Cities)
{
int* temp_Matrix = (int*)malloc(num_Cities*num_Cities*sizeof(int));//one flat num_Cities x num_Cities block of ints
/*for(int index = 0; index < num_Cities; index++)
{
temp_Matrix[index] = (int*)malloc(num_Cities*sizeof(int));
}*/
for(int outer = 0; outer < num_Cities; outer++)
{
for(int inner = 0; inner <= outer; inner++)
{
temp_Matrix[outer * num_Cities + inner] = 1 + rand() % MAX_PATH;
temp_Matrix[inner * num_Cities + outer] = temp_Matrix[outer * num_Cities + inner];
}
}
return temp_Matrix;
}
//Converts a character string to an integer
//temp_Input: A cstring (char*) containing a number to translate
//Gives unpredictable, but not undefined behavior if temp_Input contains non-numeric characters,
//or if temp_Input is too large to be held in an int. Does not support negatives or decimals
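//Example (illustrative, not part of the original comments): charToInt("240") returns 240.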
int charToInt(char* temp_Input)
{
int num_Digit = 0;
int number = 0;
while(temp_Input[num_Digit] != '\0')//loops until the end of the string
{
number = number * 10;//accounts for the place-value of the digits
if(temp_Input[num_Digit] != '0')
{
//in character sets, the character one minus '1' yields zero, '2' minus '1' yields 1, etc.
number += temp_Input[num_Digit] + 1 - '1';
}
num_Digit++;
}
return number;
}
//shorthand to check if a CUDA error occurred
//problem: cudaError_t storing the result of a cudaMalloc
//id: an integer id that gets printed out with the message, default = -1
void checkCuda(cudaError_t problem, int id = -1)
{
if(problem != cudaSuccess)
{
printf("%s\n", cudaGetErrorString(problem));
printf("Terminating process with id = %d\n", id);
abort();
}
}
//Kernel Function!
//Each thread calculates its starting and ending index, then calculates the total costs of the paths assigned to it, then stores its best result in the device_Least_Cost_Array, and the index of that path in the device_Least_Path_Array
//ALL ARGUMENTS POINT TO THE GPU'S COPY OF THE GIVEN DATA STRUCTURE
//device_Matrix: The adjacency matrix representing the different costs of getting from one city to another
//device_Cities: The array containing EVERY thread's representation of the cities (i.e. each thread gets an array of num_Cities elements containing the numbers 0 - [num_Cities-1]). See comments in the method body for further details
//device_Perm_Start_Indecies: The starting locations for a given Block of threads. It is up to the individual thread to calculate which paths to run based on this array and its thread ID
//device_Least_Cost_Array: When a thread has finished its permutations, it writes the cost of the shortest path it found into a unique slot in this array
//device_Least_Path_Array: The corresponding permutation number for the shortest calculated path. Used to retrieve the city-ordering for the best solution
//num_Cities: The number of cities in this instance of the travelling salesman problem
//threads_Per_Block: The number of threads in a given block.
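//Worked example (illustrative, not part of the original comments): with num_Cities = 4 (4! = 24 permutations),
//2 blocks and 3 threads per block, the host builds device_Perm_Start_Indecies = {0, 12, 24}. Block 0 owns
//permutations [0, 12); within it, thread 0 computes the offsets start_Perm = 0 and end_Perm = 4, thread 1
//covers [4, 8), and thread 2 covers [8, 12), so the block's 12 permutations are split evenly across its threads.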
__global__ void permute(int* device_Matrix, short* device_Cities, long long* device_Perm_Start_Indecies, int* device_Least_Cost_Array,long long* device_Least_Path_Array, int num_Cities, int threads_Per_Block)
{
int id = blockIdx.x * threads_Per_Block + threadIdx.x;//this id is unique for each thread
//each thread works with a subset of the device_Cities array, this next statement procures the starting address of this thread's subset
short* cities = device_Cities + (id * num_Cities);
long long index;//which path permutation the thread is calculating
int tot_Sum;//the running cost of the permutation this thread is calculating
long long least_Path = 0;//the permutation id# of the shortest path this thread has found
int least_Cost = 999999;//the least cost solution this thread has so far found
float sectionRatio = (float)1.0/threads_Per_Block;//calculates what portion of the thread's block's workload needs to be completed by this thread
long long block_Perms = device_Perm_Start_Indecies[blockIdx.x + 1] - device_Perm_Start_Indecies[blockIdx.x];//the total number of permutations in this thread's block's workload
long long start_Perm = (sectionRatio * threadIdx.x) * block_Perms;//An offset denoting which path permutation number this thread should start to calculate
long long end_Perm = (sectionRatio * (threadIdx.x + 1)) * block_Perms;//An offset denoting the one permutation beyond what this thread should calculate
orderPermutation(cities, device_Perm_Start_Indecies[blockIdx.x] + start_Perm, num_Cities);//initializes this thread's cities array to the appropriate ordering
//loops through all the permutations assigned to this thread
for(index = device_Perm_Start_Indecies[blockIdx.x] + start_Perm ; index < device_Perm_Start_Indecies[blockIdx.x] + end_Perm ; index++)
{
tot_Sum = 0;
int inner;
for(inner = 0; inner < num_Cities; inner++)//for each city, looks up the distance to the next city and adds it to a running sum
{
tot_Sum += device_Matrix[cities[inner] * num_Cities + cities[(inner + 1) % num_Cities]];
}
if(tot_Sum < least_Cost)//updates if the solution is the best so far
{
least_Cost = tot_Sum;
least_Path = index;
}
/*for(inner = 0; inner < num_Cities; inner++)//resets the cities array for use in orderPermutation
{
cities[inner] = inner;
}
orderPermutation(cities, index + 1, num_Cities);//sets the cities array to the next permutation
*/
nextPerm(cities,cities+num_Cities);
}
//writes this thread's best solutions to the two arrays for transfer back to the host
device_Least_Cost_Array[id] = least_Cost;
device_Least_Path_Array[id] = least_Path;
}
int main(int argc, char* argv[])
{
//initialize timer
struct timeval startTime, endTime;
gettimeofday(&startTime, NULL);
//variables corresponding to the arguments
unsigned int seeder;
int num_Threads;//(threads per block)
int num_Blocks;
int num_Cities;
int total_Threads;
if(argc != 5)//if an improper number of parameters were passed
{
printf("Error: improper number of arguments\n");
printf("usage: #cities seed #blocks #threadsPerBlock\n");
fflush(stdout);
return 1;//bail out instead of reading argv entries that were never supplied
}
num_Cities = charToInt(argv[1]);
seeder = charToInt(argv[2]);
srand(seeder);
num_Blocks = charToInt(argv[3]);
num_Threads = charToInt(argv[4]);
total_Threads = num_Blocks * num_Threads;
//calculates the starting index for each block
double section_Ratio = double(1)/num_Blocks;
long long total_Perms = factorial(num_Cities);
long long* perm_Start_Indecies = (long long*)malloc((num_Blocks + 1) * sizeof(long long));
int index;
for(index = 0; index < num_Blocks + 1; index++)
{
perm_Start_Indecies[index] = total_Perms * (section_Ratio * index);
// printf("%d index %lld\n", index, perm_Start_Indecies[index]);
// fflush(stdout);
}
//Following section allocates memory on the host and on the device, and transfers the adjacency matrix, the cities array, and the starting index array to the device
cudaError_t problemo;
long long* device_Perm_Start_Indecies;
problemo = cudaMalloc((void**)&device_Perm_Start_Indecies, sizeof(long long) * (num_Blocks + 1));
checkCuda(problemo,1);
problemo = cudaMemcpy(device_Perm_Start_Indecies, perm_Start_Indecies, (sizeof(long long) * (num_Blocks + 1)), cudaMemcpyHostToDevice);
checkCuda(problemo,2);
int* adjacency_Matrix = generateArray(num_Cities);
/* int foo = 0;
for(foo; foo < (num_Cities * num_Cities); foo++)
{
printf("%d\t",adjacency_Matrix[foo]);
if((foo + 1) % num_Cities == 0)
{
printf("\n");
}
}
*/
int* device_Matrix;
problemo = cudaMalloc((void**)&device_Matrix, num_Cities*num_Cities*sizeof(int));
checkCuda(problemo,3);
problemo = cudaMemcpy(device_Matrix, adjacency_Matrix,num_Cities*num_Cities*sizeof(int),cudaMemcpyHostToDevice);
checkCuda(problemo,4);
int* device_Least_Cost_Array;
problemo = cudaMalloc((void**)&device_Least_Cost_Array, total_Threads * sizeof(int));
checkCuda(problemo,5);
long long* device_Least_Path_Array;
problemo = cudaMalloc((void**)&device_Least_Path_Array, total_Threads * sizeof(long long));
checkCuda(problemo,6);
short* cities = (short*)malloc(num_Cities * total_Threads * sizeof(short));
for(index = 0; index < total_Threads; index++)//initializes the cities array with the appropriate values
{
int inner = 0;
for(inner = 0; inner < num_Cities; inner++)
{
cities[index * num_Cities + inner] = inner;
}
}
short* device_Cities;
problemo = cudaMalloc((void**)&device_Cities, num_Cities * total_Threads * sizeof(short));
checkCuda(problemo,7);
problemo = cudaMemcpy(device_Cities, cities, num_Cities * total_Threads * sizeof(short), cudaMemcpyHostToDevice);
checkCuda(problemo,8);
int* least_Cost_Array = (int*)malloc(total_Threads * sizeof(int));
long long* least_Path_Array = (long long*)malloc(total_Threads * sizeof(long long));
for(index = 0; index < total_Threads; index++)
{
least_Cost_Array[index] = 2;
least_Path_Array[index] = 2;
}
//printf("fertig!1\n");
//fflush(stdout);
//kernel call//////////////////////////////////////////////////////////////////////////////////
permute<<<num_Blocks,num_Threads>>>(device_Matrix, device_Cities, device_Perm_Start_Indecies, device_Least_Cost_Array, device_Least_Path_Array, num_Cities, num_Threads);
checkCuda(cudaGetLastError(),13);
//printf("fertig!2\n");
//fflush(stdout);
//retrieves the arrays storing the best results from each thread
problemo = cudaMemcpy(least_Cost_Array, device_Least_Cost_Array,total_Threads * sizeof(int), cudaMemcpyDeviceToHost);
checkCuda(problemo,9);
problemo = cudaMemcpy(least_Path_Array, device_Least_Path_Array,total_Threads * sizeof(long long), cudaMemcpyDeviceToHost);
checkCuda(problemo,10);
//initializes an int[] to store the cities of the best path
int* true_Cities = (int*)malloc(num_Cities*sizeof(int));
index = 0;
for(index = 0; index < num_Cities; index++)
{
true_Cities[index] = index;
}
int block_Index;
int temp_Best = 99999999;
int best_Index = 0;
//calculates the best path of those returned by the GPU
for(block_Index = 0; block_Index < total_Threads; block_Index++)
{
//printf("%d << leastCost element %d\n",least_Cost_Array[block_Index], block_Index);
if(least_Cost_Array[block_Index] < temp_Best)
{
best_Index = block_Index;
temp_Best = least_Cost_Array[block_Index];
}
}
//displays the results
//printf("%d << best! from thread %d\n",temp_Best, best_Index);
orderPermutation(true_Cities, least_Path_Array[best_Index], num_Cities);
index = 0;
for(index = 0; index < num_Cities; index++)
{
printf("%d\t", true_Cities[index]);
}
//printf("\nFinished!\n");
//system("PAUSE");
// Timing code adapted from: http://stackoverflow.com/questions/588307/c-obtaining-milliseconds-time-on-linux-clock-doesnt-seem-to-work-properl
gettimeofday(&endTime, NULL);
long timeDelta, startSeconds, startUSeconds, stopSeconds, stopUSeconds, startTotal, stopTotal;
startSeconds = startTime.tv_sec;
stopSeconds = endTime.tv_sec;
startUSeconds = startTime.tv_usec;
stopUSeconds = endTime.tv_usec;
startTotal = (startSeconds * 1000) + (startUSeconds / 1000);
stopTotal = (stopSeconds * 1000) + (stopUSeconds / 1000);
timeDelta = stopTotal - startTotal;
printf(/*"Time: */%d /*milliseconds*/\n",timeDelta);
}
|
57969c95d5c3df5583b5a54936b9926fbd6a613a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaGesv(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
AT_ERROR("gesv only takes float or double Tensors");
}
template<class scalar_t>
void magmaGesvBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
AT_ERROR("gesv only takes float or double Tensors");
}
template<class scalar_t>
void magmaGetrfBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
AT_ERROR("getrf only takes float or double Tensors");
}
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("getri only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
AT_ERROR("cholesky_solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("cholesky_solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info) {
AT_ERROR("cholesky only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("cholesky only takes float or double Tensors");
}
template<>
void magmaGesvBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
template<>
void magmaGesvBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
template<>
void magmaGesv<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}
template<>
void magmaGesv<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}
template<>
void magmaGetrfBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaGetrfBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
magma_spotrf_gpu(uplo, n, dA, ldda, info);
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
#endif
#define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \
auto storage_##name = pin_memory<type>(size, dummy_tensor); \
name = static_cast<type*>(storage_##name.data());
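// Illustrative expansion (not part of the original source): ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b)
// expands to
//   auto storage_info_array = pin_memory<magma_int_t>(batch_size, b);
//   info_array = static_cast<magma_int_t*>(storage_info_array.data());
// i.e. the raw pointer is backed by pinned host memory whose lifetime is tied to the enclosing scope.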
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ gesv ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gesv(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("gesv: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaGesv<scalar_t>(n, nrhs, A_data, n, ipiv.data<magma_int_t>(),
b_data, n, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
magmaGesvBatched<scalar_t>(
n, nrhs, A_array, n, ipiv_array, b_array, n,
info_array, batch_size, magma_queue);
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _gesv_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_TYPES(self.type(), "gesv", [&]{
apply_gesv<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "gesv");
} else {
singleCheckErrors(infos[0], "gesv");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_inverse(Tensor &self, Tensor &self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, self);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, self);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size, self_inv);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
magmaGetrfBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
magmaGetriBatched<scalar_t>(
n, self_array, n, ipiv_array, self_inv_array,
n, info_array, batch_size, magma_queue);
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
// Because this is out-of-place inverse, the predefined macros will
// not work
Tensor _inverse_helper_cuda(const Tensor& self) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.type(), "inverse", [&]{
apply_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse");
return self_inv_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array, n, b_array, n,
info_tmp, batch_size, magma_queue);
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.type(), "cholesky_solve", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
AT_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array, n, info_array,
batch_size, magma_queue);
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
AT_DISPATCH_FLOATING_TYPES(self.type(), "cholesky", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky");
} else {
singleCheckErrors(infos[0], "cholesky");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
57969c95d5c3df5583b5a54936b9926fbd6a613a.cu
|
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaGesv(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
AT_ERROR("gesv only takes float or double Tensors");
}
template<class scalar_t>
void magmaGesvBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
AT_ERROR("gesv only takes float or double Tensors");
}
template<class scalar_t>
void magmaGetrfBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
AT_ERROR("getrf only takes float or double Tensors");
}
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("getri only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
AT_ERROR("cholesky_solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("cholesky_solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info) {
AT_ERROR("cholesky only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("cholesky only takes float or double Tensors");
}
template<>
void magmaGesvBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
template<>
void magmaGesvBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
template<>
void magmaGesv<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}
template<>
void magmaGesv<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}
template<>
void magmaGetrfBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaGetrfBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
magma_spotrf_gpu(uplo, n, dA, ldda, info);
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
#endif
#define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \
auto storage_##name = pin_memory<type>(size, dummy_tensor); \
name = static_cast<type*>(storage_##name.data());
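// Illustrative expansion (not part of the original source): ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b)
// expands to
//   auto storage_info_array = pin_memory<magma_int_t>(batch_size, b);
//   info_array = static_cast<magma_int_t*>(storage_info_array.data());
// i.e. the raw pointer is backed by pinned host memory whose lifetime is tied to the enclosing scope.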
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ gesv ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gesv(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("gesv: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaGesv<scalar_t>(n, nrhs, A_data, n, ipiv.data<magma_int_t>(),
b_data, n, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
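    // The batched solver works on arrays of per-matrix device pointers: entry i
    // points at the i-th matrix / right-hand side / pivot block, and MAGMA
    // reports a separate info code per matrix in info_array.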
MAGMAQueue magma_queue(b.get_device());
magmaGesvBatched<scalar_t>(
n, nrhs, A_array, n, ipiv_array, b_array, n,
info_array, batch_size, magma_queue);
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _gesv_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_TYPES(self.type(), "gesv", [&]{
apply_gesv<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "gesv");
} else {
singleCheckErrors(infos[0], "gesv");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
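// Illustrative host-side sketch only (hypothetical tensors, not part of this
// file): with 3-D CUDA inputs of a floating dtype the batched path in
// apply_gesv is taken, e.g.
//   auto A = at::randn({4, 3, 3}, at::TensorOptions().dtype(at::kDouble).device(at::kCUDA));
//   auto b = at::randn({4, 3, 1}, A.options());
//   Tensor solution, lu;
//   std::tie(solution, lu) = _gesv_helper_cuda(b, A);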
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_inverse(Tensor &self, Tensor &self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, self);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, self);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size, self_inv);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
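  // Batched inversion is done in two MAGMA calls: LU-factorize each matrix in
  // place (getrf), then form the inverse out of place into self_inv (getri).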
magmaGetrfBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
magmaGetriBatched<scalar_t>(
n, self_array, n, ipiv_array, self_inv_array,
n, info_array, batch_size, magma_queue);
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
// Because this is out-of-place inverse, the predefined macros will
// not work
Tensor _inverse_helper_cuda(const Tensor& self) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.type(), "inverse", [&]{
apply_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse");
return self_inv_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
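  // Note: A is expected to already contain its Cholesky factor; potrs only
  // performs the triangular solves against b.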
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array, n, b_array, n,
info_tmp, batch_size, magma_queue);
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.type(), "cholesky_solve", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
AT_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array, n, info_array,
batch_size, magma_queue);
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
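  // Only the lower-triangular MAGMA path is exercised: for upper=true the
  // input is transposed, factored with upper=false below, and transposed back
  // before returning.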
AT_DISPATCH_FLOATING_TYPES(self.type(), "cholesky", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky");
} else {
singleCheckErrors(infos[0], "cholesky");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
cd705d3d1ab942aec0ad6006c6b660f87dea5952.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgeqr2x_gpu-v4.cu, normal z -> s, Mon Jun 25 18:24:14 2018
*/
#include "magma_internal.h"
#include "commonblas_s.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/***************************************************************************//**
Purpose
-------
SGEQR2 computes a QR factorization of a real m by n matrix A:
A = Q * R.
This expert routine requires two more arguments than the standard
sgeqr2, namely, dT and ddA, explained below. The storage for A is
    also not as in LAPACK's sgeqr2 routine (see below).
The first is used to output the triangular
n x n factor T of the block reflector used in the factorization.
The second holds the diagonal nxn blocks of A, i.e., the diagonal
submatrices of R. This routine implements the left looking QR.
This version adds internal blocking.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA REAL array, dimension (LDA,N)
On entry, the m by n matrix A.
On exit, the orthogonal matrix Q as a
product of elementary reflectors (see Further Details).
\n
the elements on and above the diagonal of the array
contain the min(m,n) by n upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the orthogonal matrix Q as a
product of elementary reflectors (see Further Details).
@param[in]
ldda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
dtau REAL array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
@param[out]
dT REAL array, dimension N x N.
Stores the triangular N x N factor T of the block reflector
used in the factorization. The lower triangular part is 0.
@param[out]
ddA REAL array, dimension N x N.
Stores the elements of the upper N x N diagonal block of A.
LAPACK stores this array in A. There are 0s below the diagonal.
@param
dwork (workspace) REAL array, dimension (3 N)
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
Further Details
---------------
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v**H
where tau is a real scalar, and v is a real vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
@ingroup magma_geqr2
*******************************************************************************/
extern "C" magma_int_t
magma_sgeqr2x4_gpu(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magmaFloat_ptr dtau,
magmaFloat_ptr dT,
magmaFloat_ptr ddA,
magmaFloat_ptr dwork,
magma_queue_t queue,
magma_int_t *info)
{
#define dA(i_,j_) (dA + (j_)*(ldda) + (i_))
#define dT(i_,j_) (dT + (j_)*(k) + (i_))
#define BS 32
magma_int_t i, k;
magmaFloat_ptr dnorm = (magmaFloat_ptr)dwork;
magmaFloat_ptr dwork2 = (magmaFloat_ptr)(dwork + 2*n);
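    /* dwork is the 3*n workspace documented above: dnorm aliases its first
       2*n entries (column norms), dwork2 the remaining n entries used as
       scratch by the gemv/trmv helper kernels. */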
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
/* Compute the norms of the trailing columns */
k = min(m,n);
magmablas_snrm2_cols( m, k, dA(0,0), ldda, dnorm, queue );
for (magma_int_t b=0; b < k; b += BS) {
for (i = b; i < min(k, b+BS); ++i) {
/* Apply H**H to A(:,i) from the left */
if (i-b > 0) {
/* Compute the (i-1)th column of T */
if ( i-1 > 0 ) {
hipLaunchKernelGGL(( magma_sgemv_kernel3)
, dim3(i-1), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1);
hipLaunchKernelGGL(( magma_strmv_kernel2)
, dim3(i-1), dim3(i-1), 0, queue->cuda_stream() ,
dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1);
}
/* dwork = V**H c */
hipLaunchKernelGGL(( magma_sgemv_kernel1)
, dim3(i-b), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m-b, dA(b, b), ldda, dA(b,i), dwork2);
/* dwork = T**H dwork2 */
hipLaunchKernelGGL(( magma_strmv_tkernel)
, dim3(i-b), dim3(i-b), 0, queue->cuda_stream() ,
dT(b,b), k, dwork2, dwork2+i-b);
/* c = c - V dwork2 */
if ( m-b > 0 ) {
dim3 blocks3( magma_ceildiv( m-b, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_sgemv_kernel2)
, dim3(blocks3), dim3(threads3), 0, queue->cuda_stream() ,
m-b, i-b, dA(b,b), ldda, dwork2+i-b, dA(b, i));
}
}
/* Adjust the dnorm[i] to hold the norm of A(i:m,i) */
if ( i > 0 ) {
hipLaunchKernelGGL(( magma_snrm2_adjust_kernel)
, dim3(1), dim3(i), 0, queue->cuda_stream() ,
dnorm+i, dA(0, i));
}
/* Generate elementary reflector H(i) to annihilate A(i+1:m,i)
1. 1 is not yet put on the diagonal of A
2. Elements above the diagonal are copied in ddA and
the ones in A are set to zero
3. update T */
magma_slarfgx_gpu( m-i, dA(i, i), dA(min(i+1,m),i), dtau+i,
dnorm+i, ddA + i + i*n, i, queue );
if (i == 0) {
float tt = MAGMA_S_ONE;
magmablas_slacpy( MagmaFull, 1, 1, dtau, 1, dT(0,0), 1, queue );
magma_ssetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, queue );
}
}
if ( i-1 > 0 ) {
hipLaunchKernelGGL(( magma_sgemv_kernel3)
, dim3(i-1), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1);
hipLaunchKernelGGL(( magma_strmv_kernel2)
, dim3(i-1), dim3(i-1), 0, queue->cuda_stream() ,
dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1);
}
/* Apply the transformations to the trailing matrix. */
//magma_slarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise,
magma_slarfb2_gpu(
m-b, k-i, BS,
dA(b, b), ldda, dT+b+b*k, k,
dA(b, i), ldda, dwork2, k-i, queue );
}
return *info;
} /* magma_sgeqr2 */
|
cd705d3d1ab942aec0ad6006c6b660f87dea5952.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgeqr2x_gpu-v4.cu, normal z -> s, Mon Jun 25 18:24:14 2018
*/
#include "magma_internal.h"
#include "commonblas_s.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/***************************************************************************//**
Purpose
-------
SGEQR2 computes a QR factorization of a real m by n matrix A:
A = Q * R.
This expert routine requires two more arguments than the standard
sgeqr2, namely, dT and ddA, explained below. The storage for A is
    also not as in LAPACK's sgeqr2 routine (see below).
The first is used to output the triangular
n x n factor T of the block reflector used in the factorization.
The second holds the diagonal nxn blocks of A, i.e., the diagonal
submatrices of R. This routine implements the left looking QR.
This version adds internal blocking.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA REAL array, dimension (LDA,N)
On entry, the m by n matrix A.
On exit, the orthogonal matrix Q as a
product of elementary reflectors (see Further Details).
\n
the elements on and above the diagonal of the array
contain the min(m,n) by n upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the orthogonal matrix Q as a
product of elementary reflectors (see Further Details).
@param[in]
ldda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
dtau REAL array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
@param[out]
dT REAL array, dimension N x N.
Stores the triangular N x N factor T of the block reflector
used in the factorization. The lower triangular part is 0.
@param[out]
ddA REAL array, dimension N x N.
Stores the elements of the upper N x N diagonal block of A.
LAPACK stores this array in A. There are 0s below the diagonal.
@param
dwork (workspace) REAL array, dimension (3 N)
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
Further Details
---------------
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v**H
where tau is a real scalar, and v is a real vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
@ingroup magma_geqr2
*******************************************************************************/
extern "C" magma_int_t
magma_sgeqr2x4_gpu(
magma_int_t m, magma_int_t n,
magmaFloat_ptr dA, magma_int_t ldda,
magmaFloat_ptr dtau,
magmaFloat_ptr dT,
magmaFloat_ptr ddA,
magmaFloat_ptr dwork,
magma_queue_t queue,
magma_int_t *info)
{
#define dA(i_,j_) (dA + (j_)*(ldda) + (i_))
#define dT(i_,j_) (dT + (j_)*(k) + (i_))
#define BS 32
magma_int_t i, k;
magmaFloat_ptr dnorm = (magmaFloat_ptr)dwork;
magmaFloat_ptr dwork2 = (magmaFloat_ptr)(dwork + 2*n);
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
/* Compute the norms of the trailing columns */
k = min(m,n);
magmablas_snrm2_cols( m, k, dA(0,0), ldda, dnorm, queue );
for (magma_int_t b=0; b < k; b += BS) {
for (i = b; i < min(k, b+BS); ++i) {
/* Apply H**H to A(:,i) from the left */
if (i-b > 0) {
/* Compute the (i-1)th column of T */
if ( i-1 > 0 ) {
magma_sgemv_kernel3
<<< i-1, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1);
magma_strmv_kernel2
<<< i-1, i-1, 0, queue->cuda_stream() >>>
( dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1);
}
/* dwork = V**H c */
magma_sgemv_kernel1
<<< i-b, BLOCK_SIZE, 0, queue->cuda_stream() >>>
(m-b, dA(b, b), ldda, dA(b,i), dwork2);
/* dwork = T**H dwork2 */
magma_strmv_tkernel
<<< i-b, i-b, 0, queue->cuda_stream() >>>
(dT(b,b), k, dwork2, dwork2+i-b);
/* c = c - V dwork2 */
if ( m-b > 0 ) {
dim3 blocks3( magma_ceildiv( m-b, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
magma_sgemv_kernel2
<<< blocks3, threads3, 0, queue->cuda_stream() >>>
(m-b, i-b, dA(b,b), ldda, dwork2+i-b, dA(b, i));
}
}
/* Adjust the dnorm[i] to hold the norm of A(i:m,i) */
if ( i > 0 ) {
magma_snrm2_adjust_kernel
<<< 1, i, 0, queue->cuda_stream() >>>
(dnorm+i, dA(0, i));
}
/* Generate elementary reflector H(i) to annihilate A(i+1:m,i)
1. 1 is not yet put on the diagonal of A
2. Elements above the diagonal are copied in ddA and
the ones in A are set to zero
3. update T */
magma_slarfgx_gpu( m-i, dA(i, i), dA(min(i+1,m),i), dtau+i,
dnorm+i, ddA + i + i*n, i, queue );
if (i == 0) {
float tt = MAGMA_S_ONE;
magmablas_slacpy( MagmaFull, 1, 1, dtau, 1, dT(0,0), 1, queue );
magma_ssetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, queue );
}
}
if ( i-1 > 0 ) {
magma_sgemv_kernel3
<<< i-1, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1);
magma_strmv_kernel2
<<< i-1, i-1, 0, queue->cuda_stream() >>>
( dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1);
}
/* Apply the transformations to the trailing matrix. */
//magma_slarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise,
magma_slarfb2_gpu(
m-b, k-i, BS,
dA(b, b), ldda, dT+b+b*k, k,
dA(b, i), ldda, dwork2, k-i, queue );
}
return *info;
} /* magma_sgeqr2 */
|
3a41e493a67ddbe5a977cfb46a4d07e17b7b5216.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
swsharp - CUDA parallelized Smith Waterman applying Hirschberg's and
Ukkonen's algorithms and dynamic cell pruning.
Copyright (C) 2013 Matija Korpar, contributor Mile Šikić
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact the author by [email protected].
*/
#ifdef __HIPCC__
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "chain.h"
#include "constants.h"
#include "cuda_utils.h"
#include "error.h"
#include "scorer.h"
#include "thread.h"
#include "utils.h"
#include "score_database_gpu_long.h"
#define THREADS 64
#define BLOCKS 240
#define MAX_THREADS THREADS
#define INT4_ZERO make_int4(0, 0, 0, 0)
#define INT4_SCORE_MIN make_int4(SCORE_MIN, SCORE_MIN, SCORE_MIN, SCORE_MIN)
typedef struct GpuDatabase {
int card;
char* codes;
int* starts;
int* lengths;
int* indexes;
int* scores;
int2* hBus;
} GpuDatabase;
struct LongDatabase {
Chain** database;
int databaseLen;
int length;
int* order;
int* positions;
int* indexes;
GpuDatabase* gpuDatabases;
int gpuDatabasesLen;
};
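// Bookkeeping convention (see createDatabase): order[i] is the original
// database index of the i-th chain kept after length filtering, positions[]
// is its inverse (-1 for filtered-out chains), and indexes[] is the default
// 0..length-1 identity mapping.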
typedef struct Context {
int* scores;
int type;
Chain** queries;
int queriesLen;
LongDatabase* longDatabase;
Scorer* scorer;
int* indexes;
int indexesLen;
int* cards;
int cardsLen;
} Context;
typedef struct QueryProfile {
int height;
int width;
int length;
char4* data;
size_t size;
} QueryProfile;
typedef struct QueryProfileGpu {
hipArray* data;
} QueryProfileGpu;
typedef void (*ScoringFunction)(char*, int*, int*, int*, int*, int2*);
typedef struct KernelContext {
int* scores;
ScoringFunction scoringFunction;
QueryProfile* queryProfile;
Chain* query;
LongDatabase* longDatabase;
Scorer* scorer;
int* indexes;
int indexesLen;
int card;
} KernelContext;
typedef struct KernelContexts {
KernelContext* contexts;
int contextsLen;
long long cells;
} KernelContexts;
typedef struct Atom {
int mch;
int2 up;
int4 lScr;
int4 lAff;
int4 rScr;
int4 rAff;
} Atom;
static __constant__ int gapOpen_;
static __constant__ int gapExtend_;
static __constant__ int rows_;
static __constant__ int rowsPadded_;
static __constant__ int length_;
static __constant__ int iters_;
texture<char4, 2, hipReadModeElementType> qpTexture;
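// The query profile lives in a 2D texture: x indexes the database residue
// code, y indexes a group of four consecutive query rows, so a single tex2D
// fetch yields the four substitution scores a thread needs for one column.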
//******************************************************************************
// PUBLIC
extern LongDatabase* longDatabaseCreate(Chain** database, int databaseLen,
int minLen, int maxLen, int* cards, int cardsLen);
extern void longDatabaseDelete(LongDatabase* longDatabase);
extern void scoreLongDatabaseGpu(int* scores, int type, Chain* query,
LongDatabase* longDatabase, Scorer* scorer, int* indexes, int indexesLen,
int* cards, int cardsLen, Thread* thread);
extern void scoreLongDatabasesGpu(int* scores, int type, Chain** queries,
int queriesLen, LongDatabase* longDatabase, Scorer* scorer, int* indexes,
int indexesLen, int* cards, int cardsLen, Thread* thread);
//******************************************************************************
//******************************************************************************
// PRIVATE
// constructor
static LongDatabase* createDatabase(Chain** database, int databaseLen,
int minLen, int maxLen, int* cards, int cardsLen);
// destructor
static void deleteDatabase(LongDatabase* database);
// scoring
static void scoreDatabase(int* scores, int type, Chain** queries,
int queriesLen, LongDatabase* longDatabase, Scorer* scorer, int* indexes,
int indexesLen, int* cards, int cardsLen, Thread* thread);
static void* scoreDatabaseThread(void* param);
static void scoreDatabaseMulti(int* scores, ScoringFunction scoringFunction,
Chain** queries, int queriesLen, LongDatabase* longDatabase, Scorer* scorer,
int* indexes, int indexesLen, int* cards, int cardsLen);
static void scoreDatabaseSingle(int* scores, ScoringFunction scoringFunction,
Chain** queries, int queriesLen, LongDatabase* longDatabase, Scorer* scorer,
int* indexes, int indexesLen, int* cards, int cardsLen);
// cpu kernels
static void* kernelThread(void* param);
static void* kernelsThread(void* param);
// gpu kernels
__global__ void hwSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus);
__global__ void nwSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus);
__global__ void ovSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus);
__global__ void swSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus);
__device__ static int gap(int index);
__device__ void hwSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus);
__device__ void nwSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus);
__device__ void ovSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus);
__device__ void swSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus);
// query profile
static QueryProfile* createQueryProfile(Chain* query, Scorer* scorer);
static void deleteQueryProfile(QueryProfile* queryProfile);
static QueryProfileGpu* createQueryProfileGpu(QueryProfile* queryProfile);
static void deleteQueryProfileGpu(QueryProfileGpu* queryProfileGpu);
//******************************************************************************
//******************************************************************************
// PUBLIC
//------------------------------------------------------------------------------
// CONSTRUCTOR, DESTRUCTOR
extern LongDatabase* longDatabaseCreate(Chain** database, int databaseLen,
int minLen, int maxLen, int* cards, int cardsLen) {
return createDatabase(database, databaseLen, minLen, maxLen, cards, cardsLen);
}
extern void longDatabaseDelete(LongDatabase* longDatabase) {
deleteDatabase(longDatabase);
}
extern size_t longDatabaseGpuMemoryConsumption(Chain** database, int databaseLen,
int minLen, int maxLen) {
int length = 0;
long codesLen = 0;
for (int i = 0; i < databaseLen; ++i) {
const int n = chainGetLength(database[i]);
if (n >= minLen && n < maxLen) {
codesLen += n;
length++;
}
}
size_t lengthsSize = length * sizeof(int);
size_t startsSize = length * sizeof(int);
size_t codesSize = codesLen * sizeof(char);
size_t indexesSize = length * sizeof(int);
size_t scoresSize = length * sizeof(int);
size_t hBusSize = codesLen * sizeof(int2);
size_t memory = codesSize + startsSize + lengthsSize + indexesSize +
scoresSize + hBusSize;
return memory;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// CPU KERNELS
extern void scoreLongDatabaseGpu(int* scores, int type, Chain* query,
LongDatabase* longDatabase, Scorer* scorer, int* indexes, int indexesLen,
int* cards, int cardsLen, Thread* thread) {
scoreDatabase(scores, type, &query, 1, longDatabase, scorer, indexes,
indexesLen, cards, cardsLen, thread);
}
extern void scoreLongDatabasesGpu(int* scores, int type, Chain** queries,
int queriesLen, LongDatabase* longDatabase, Scorer* scorer, int* indexes,
int indexesLen, int* cards, int cardsLen, Thread* thread) {
scoreDatabase(scores, type, queries, queriesLen, longDatabase, scorer,
indexes, indexesLen, cards, cardsLen, thread);
}
//------------------------------------------------------------------------------
//******************************************************************************
//******************************************************************************
// PRIVATE
//------------------------------------------------------------------------------
// CONSTRUCTOR, DESTRUCTOR
static LongDatabase* createDatabase(Chain** database, int databaseLen,
int minLen, int maxLen, int* cards, int cardsLen) {
//**************************************************************************
    // FILTER DATABASE AND REMEMBER ORDER
int length = 0;
for (int i = 0; i < databaseLen; ++i) {
const int n = chainGetLength(database[i]);
if (n >= minLen && n < maxLen) {
length++;
}
}
if (length == 0) {
return NULL;
}
int* order = (int*) malloc(length * sizeof(int));
for (int i = 0, j = 0; i < databaseLen; ++i) {
const int n = chainGetLength(database[i]);
if (n >= minLen && n < maxLen) {
order[j++] = i;
}
}
LOG("Long database length: %d", length);
//**************************************************************************
//**************************************************************************
// CALCULATE DIMENSIONS
long codesLen = 0;
for (int i = 0; i < length; ++i) {
codesLen += chainGetLength(database[order[i]]);
}
LOG("Long database cells: %ld", codesLen);
//**************************************************************************
//**************************************************************************
// INIT STRUCTURES
size_t lengthsSize = length * sizeof(int);
int* lengths = (int*) malloc(lengthsSize);
size_t startsSize = length * sizeof(int);
int* starts = (int*) malloc(startsSize);
size_t codesSize = codesLen * sizeof(char);
char* codes = (char*) malloc(codesSize);
//**************************************************************************
//**************************************************************************
// CREATE STRUCTURES
long codesOff = 0;
for (int i = 0; i < length; ++i) {
Chain* chain = database[order[i]];
int n = chainGetLength(chain);
lengths[i] = n;
starts[i] = codesOff;
chainCopyCodes(chain, codes + codesOff);
codesOff += n;
}
//**************************************************************************
//**************************************************************************
// CREATE DEFAULT INDEXES
size_t indexesSize = length * sizeof(int);
int* indexes = (int*) malloc(indexesSize);
for (int i = 0; i < length; ++i) {
indexes[i] = i;
}
//**************************************************************************
//**************************************************************************
// CREATE POSITION ARRAY
int* positions = (int*) malloc(databaseLen * sizeof(int));
for (int i = 0; i < databaseLen; ++i) {
positions[i] = -1;
}
for (int i = 0; i < length; ++i) {
positions[order[i]] = i;
}
//**************************************************************************
//**************************************************************************
// CREATE GPU DATABASES
size_t gpuDatabasesSize = cardsLen * sizeof(GpuDatabase);
GpuDatabase* gpuDatabases = (GpuDatabase*) malloc(gpuDatabasesSize);
for (int i = 0; i < cardsLen; ++i) {
int card = cards[i];
CUDA_SAFE_CALL(hipSetDevice(card));
char* codesGpu;
CUDA_SAFE_CALL(hipMalloc(&codesGpu, codesSize));
CUDA_SAFE_CALL(hipMemcpy(codesGpu, codes, codesSize, TO_GPU));
int* startsGpu;
CUDA_SAFE_CALL(hipMalloc(&startsGpu, startsSize));
CUDA_SAFE_CALL(hipMemcpy(startsGpu, starts, startsSize, TO_GPU));
int* lengthsGpu;
CUDA_SAFE_CALL(hipMalloc(&lengthsGpu, lengthsSize));
CUDA_SAFE_CALL(hipMemcpy(lengthsGpu, lengths, lengthsSize, TO_GPU));
int* indexesGpu;
CUDA_SAFE_CALL(hipMalloc(&indexesGpu, indexesSize));
CUDA_SAFE_CALL(hipMemcpy(indexesGpu, indexes, indexesSize, TO_GPU));
// additional structures
size_t scoresSize = length * sizeof(int);
int* scoresGpu;
CUDA_SAFE_CALL(hipMalloc(&scoresGpu, scoresSize));
int2* hBusGpu;
size_t hBusSize = codesLen * sizeof(int2);
CUDA_SAFE_CALL(hipMalloc(&hBusGpu, hBusSize));
gpuDatabases[i].card = card;
gpuDatabases[i].codes = codesGpu;
gpuDatabases[i].starts = startsGpu;
gpuDatabases[i].lengths = lengthsGpu;
gpuDatabases[i].indexes = indexesGpu;
gpuDatabases[i].scores = scoresGpu;
gpuDatabases[i].hBus = hBusGpu;
#ifdef DEBUG
size_t memory = codesSize + startsSize + lengthsSize + indexesSize +
scoresSize + hBusSize;
LOG("Long database using %.2lfMBs on card %d", memory / 1024.0 / 1024.0, card);
#endif
}
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
free(codes);
free(starts);
free(lengths);
//**************************************************************************
size_t longDatabaseSize = sizeof(struct LongDatabase);
LongDatabase* longDatabase = (LongDatabase*) malloc(longDatabaseSize);
longDatabase->database = database;
longDatabase->databaseLen = databaseLen;
longDatabase->length = length;
longDatabase->order = order;
longDatabase->positions = positions;
longDatabase->indexes = indexes;
longDatabase->gpuDatabases = gpuDatabases;
longDatabase->gpuDatabasesLen = cardsLen;
return longDatabase;
}
static void deleteDatabase(LongDatabase* database) {
if (database == NULL) {
return;
}
for (int i = 0; i < database->gpuDatabasesLen; ++i) {
GpuDatabase* gpuDatabase = &(database->gpuDatabases[i]);
CUDA_SAFE_CALL(hipSetDevice(gpuDatabase->card));
CUDA_SAFE_CALL(hipFree(gpuDatabase->codes));
CUDA_SAFE_CALL(hipFree(gpuDatabase->starts));
CUDA_SAFE_CALL(hipFree(gpuDatabase->lengths));
CUDA_SAFE_CALL(hipFree(gpuDatabase->indexes));
CUDA_SAFE_CALL(hipFree(gpuDatabase->scores));
CUDA_SAFE_CALL(hipFree(gpuDatabase->hBus));
}
free(database->gpuDatabases);
free(database->order);
free(database->positions);
free(database->indexes);
free(database);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// SCORING
static void scoreDatabase(int* scores, int type, Chain** queries,
int queriesLen, LongDatabase* longDatabase, Scorer* scorer, int* indexes,
int indexesLen, int* cards, int cardsLen, Thread* thread) {
ASSERT(cardsLen > 0, "no GPUs available");
Context* param = (Context*) malloc(sizeof(Context));
param->scores = scores;
param->type = type;
param->queries = queries;
param->queriesLen = queriesLen;
param->longDatabase = longDatabase;
param->scorer = scorer;
param->indexes = indexes;
param->indexesLen = indexesLen;
param->cards = cards;
param->cardsLen = cardsLen;
if (thread == NULL) {
scoreDatabaseThread(param);
} else {
threadCreate(thread, scoreDatabaseThread, (void*) param);
}
}
static void* scoreDatabaseThread(void* param) {
Context* context = (Context*) param;
int* scores = context->scores;
int type = context->type;
Chain** queries = context->queries;
int queriesLen = context->queriesLen;
LongDatabase* longDatabase = context->longDatabase;
Scorer* scorer = context->scorer;
int* indexes = context->indexes;
int indexesLen = context->indexesLen;
int* cards = context->cards;
int cardsLen = context->cardsLen;
if (longDatabase == NULL) {
free(param);
return NULL;
}
//**************************************************************************
// CREATE NEW INDEXES ARRAY IF NEEDED
int* newIndexes = NULL;
int newIndexesLen = 0;
int deleteIndexes;
if (indexes != NULL) {
// translate and filter indexes
int databaseLen = longDatabase->databaseLen;
int* positions = longDatabase->positions;
newIndexes = (int*) malloc(indexesLen * sizeof(int));
newIndexesLen = 0;
for (int i = 0; i < indexesLen; ++i) {
int idx = indexes[i];
            if (idx < 0 || idx >= databaseLen || positions[idx] == -1) {
continue;
}
newIndexes[newIndexesLen++] = positions[idx];
}
deleteIndexes = 1;
} else {
// load prebuilt defaults
newIndexes = longDatabase->indexes;
newIndexesLen = longDatabase->length;
deleteIndexes = 0;
}
//**************************************************************************
//**************************************************************************
// CHOOSE SOLVING FUNCTION
ScoringFunction function;
switch (type) {
case SW_ALIGN:
function = swSolve;
break;
case NW_ALIGN:
function = nwSolve;
break;
case HW_ALIGN:
function = hwSolve;
break;
case OV_ALIGN:
function = ovSolve;
break;
default:
ERROR("Wrong align type");
}
//**************************************************************************
//**************************************************************************
// SCORE MULTITHREADED
if (queriesLen < cardsLen) {
scoreDatabaseMulti(scores, function, queries, queriesLen, longDatabase,
scorer, newIndexes, newIndexesLen, cards, cardsLen);
} else {
scoreDatabaseSingle(scores, function, queries, queriesLen, longDatabase,
scorer, newIndexes, newIndexesLen, cards, cardsLen);
}
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
if (deleteIndexes) {
free(newIndexes);
}
free(param);
//**************************************************************************
return NULL;
}
static void scoreDatabaseMulti(int* scores, ScoringFunction scoringFunction,
Chain** queries, int queriesLen, LongDatabase* longDatabase, Scorer* scorer,
int* indexes, int indexesLen, int* cards, int cardsLen) {
//**************************************************************************
// CREATE QUERY PROFILES
size_t profilesSize = queriesLen * sizeof(QueryProfile*);
QueryProfile** profiles = (QueryProfile**) malloc(profilesSize);
for (int i = 0; i < queriesLen; ++i) {
profiles[i] = createQueryProfile(queries[i], scorer);
}
//**************************************************************************
//**************************************************************************
// CREATE BALANCING DATA
Chain** database = longDatabase->database;
int* order = longDatabase->order;
size_t weightsSize = indexesLen * sizeof(int);
int* weights = (int*) malloc(weightsSize);
memset(weights, 0, weightsSize);
for (int i = 0; i < indexesLen; ++i) {
weights[i] += chainGetLength(database[order[indexes[i]]]);
}
//**************************************************************************
//**************************************************************************
// SCORE MULTICARDED
int contextsLen = cardsLen * queriesLen;
size_t contextsSize = contextsLen * sizeof(KernelContext);
KernelContext* contexts = (KernelContext*) malloc(contextsSize);
size_t tasksSize = contextsLen * sizeof(Thread);
Thread* tasks = (Thread*) malloc(tasksSize);
int databaseLen = longDatabase->databaseLen;
int cardsChunk = cardsLen / queriesLen;
int cardsAdd = cardsLen % queriesLen;
int cardsOff = 0;
int* idxChunksOff = (int*) malloc(cardsLen * sizeof(int));
int* idxChunksLens = (int*) malloc(cardsLen * sizeof(int));
int idxChunksLen = 0;
int length = 0;
for (int i = 0, k = 0; i < queriesLen; ++i) {
int cCardsLen = cardsChunk + (i < cardsAdd);
int* cCards = cards + cardsOff;
cardsOff += cCardsLen;
QueryProfile* queryProfile = profiles[i];
int chunks = min(cCardsLen, indexesLen);
if (chunks != idxChunksLen) {
weightChunkArray(idxChunksOff, idxChunksLens, &idxChunksLen,
weights, indexesLen, chunks);
}
for (int j = 0; j < idxChunksLen; ++j, ++k) {
contexts[k].scores = scores + i * databaseLen;
contexts[k].scoringFunction = scoringFunction;
contexts[k].queryProfile = queryProfile;
contexts[k].longDatabase = longDatabase;
contexts[k].scorer = scorer;
contexts[k].indexes = indexes + idxChunksOff[j];
contexts[k].indexesLen = idxChunksLens[j];
contexts[k].card = cCards[j];
threadCreate(&(tasks[k]), kernelThread, &(contexts[k]));
length++;
}
}
for (int i = 0; i < length; ++i) {
threadJoin(tasks[i]);
}
free(tasks);
free(contexts);
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
for (int i = 0; i < queriesLen; ++i) {
deleteQueryProfile(profiles[i]);
}
free(profiles);
free(weights);
free(idxChunksOff);
free(idxChunksLens);
//**************************************************************************
}
static void scoreDatabaseSingle(int* scores, ScoringFunction scoringFunction,
Chain** queries, int queriesLen, LongDatabase* longDatabase, Scorer* scorer,
int* indexes, int indexesLen, int* cards, int cardsLen) {
//**************************************************************************
// CREATE CONTEXTS
    size_t contextsSize = cardsLen * sizeof(KernelContexts);
KernelContexts* contexts = (KernelContexts*) malloc(contextsSize);
for (int i = 0; i < cardsLen; ++i) {
size_t size = queriesLen * sizeof(KernelContext);
contexts[i].contexts = (KernelContext*) malloc(size);
contexts[i].contextsLen = 0;
contexts[i].cells = 0;
}
//**************************************************************************
//**************************************************************************
// SCORE MULTITHREADED
size_t tasksSize = cardsLen * sizeof(Thread);
Thread* tasks = (Thread*) malloc(tasksSize);
int databaseLen = longDatabase->databaseLen;
    // balance tasks greedily: each query goes to the card with the fewest
    // accumulated cells; cardsLen (the number of CUDA cards) is small
for (int i = 0; i < queriesLen; ++i) {
int minIdx = 0;
long long minVal = contexts[0].cells;
for (int j = 1; j < cardsLen; ++j) {
if (contexts[j].cells < minVal) {
minVal = contexts[j].cells;
minIdx = j;
}
}
KernelContext context;
context.scores = scores + i * databaseLen;
context.scoringFunction = scoringFunction;
context.queryProfile = NULL;
context.query = queries[i];
context.longDatabase = longDatabase;
context.scorer = scorer;
context.indexes = indexes;
context.indexesLen = indexesLen;
context.card = cards[minIdx];
contexts[minIdx].contexts[contexts[minIdx].contextsLen++] = context;
contexts[minIdx].cells += chainGetLength(queries[i]);
}
for (int i = 0; i < cardsLen; ++i) {
threadCreate(&(tasks[i]), kernelsThread, &(contexts[i]));
}
for (int i = 0; i < cardsLen; ++i) {
threadJoin(tasks[i]);
}
free(tasks);
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
for (int i = 0; i < cardsLen; ++i) {
free(contexts[i].contexts);
}
free(contexts);
//**************************************************************************
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// CPU KERNELS
static void* kernelsThread(void* param) {
KernelContexts* context = (KernelContexts*) param;
KernelContext* contexts = context->contexts;
int contextsLen = context->contextsLen;
for (int i = 0; i < contextsLen; ++i) {
Chain* query = contexts[i].query;
Scorer* scorer = contexts[i].scorer;
int card = contexts[i].card;
int currentCard;
CUDA_SAFE_CALL(hipGetDevice(¤tCard));
if (currentCard != card) {
CUDA_SAFE_CALL(hipSetDevice(card));
}
contexts[i].queryProfile = createQueryProfile(query, scorer);
kernelThread(&(contexts[i]));
deleteQueryProfile(contexts[i].queryProfile);
}
return NULL;
}
static void* kernelThread(void* param) {
KernelContext* context = (KernelContext*) param;
int* scores = context->scores;
ScoringFunction scoringFunction = context->scoringFunction;
QueryProfile* queryProfile = context->queryProfile;
LongDatabase* longDatabase = context->longDatabase;
Scorer* scorer = context->scorer;
int* indexes = context->indexes;
int indexesLen = context->indexesLen;
int card = context->card;
//**************************************************************************
// FIND DATABASE
GpuDatabase* gpuDatabases = longDatabase->gpuDatabases;
int gpuDatabasesLen = longDatabase->gpuDatabasesLen;
GpuDatabase* gpuDatabase = NULL;
for (int i = 0; i < gpuDatabasesLen; ++i) {
if (gpuDatabases[i].card == card) {
gpuDatabase = &(gpuDatabases[i]);
break;
}
}
ASSERT(gpuDatabase != NULL, "Long database not available on card %d", card);
//**************************************************************************
//**************************************************************************
// CUDA SETUP
int currentCard;
CUDA_SAFE_CALL(hipGetDevice(¤tCard));
if (currentCard != card) {
CUDA_SAFE_CALL(hipSetDevice(card));
}
//**************************************************************************
//**************************************************************************
// FIX INDEXES
int deleteIndexes;
int* indexesGpu;
if (indexesLen == longDatabase->length) {
indexes = longDatabase->indexes;
indexesLen = longDatabase->length;
indexesGpu = gpuDatabase->indexes;
deleteIndexes = 0;
} else {
size_t indexesSize = indexesLen * sizeof(int);
CUDA_SAFE_CALL(hipMalloc(&indexesGpu, indexesSize));
CUDA_SAFE_CALL(hipMemcpy(indexesGpu, indexes, indexesSize, TO_GPU));
deleteIndexes = 1;
}
//**************************************************************************
//**************************************************************************
// PREPARE GPU
QueryProfileGpu* queryProfileGpu = createQueryProfileGpu(queryProfile);
int gapOpen = scorerGetGapOpen(scorer);
int gapExtend = scorerGetGapExtend(scorer);
int rows = queryProfile->length;
int rowsGpu = queryProfile->height * 4;
int iters = rowsGpu / (THREADS * 4) + (rowsGpu % (THREADS * 4) != 0);
CUDA_SAFE_CALL(hipMemcpyToSymbol(gapOpen_, &gapOpen, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(gapExtend_, &gapExtend, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(rows_, &rows, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(rowsPadded_, &rowsGpu, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(length_, &indexesLen, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(iters_, &iters, sizeof(int)));
//**************************************************************************
//**************************************************************************
// SOLVE
char* codesGpu = gpuDatabase->codes;
int* startsGpu = gpuDatabase->starts;
int* lengthsGpu = gpuDatabase->lengths;
int* scoresGpu = gpuDatabase->scores;
int2* hBusGpu = gpuDatabase->hBus;
hipLaunchKernelGGL(( scoringFunction), dim3(BLOCKS), dim3(THREADS), 0, 0, codesGpu, startsGpu, lengthsGpu,
indexesGpu, scoresGpu, hBusGpu);
//**************************************************************************
//**************************************************************************
// SAVE RESULTS
int length = longDatabase->length;
size_t scoresSize = length * sizeof(int);
int* scoresCpu = (int*) malloc(scoresSize);
CUDA_SAFE_CALL(hipMemcpy(scoresCpu, scoresGpu, scoresSize, FROM_GPU));
int* order = longDatabase->order;
for (int i = 0; i < indexesLen; ++i) {
scores[order[indexes[i]]] = scoresCpu[indexes[i]];
}
free(scoresCpu);
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
deleteQueryProfileGpu(queryProfileGpu);
if (deleteIndexes) {
CUDA_SAFE_CALL(hipFree(indexesGpu));
}
//**************************************************************************
return NULL;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// GPU KERNELS
__global__ void hwSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus) {
for (int i = blockIdx.x; i < length_; i += gridDim.x) {
hwSolveSingle(indexes[i], codes, starts, lengths, scores, hBus);
}
}
__global__ void nwSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus) {
for (int i = blockIdx.x; i < length_; i += gridDim.x) {
nwSolveSingle(indexes[i], codes, starts, lengths, scores, hBus);
}
}
__global__ void ovSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus) {
for (int i = blockIdx.x; i < length_; i += gridDim.x) {
ovSolveSingle(indexes[i], codes, starts, lengths, scores, hBus);
}
}
__global__ void swSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus) {
for (int i = blockIdx.x; i < length_; i += gridDim.x) {
swSolveSingle(indexes[i], codes, starts, lengths, scores, hBus);
}
}
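// Boundary gap penalty for DP index `index`: -(gapOpen_ + index * gapExtend_)
// for index >= 0, and 0 for index < 0 (the cell just outside the matrix).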
__device__ static int gap(int index) {
return (-gapOpen_ - index * gapExtend_) * (index >= 0);
}
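// Common structure of the *SolveSingle kernels below: one thread block scores
// one database chain; each thread keeps four consecutive query rows in
// registers (lScr/lAff), the block sweeps the chain column by column, and
// hBus carries the bottom row of each 4*blockDim.x row strip over to the next
// strip (iter). The variants differ in boundary initialization, in clamping
// at zero (SW), and in where the final score is taken from (HW/NW/OV/SW
// semantics).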
__device__ void hwSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus) {
__shared__ int scoresShr[MAX_THREADS];
__shared__ int hBusScrShr[MAX_THREADS + 1];
__shared__ int hBusAffShr[MAX_THREADS + 1];
int off = starts[id];
int cols = lengths[id];
int score = SCORE_MIN;
int width = cols * iters_ + 2 * (blockDim.x - 1);
int col = -threadIdx.x;
int row = threadIdx.x * 4;
int iter = 0;
Atom atom;
atom.mch = gap(row - 1);
atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = INT4_SCORE_MIN;
hBusScrShr[threadIdx.x] = 0;
hBusAffShr[threadIdx.x] = SCORE_MIN;
for (int i = 0; i < width; ++i) {
int del;
int valid = col >= 0 && row < rowsPadded_;
if (valid) {
if (iter != 0 && threadIdx.x == 0) {
atom.up = hBus[off + col];
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
char code = codes[off + col];
char4 rowScores = tex2D(qpTexture, code, row >> 2);
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + rowScores.x;
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + rowScores.y;
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + rowScores.z;
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + rowScores.w;
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
if (row + 0 == rows_ - 1) score = max(score, atom.rScr.x);
if (row + 1 == rows_ - 1) score = max(score, atom.rScr.y);
if (row + 2 == rows_ - 1) score = max(score, atom.rScr.z);
if (row + 3 == rows_ - 1) score = max(score, atom.rScr.w);
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (valid) {
if (iter < iters_ - 1 && threadIdx.x == blockDim.x - 1) {
VEC2_ASSIGN(hBus[off + col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
col++;
if (col == cols) {
col = 0;
row += blockDim.x * 4;
iter++;
atom.mch = gap(row - 1);
            atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = INT4_SCORE_MIN;
}
__syncthreads();
}
// write all scores
scoresShr[threadIdx.x] = score;
__syncthreads();
// gather scores
if (threadIdx.x == 0) {
for (int i = 1; i < blockDim.x; ++i) {
score = max(score, scoresShr[i]);
}
scores[id] = score;
}
}
__device__ void nwSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus) {
__shared__ int scoresShr[MAX_THREADS];
__shared__ int hBusScrShr[MAX_THREADS + 1];
__shared__ int hBusAffShr[MAX_THREADS + 1];
int off = starts[id];
int cols = lengths[id];
int score = SCORE_MIN;
int width = cols * iters_ + 2 * (blockDim.x - 1);
int col = -threadIdx.x;
int row = threadIdx.x * 4;
int iter = 0;
Atom atom;
atom.mch = gap(row - 1);
atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = INT4_SCORE_MIN;
for (int i = 0; i < width; ++i) {
int del;
int valid = col >= 0 && row < rowsPadded_;
if (valid) {
if (threadIdx.x == 0) {
if (iter == 0) {
atom.up.x = gap(col);
atom.up.y = SCORE_MIN;
} else {
atom.up = hBus[off + col];
}
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
char code = codes[off + col];
char4 rowScores = tex2D(qpTexture, code, row >> 2);
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + rowScores.x;
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + rowScores.y;
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + rowScores.z;
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + rowScores.w;
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (valid) {
if (iter < iters_ - 1 && threadIdx.x == blockDim.x - 1) {
VEC2_ASSIGN(hBus[off + col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
col++;
if (col == cols) {
if (row + 0 == rows_ - 1) score = max(score, atom.lScr.x);
if (row + 1 == rows_ - 1) score = max(score, atom.lScr.y);
if (row + 2 == rows_ - 1) score = max(score, atom.lScr.z);
if (row + 3 == rows_ - 1) score = max(score, atom.lScr.w);
col = 0;
row += blockDim.x * 4;
iter++;
atom.mch = gap(row - 1);
            atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = INT4_SCORE_MIN;
}
__syncthreads();
}
// write all scores
scoresShr[threadIdx.x] = score;
__syncthreads();
// gather scores
if (threadIdx.x == 0) {
for (int i = 1; i < blockDim.x; ++i) {
score = max(score, scoresShr[i]);
}
scores[id] = score;
}
}
__device__ void ovSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus) {
__shared__ int scoresShr[MAX_THREADS];
__shared__ int hBusScrShr[MAX_THREADS + 1];
__shared__ int hBusAffShr[MAX_THREADS + 1];
int off = starts[id];
int cols = lengths[id];
int score = SCORE_MIN;
int width = cols * iters_ + 2 * (blockDim.x - 1);
int col = -threadIdx.x;
int row = threadIdx.x * 4;
int iter = 0;
Atom atom;
atom.mch = 0;
atom.lScr = INT4_ZERO;
atom.lAff = INT4_SCORE_MIN;
hBusScrShr[threadIdx.x] = 0;
hBusAffShr[threadIdx.x] = SCORE_MIN;
for (int i = 0; i < width; ++i) {
int del;
int valid = col >= 0 && row < rowsPadded_;
if (valid) {
if (iter != 0 && threadIdx.x == 0) {
atom.up = hBus[off + col];
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
char code = codes[off + col];
char4 rowScores = tex2D(qpTexture, code, row >> 2);
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + rowScores.x;
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + rowScores.y;
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + rowScores.z;
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + rowScores.w;
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
if (row + 0 == rows_ - 1) score = max(score, atom.rScr.x);
if (row + 1 == rows_ - 1) score = max(score, atom.rScr.y);
if (row + 2 == rows_ - 1) score = max(score, atom.rScr.z);
if (row + 3 == rows_ - 1) score = max(score, atom.rScr.w);
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (valid) {
if (iter < iters_ - 1 && threadIdx.x == blockDim.x - 1) {
VEC2_ASSIGN(hBus[off + col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
col++;
if (col == cols) {
if (row < rows_) {
score = max(score, atom.lScr.x);
score = max(score, atom.lScr.y);
score = max(score, atom.lScr.z);
score = max(score, atom.lScr.w);
}
col = 0;
row += blockDim.x * 4;
iter++;
atom.mch = 0;
atom.lScr = INT4_ZERO;
atom.lAff = INT4_SCORE_MIN;
}
__syncthreads();
}
// write all scores
scoresShr[threadIdx.x] = score;
__syncthreads();
// gather scores
if (threadIdx.x == 0) {
for (int i = 1; i < blockDim.x; ++i) {
score = max(score, scoresShr[i]);
}
scores[id] = score;
}
}
__device__ void swSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus) {
__shared__ int scoresShr[MAX_THREADS];
__shared__ int hBusScrShr[MAX_THREADS + 1];
__shared__ int hBusAffShr[MAX_THREADS + 1];
int off = starts[id];
int cols = lengths[id];
int score = 0;
int width = cols * iters_ + 2 * (blockDim.x - 1);
int col = -threadIdx.x;
int row = threadIdx.x * 4;
int iter = 0;
Atom atom;
atom.mch = 0;
atom.lScr = INT4_ZERO;
atom.lAff = INT4_SCORE_MIN;
hBusScrShr[threadIdx.x] = 0;
hBusAffShr[threadIdx.x] = SCORE_MIN;
for (int i = 0; i < width; ++i) {
int del;
int valid = col >= 0 && row < rowsPadded_;
if (valid) {
if (iter != 0 && threadIdx.x == 0) {
atom.up = hBus[off + col];
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
char code = codes[off + col];
char4 rowScores = tex2D(qpTexture, code, row >> 2);
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + rowScores.x;
atom.rScr.x = MAX4(0, mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + rowScores.y;
atom.rScr.y = MAX4(0, mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + rowScores.z;
atom.rScr.z = MAX4(0, mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + rowScores.w;
atom.rScr.w = MAX4(0, mch, del, ins);
atom.rAff.w = ins;
score = max(score, atom.rScr.x);
score = max(score, atom.rScr.y);
score = max(score, atom.rScr.z);
score = max(score, atom.rScr.w);
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (valid) {
if (iter < iters_ - 1 && threadIdx.x == blockDim.x - 1) {
VEC2_ASSIGN(hBus[off + col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
col++;
if (col == cols) {
col = 0;
row += blockDim.x * 4;
iter++;
atom.mch = 0;
atom.lScr = INT4_ZERO;
atom.lAff = INT4_SCORE_MIN;
}
__syncthreads();
}
// write all scores
scoresShr[threadIdx.x] = score;
__syncthreads();
// gather scores
if (threadIdx.x == 0) {
for (int i = 1; i < blockDim.x; ++i) {
score = max(score, scoresShr[i]);
}
scores[id] = score;
}
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// QUERY PROFILE
static QueryProfile* createQueryProfile(Chain* query, Scorer* scorer) {
int rows = chainGetLength(query);
int rowsGpu = rows + (8 - rows % 8) % 8;
int width = scorerGetMaxCode(scorer) + 1;
int height = rowsGpu / 4;
char* row = (char*) malloc(rows * sizeof(char));
chainCopyCodes(query, row);
size_t size = width * height * sizeof(char4);
char4* data = (char4*) malloc(size);
memset(data, 0, size);
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width - 1; ++j) {
char4 scr;
scr.x = i * 4 + 0 >= rows ? 0 : scorerScore(scorer, row[i * 4 + 0], j);
scr.y = i * 4 + 1 >= rows ? 0 : scorerScore(scorer, row[i * 4 + 1], j);
scr.z = i * 4 + 2 >= rows ? 0 : scorerScore(scorer, row[i * 4 + 2], j);
scr.w = i * 4 + 3 >= rows ? 0 : scorerScore(scorer, row[i * 4 + 3], j);
data[i * width + j] = scr;
}
}
free(row);
QueryProfile* queryProfile = (QueryProfile*) malloc(sizeof(QueryProfile));
queryProfile->data = data;
queryProfile->width = width;
queryProfile->height = height;
queryProfile->length = rows;
queryProfile->size = size;
return queryProfile;
}
static void deleteQueryProfile(QueryProfile* queryProfile) {
free(queryProfile->data);
free(queryProfile);
}
static QueryProfileGpu* createQueryProfileGpu(QueryProfile* queryProfile) {
int width = queryProfile->width;
int height = queryProfile->height;
size_t size = queryProfile->size;
char4* data = queryProfile->data;
hipArray* dataGpu;
CUDA_SAFE_CALL(hipMallocArray(&dataGpu, &qpTexture.channelDesc, width, height));
CUDA_SAFE_CALL(hipMemcpyToArray (dataGpu, 0, 0, data, size, TO_GPU));
CUDA_SAFE_CALL(hipBindTextureToArray(qpTexture, dataGpu));
qpTexture.addressMode[0] = hipAddressModeClamp;
qpTexture.addressMode[1] = hipAddressModeClamp;
qpTexture.filterMode = hipFilterModePoint;
qpTexture.normalized = false;
size_t queryProfileGpuSize = sizeof(QueryProfileGpu);
QueryProfileGpu* queryProfileGpu = (QueryProfileGpu*) malloc(queryProfileGpuSize);
queryProfileGpu->data = dataGpu;
return queryProfileGpu;
}
static void deleteQueryProfileGpu(QueryProfileGpu* queryProfileGpu) {
CUDA_SAFE_CALL(hipFreeArray(queryProfileGpu->data));
CUDA_SAFE_CALL(hipUnbindTexture(qpTexture));
free(queryProfileGpu);
}
//------------------------------------------------------------------------------
//******************************************************************************
#endif // __HIPCC__
|
3a41e493a67ddbe5a977cfb46a4d07e17b7b5216.cu
|
/*
swsharp - CUDA parallelized Smith Waterman with applying Hirschberg's and
Ukkonen's algorithm and dynamic cell pruning.
Copyright (C) 2013 Matija Korpar, contributor Mile Šikić
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact the author by [email protected].
*/
#ifdef __CUDACC__
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "chain.h"
#include "constants.h"
#include "cuda_utils.h"
#include "error.h"
#include "scorer.h"
#include "thread.h"
#include "utils.h"
#include "score_database_gpu_long.h"
#define THREADS 64
#define BLOCKS 240
#define MAX_THREADS THREADS
#define INT4_ZERO make_int4(0, 0, 0, 0)
#define INT4_SCORE_MIN make_int4(SCORE_MIN, SCORE_MIN, SCORE_MIN, SCORE_MIN)
typedef struct GpuDatabase {
int card;
char* codes;
int* starts;
int* lengths;
int* indexes;
int* scores;
int2* hBus;
} GpuDatabase;
struct LongDatabase {
Chain** database;
int databaseLen;
int length;
int* order;
int* positions;
int* indexes;
GpuDatabase* gpuDatabases;
int gpuDatabasesLen;
};
typedef struct Context {
int* scores;
int type;
Chain** queries;
int queriesLen;
LongDatabase* longDatabase;
Scorer* scorer;
int* indexes;
int indexesLen;
int* cards;
int cardsLen;
} Context;
typedef struct QueryProfile {
int height;
int width;
int length;
char4* data;
size_t size;
} QueryProfile;
typedef struct QueryProfileGpu {
cudaArray* data;
} QueryProfileGpu;
typedef void (*ScoringFunction)(char*, int*, int*, int*, int*, int2*);
typedef struct KernelContext {
int* scores;
ScoringFunction scoringFunction;
QueryProfile* queryProfile;
Chain* query;
LongDatabase* longDatabase;
Scorer* scorer;
int* indexes;
int indexesLen;
int card;
} KernelContext;
typedef struct KernelContexts {
KernelContext* contexts;
int contextsLen;
long long cells;
} KernelContexts;
typedef struct Atom {
int mch;
int2 up;
int4 lScr;
int4 lAff;
int4 rScr;
int4 rAff;
} Atom;
static __constant__ int gapOpen_;
static __constant__ int gapExtend_;
static __constant__ int rows_;
static __constant__ int rowsPadded_;
static __constant__ int length_;
static __constant__ int iters_;
texture<char4, 2, cudaReadModeElementType> qpTexture;
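// qpTexture holds the query profile: element (code, r) is a char4 with the
// substitution scores of query rows 4*r..4*r+3 against database symbol
// 'code'. Kernels read it with tex2D(qpTexture, code, row >> 2); clamped
// addressing and point filtering are configured when the profile is bound.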
//******************************************************************************
// PUBLIC
extern LongDatabase* longDatabaseCreate(Chain** database, int databaseLen,
int minLen, int maxLen, int* cards, int cardsLen);
extern void longDatabaseDelete(LongDatabase* longDatabase);
extern void scoreLongDatabaseGpu(int* scores, int type, Chain* query,
LongDatabase* longDatabase, Scorer* scorer, int* indexes, int indexesLen,
int* cards, int cardsLen, Thread* thread);
extern void scoreLongDatabasesGpu(int* scores, int type, Chain** queries,
int queriesLen, LongDatabase* longDatabase, Scorer* scorer, int* indexes,
int indexesLen, int* cards, int cardsLen, Thread* thread);
//******************************************************************************
//******************************************************************************
// PRIVATE
// constructor
static LongDatabase* createDatabase(Chain** database, int databaseLen,
int minLen, int maxLen, int* cards, int cardsLen);
// destructor
static void deleteDatabase(LongDatabase* database);
// scoring
static void scoreDatabase(int* scores, int type, Chain** queries,
int queriesLen, LongDatabase* longDatabase, Scorer* scorer, int* indexes,
int indexesLen, int* cards, int cardsLen, Thread* thread);
static void* scoreDatabaseThread(void* param);
static void scoreDatabaseMulti(int* scores, ScoringFunction scoringFunction,
Chain** queries, int queriesLen, LongDatabase* longDatabase, Scorer* scorer,
int* indexes, int indexesLen, int* cards, int cardsLen);
static void scoreDatabaseSingle(int* scores, ScoringFunction scoringFunction,
Chain** queries, int queriesLen, LongDatabase* longDatabase, Scorer* scorer,
int* indexes, int indexesLen, int* cards, int cardsLen);
// cpu kernels
static void* kernelThread(void* param);
static void* kernelsThread(void* param);
// gpu kernels
__global__ void hwSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus);
__global__ void nwSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus);
__global__ void ovSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus);
__global__ void swSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus);
__device__ static int gap(int index);
__device__ void hwSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus);
__device__ void nwSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus);
__device__ void ovSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus);
__device__ void swSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus);
// query profile
static QueryProfile* createQueryProfile(Chain* query, Scorer* scorer);
static void deleteQueryProfile(QueryProfile* queryProfile);
static QueryProfileGpu* createQueryProfileGpu(QueryProfile* queryProfile);
static void deleteQueryProfileGpu(QueryProfileGpu* queryProfileGpu);
//******************************************************************************
//******************************************************************************
// PUBLIC
//------------------------------------------------------------------------------
// CONSTRUCTOR, DESTRUCTOR
extern LongDatabase* longDatabaseCreate(Chain** database, int databaseLen,
int minLen, int maxLen, int* cards, int cardsLen) {
return createDatabase(database, databaseLen, minLen, maxLen, cards, cardsLen);
}
extern void longDatabaseDelete(LongDatabase* longDatabase) {
deleteDatabase(longDatabase);
}
extern size_t longDatabaseGpuMemoryConsumption(Chain** database, int databaseLen,
int minLen, int maxLen) {
int length = 0;
long codesLen = 0;
for (int i = 0; i < databaseLen; ++i) {
const int n = chainGetLength(database[i]);
if (n >= minLen && n < maxLen) {
codesLen += n;
length++;
}
}
size_t lengthsSize = length * sizeof(int);
size_t startsSize = length * sizeof(int);
size_t codesSize = codesLen * sizeof(char);
size_t indexesSize = length * sizeof(int);
size_t scoresSize = length * sizeof(int);
size_t hBusSize = codesLen * sizeof(int2);
size_t memory = codesSize + startsSize + lengthsSize + indexesSize +
scoresSize + hBusSize;
return memory;
}
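// Illustrative estimate: for 1000 chains totalling 10 M residues the formula
// above gives ~10 MB of codes, ~80 MB of hBus (one int2 per residue) and
// ~16 KB of per-chain arrays, i.e. roughly 90 MB per card.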
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// CPU KERNELS
extern void scoreLongDatabaseGpu(int* scores, int type, Chain* query,
LongDatabase* longDatabase, Scorer* scorer, int* indexes, int indexesLen,
int* cards, int cardsLen, Thread* thread) {
scoreDatabase(scores, type, &query, 1, longDatabase, scorer, indexes,
indexesLen, cards, cardsLen, thread);
}
extern void scoreLongDatabasesGpu(int* scores, int type, Chain** queries,
int queriesLen, LongDatabase* longDatabase, Scorer* scorer, int* indexes,
int indexesLen, int* cards, int cardsLen, Thread* thread) {
scoreDatabase(scores, type, queries, queriesLen, longDatabase, scorer,
indexes, indexesLen, cards, cardsLen, thread);
}
//------------------------------------------------------------------------------
//******************************************************************************
//******************************************************************************
// PRIVATE
//------------------------------------------------------------------------------
// CONSTRUCTOR, DESTRUCTOR
static LongDatabase* createDatabase(Chain** database, int databaseLen,
int minLen, int maxLen, int* cards, int cardsLen) {
//**************************************************************************
// FILTER DATABASE AND REMEMBER ORDER
int length = 0;
for (int i = 0; i < databaseLen; ++i) {
const int n = chainGetLength(database[i]);
if (n >= minLen && n < maxLen) {
length++;
}
}
if (length == 0) {
return NULL;
}
int* order = (int*) malloc(length * sizeof(int));
for (int i = 0, j = 0; i < databaseLen; ++i) {
const int n = chainGetLength(database[i]);
if (n >= minLen && n < maxLen) {
order[j++] = i;
}
}
LOG("Long database length: %d", length);
//**************************************************************************
//**************************************************************************
// CALCULATE DIMENSIONS
long codesLen = 0;
for (int i = 0; i < length; ++i) {
codesLen += chainGetLength(database[order[i]]);
}
LOG("Long database cells: %ld", codesLen);
//**************************************************************************
//**************************************************************************
// INIT STRUCTURES
size_t lengthsSize = length * sizeof(int);
int* lengths = (int*) malloc(lengthsSize);
size_t startsSize = length * sizeof(int);
int* starts = (int*) malloc(startsSize);
size_t codesSize = codesLen * sizeof(char);
char* codes = (char*) malloc(codesSize);
//**************************************************************************
//**************************************************************************
// CREATE STRUCTURES
long codesOff = 0;
for (int i = 0; i < length; ++i) {
Chain* chain = database[order[i]];
int n = chainGetLength(chain);
lengths[i] = n;
starts[i] = codesOff;
chainCopyCodes(chain, codes + codesOff);
codesOff += n;
}
//**************************************************************************
//**************************************************************************
// CREATE DEFAULT INDEXES
size_t indexesSize = length * sizeof(int);
int* indexes = (int*) malloc(indexesSize);
for (int i = 0; i < length; ++i) {
indexes[i] = i;
}
//**************************************************************************
//**************************************************************************
// CREATE POSITION ARRAY
int* positions = (int*) malloc(databaseLen * sizeof(int));
for (int i = 0; i < databaseLen; ++i) {
positions[i] = -1;
}
for (int i = 0; i < length; ++i) {
positions[order[i]] = i;
}
//**************************************************************************
//**************************************************************************
// CREATE GPU DATABASES
size_t gpuDatabasesSize = cardsLen * sizeof(GpuDatabase);
GpuDatabase* gpuDatabases = (GpuDatabase*) malloc(gpuDatabasesSize);
for (int i = 0; i < cardsLen; ++i) {
int card = cards[i];
CUDA_SAFE_CALL(cudaSetDevice(card));
char* codesGpu;
CUDA_SAFE_CALL(cudaMalloc(&codesGpu, codesSize));
CUDA_SAFE_CALL(cudaMemcpy(codesGpu, codes, codesSize, TO_GPU));
int* startsGpu;
CUDA_SAFE_CALL(cudaMalloc(&startsGpu, startsSize));
CUDA_SAFE_CALL(cudaMemcpy(startsGpu, starts, startsSize, TO_GPU));
int* lengthsGpu;
CUDA_SAFE_CALL(cudaMalloc(&lengthsGpu, lengthsSize));
CUDA_SAFE_CALL(cudaMemcpy(lengthsGpu, lengths, lengthsSize, TO_GPU));
int* indexesGpu;
CUDA_SAFE_CALL(cudaMalloc(&indexesGpu, indexesSize));
CUDA_SAFE_CALL(cudaMemcpy(indexesGpu, indexes, indexesSize, TO_GPU));
// additional structures
size_t scoresSize = length * sizeof(int);
int* scoresGpu;
CUDA_SAFE_CALL(cudaMalloc(&scoresGpu, scoresSize));
int2* hBusGpu;
size_t hBusSize = codesLen * sizeof(int2);
CUDA_SAFE_CALL(cudaMalloc(&hBusGpu, hBusSize));
gpuDatabases[i].card = card;
gpuDatabases[i].codes = codesGpu;
gpuDatabases[i].starts = startsGpu;
gpuDatabases[i].lengths = lengthsGpu;
gpuDatabases[i].indexes = indexesGpu;
gpuDatabases[i].scores = scoresGpu;
gpuDatabases[i].hBus = hBusGpu;
#ifdef DEBUG
size_t memory = codesSize + startsSize + lengthsSize + indexesSize +
scoresSize + hBusSize;
LOG("Long database using %.2lfMBs on card %d", memory / 1024.0 / 1024.0, card);
#endif
}
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
free(codes);
free(starts);
free(lengths);
//**************************************************************************
size_t longDatabaseSize = sizeof(struct LongDatabase);
LongDatabase* longDatabase = (LongDatabase*) malloc(longDatabaseSize);
longDatabase->database = database;
longDatabase->databaseLen = databaseLen;
longDatabase->length = length;
longDatabase->order = order;
longDatabase->positions = positions;
longDatabase->indexes = indexes;
longDatabase->gpuDatabases = gpuDatabases;
longDatabase->gpuDatabasesLen = cardsLen;
return longDatabase;
}
static void deleteDatabase(LongDatabase* database) {
if (database == NULL) {
return;
}
for (int i = 0; i < database->gpuDatabasesLen; ++i) {
GpuDatabase* gpuDatabase = &(database->gpuDatabases[i]);
CUDA_SAFE_CALL(cudaSetDevice(gpuDatabase->card));
CUDA_SAFE_CALL(cudaFree(gpuDatabase->codes));
CUDA_SAFE_CALL(cudaFree(gpuDatabase->starts));
CUDA_SAFE_CALL(cudaFree(gpuDatabase->lengths));
CUDA_SAFE_CALL(cudaFree(gpuDatabase->indexes));
CUDA_SAFE_CALL(cudaFree(gpuDatabase->scores));
CUDA_SAFE_CALL(cudaFree(gpuDatabase->hBus));
}
free(database->gpuDatabases);
free(database->order);
free(database->positions);
free(database->indexes);
free(database);
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// SCORING
static void scoreDatabase(int* scores, int type, Chain** queries,
int queriesLen, LongDatabase* longDatabase, Scorer* scorer, int* indexes,
int indexesLen, int* cards, int cardsLen, Thread* thread) {
ASSERT(cardsLen > 0, "no GPUs available");
Context* param = (Context*) malloc(sizeof(Context));
param->scores = scores;
param->type = type;
param->queries = queries;
param->queriesLen = queriesLen;
param->longDatabase = longDatabase;
param->scorer = scorer;
param->indexes = indexes;
param->indexesLen = indexesLen;
param->cards = cards;
param->cardsLen = cardsLen;
if (thread == NULL) {
scoreDatabaseThread(param);
} else {
threadCreate(thread, scoreDatabaseThread, (void*) param);
}
}
static void* scoreDatabaseThread(void* param) {
Context* context = (Context*) param;
int* scores = context->scores;
int type = context->type;
Chain** queries = context->queries;
int queriesLen = context->queriesLen;
LongDatabase* longDatabase = context->longDatabase;
Scorer* scorer = context->scorer;
int* indexes = context->indexes;
int indexesLen = context->indexesLen;
int* cards = context->cards;
int cardsLen = context->cardsLen;
if (longDatabase == NULL) {
free(param);
return NULL;
}
//**************************************************************************
// CREATE NEW INDEXES ARRAY IF NEEDED
int* newIndexes = NULL;
int newIndexesLen = 0;
int deleteIndexes;
if (indexes != NULL) {
// translate and filter indexes
int databaseLen = longDatabase->databaseLen;
int* positions = longDatabase->positions;
newIndexes = (int*) malloc(indexesLen * sizeof(int));
newIndexesLen = 0;
for (int i = 0; i < indexesLen; ++i) {
int idx = indexes[i];
if (idx < 0 || idx >= databaseLen || positions[idx] == -1) {
continue;
}
newIndexes[newIndexesLen++] = positions[idx];
}
deleteIndexes = 1;
} else {
// load prebuilt defaults
newIndexes = longDatabase->indexes;
newIndexesLen = longDatabase->length;
deleteIndexes = 0;
}
//**************************************************************************
//**************************************************************************
// CHOOSE SOLVING FUNCTION
ScoringFunction function;
switch (type) {
case SW_ALIGN:
function = swSolve;
break;
case NW_ALIGN:
function = nwSolve;
break;
case HW_ALIGN:
function = hwSolve;
break;
case OV_ALIGN:
function = ovSolve;
break;
default:
ERROR("Wrong align type");
}
//**************************************************************************
//**************************************************************************
// SCORE MULTITHREADED
if (queriesLen < cardsLen) {
scoreDatabaseMulti(scores, function, queries, queriesLen, longDatabase,
scorer, newIndexes, newIndexesLen, cards, cardsLen);
} else {
scoreDatabaseSingle(scores, function, queries, queriesLen, longDatabase,
scorer, newIndexes, newIndexesLen, cards, cardsLen);
}
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
if (deleteIndexes) {
free(newIndexes);
}
free(param);
//**************************************************************************
return NULL;
}
static void scoreDatabaseMulti(int* scores, ScoringFunction scoringFunction,
Chain** queries, int queriesLen, LongDatabase* longDatabase, Scorer* scorer,
int* indexes, int indexesLen, int* cards, int cardsLen) {
//**************************************************************************
// CREATE QUERY PROFILES
size_t profilesSize = queriesLen * sizeof(QueryProfile*);
QueryProfile** profiles = (QueryProfile**) malloc(profilesSize);
for (int i = 0; i < queriesLen; ++i) {
profiles[i] = createQueryProfile(queries[i], scorer);
}
//**************************************************************************
//**************************************************************************
// CREATE BALANCING DATA
Chain** database = longDatabase->database;
int* order = longDatabase->order;
size_t weightsSize = indexesLen * sizeof(int);
int* weights = (int*) malloc(weightsSize);
memset(weights, 0, weightsSize);
for (int i = 0; i < indexesLen; ++i) {
weights[i] += chainGetLength(database[order[indexes[i]]]);
}
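// The weights (chain lengths, a proxy for per-target work) are handed to
// weightChunkArray below; the assumption here is that it partitions the
// indexesLen entries into `chunks` contiguous ranges (offsets in
// idxChunksOff, lengths in idxChunksLens) of roughly equal total weight,
// so each card assigned to a query gets a similar amount of work.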
//**************************************************************************
//**************************************************************************
// SCORE MULTICARDED
int contextsLen = cardsLen * queriesLen;
size_t contextsSize = contextsLen * sizeof(KernelContext);
KernelContext* contexts = (KernelContext*) malloc(contextsSize);
size_t tasksSize = contextsLen * sizeof(Thread);
Thread* tasks = (Thread*) malloc(tasksSize);
int databaseLen = longDatabase->databaseLen;
int cardsChunk = cardsLen / queriesLen;
int cardsAdd = cardsLen % queriesLen;
int cardsOff = 0;
int* idxChunksOff = (int*) malloc(cardsLen * sizeof(int));
int* idxChunksLens = (int*) malloc(cardsLen * sizeof(int));
int idxChunksLen = 0;
int length = 0;
for (int i = 0, k = 0; i < queriesLen; ++i) {
int cCardsLen = cardsChunk + (i < cardsAdd);
int* cCards = cards + cardsOff;
cardsOff += cCardsLen;
QueryProfile* queryProfile = profiles[i];
int chunks = min(cCardsLen, indexesLen);
if (chunks != idxChunksLen) {
weightChunkArray(idxChunksOff, idxChunksLens, &idxChunksLen,
weights, indexesLen, chunks);
}
for (int j = 0; j < idxChunksLen; ++j, ++k) {
contexts[k].scores = scores + i * databaseLen;
contexts[k].scoringFunction = scoringFunction;
contexts[k].queryProfile = queryProfile;
contexts[k].longDatabase = longDatabase;
contexts[k].scorer = scorer;
contexts[k].indexes = indexes + idxChunksOff[j];
contexts[k].indexesLen = idxChunksLens[j];
contexts[k].card = cCards[j];
threadCreate(&(tasks[k]), kernelThread, &(contexts[k]));
length++;
}
}
for (int i = 0; i < length; ++i) {
threadJoin(tasks[i]);
}
free(tasks);
free(contexts);
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
for (int i = 0; i < queriesLen; ++i) {
deleteQueryProfile(profiles[i]);
}
free(profiles);
free(weights);
free(idxChunksOff);
free(idxChunksLens);
//**************************************************************************
}
static void scoreDatabaseSingle(int* scores, ScoringFunction scoringFunction,
Chain** queries, int queriesLen, LongDatabase* longDatabase, Scorer* scorer,
int* indexes, int indexesLen, int* cards, int cardsLen) {
//**************************************************************************
// CREATE CONTEXTS
size_t contextsSize = cardsLen * sizeof(KernelContext);
KernelContexts* contexts = (KernelContexts*) malloc(contextsSize);
for (int i = 0; i < cardsLen; ++i) {
size_t size = queriesLen * sizeof(KernelContext);
contexts[i].contexts = (KernelContext*) malloc(size);
contexts[i].contextsLen = 0;
contexts[i].cells = 0;
}
//**************************************************************************
//**************************************************************************
// SCORE MULTITHREADED
size_t tasksSize = cardsLen * sizeof(Thread);
Thread* tasks = (Thread*) malloc(tasksSize);
int databaseLen = longDatabase->databaseLen;
// balance tasks greedily: each query goes to the card with the fewest scheduled cells (cardsLen, the number of CUDA cards, is small)
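// e.g. with 2 cards and queries of length 300, 200 and 250: q0 -> card 0
// (300 cells), q1 -> card 1 (200), q2 -> card 1 again (450 total), since
// card 1 still holds fewer scheduled cells than card 0 when q2 is placed.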
for (int i = 0; i < queriesLen; ++i) {
int minIdx = 0;
long long minVal = contexts[0].cells;
for (int j = 1; j < cardsLen; ++j) {
if (contexts[j].cells < minVal) {
minVal = contexts[j].cells;
minIdx = j;
}
}
KernelContext context;
context.scores = scores + i * databaseLen;
context.scoringFunction = scoringFunction;
context.queryProfile = NULL;
context.query = queries[i];
context.longDatabase = longDatabase;
context.scorer = scorer;
context.indexes = indexes;
context.indexesLen = indexesLen;
context.card = cards[minIdx];
contexts[minIdx].contexts[contexts[minIdx].contextsLen++] = context;
contexts[minIdx].cells += chainGetLength(queries[i]);
}
for (int i = 0; i < cardsLen; ++i) {
threadCreate(&(tasks[i]), kernelsThread, &(contexts[i]));
}
for (int i = 0; i < cardsLen; ++i) {
threadJoin(tasks[i]);
}
free(tasks);
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
for (int i = 0; i < cardsLen; ++i) {
free(contexts[i].contexts);
}
free(contexts);
//**************************************************************************
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// CPU KERNELS
static void* kernelsThread(void* param) {
KernelContexts* context = (KernelContexts*) param;
KernelContext* contexts = context->contexts;
int contextsLen = context->contextsLen;
for (int i = 0; i < contextsLen; ++i) {
Chain* query = contexts[i].query;
Scorer* scorer = contexts[i].scorer;
int card = contexts[i].card;
int currentCard;
CUDA_SAFE_CALL(cudaGetDevice(¤tCard));
if (currentCard != card) {
CUDA_SAFE_CALL(cudaSetDevice(card));
}
contexts[i].queryProfile = createQueryProfile(query, scorer);
kernelThread(&(contexts[i]));
deleteQueryProfile(contexts[i].queryProfile);
}
return NULL;
}
static void* kernelThread(void* param) {
KernelContext* context = (KernelContext*) param;
int* scores = context->scores;
ScoringFunction scoringFunction = context->scoringFunction;
QueryProfile* queryProfile = context->queryProfile;
LongDatabase* longDatabase = context->longDatabase;
Scorer* scorer = context->scorer;
int* indexes = context->indexes;
int indexesLen = context->indexesLen;
int card = context->card;
//**************************************************************************
// FIND DATABASE
GpuDatabase* gpuDatabases = longDatabase->gpuDatabases;
int gpuDatabasesLen = longDatabase->gpuDatabasesLen;
GpuDatabase* gpuDatabase = NULL;
for (int i = 0; i < gpuDatabasesLen; ++i) {
if (gpuDatabases[i].card == card) {
gpuDatabase = &(gpuDatabases[i]);
break;
}
}
ASSERT(gpuDatabase != NULL, "Long database not available on card %d", card);
//**************************************************************************
//**************************************************************************
// CUDA SETUP
int currentCard;
CUDA_SAFE_CALL(cudaGetDevice(¤tCard));
if (currentCard != card) {
CUDA_SAFE_CALL(cudaSetDevice(card));
}
//**************************************************************************
//**************************************************************************
// FIX INDEXES
int deleteIndexes;
int* indexesGpu;
if (indexesLen == longDatabase->length) {
indexes = longDatabase->indexes;
indexesLen = longDatabase->length;
indexesGpu = gpuDatabase->indexes;
deleteIndexes = 0;
} else {
size_t indexesSize = indexesLen * sizeof(int);
CUDA_SAFE_CALL(cudaMalloc(&indexesGpu, indexesSize));
CUDA_SAFE_CALL(cudaMemcpy(indexesGpu, indexes, indexesSize, TO_GPU));
deleteIndexes = 1;
}
//**************************************************************************
//**************************************************************************
// PREPARE GPU
QueryProfileGpu* queryProfileGpu = createQueryProfileGpu(queryProfile);
int gapOpen = scorerGetGapOpen(scorer);
int gapExtend = scorerGetGapExtend(scorer);
int rows = queryProfile->length;
int rowsGpu = queryProfile->height * 4;
int iters = rowsGpu / (THREADS * 4) + (rowsGpu % (THREADS * 4) != 0);
CUDA_SAFE_CALL(cudaMemcpyToSymbol(gapOpen_, &gapOpen, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(gapExtend_, &gapExtend, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(rows_, &rows, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(rowsPadded_, &rowsGpu, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(length_, &indexesLen, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(iters_, &iters, sizeof(int)));
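// iters_ = ceil(rowsPadded_ / (4 * THREADS)) is the number of row bands a
// thread block needs to cover the padded query; the solvers walk the target
// once per band and carry the band boundary through hBus.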
//**************************************************************************
//**************************************************************************
// SOLVE
char* codesGpu = gpuDatabase->codes;
int* startsGpu = gpuDatabase->starts;
int* lengthsGpu = gpuDatabase->lengths;
int* scoresGpu = gpuDatabase->scores;
int2* hBusGpu = gpuDatabase->hBus;
scoringFunction<<<BLOCKS, THREADS>>>(codesGpu, startsGpu, lengthsGpu,
indexesGpu, scoresGpu, hBusGpu);
//**************************************************************************
//**************************************************************************
// SAVE RESULTS
int length = longDatabase->length;
size_t scoresSize = length * sizeof(int);
int* scoresCpu = (int*) malloc(scoresSize);
CUDA_SAFE_CALL(cudaMemcpy(scoresCpu, scoresGpu, scoresSize, FROM_GPU));
int* order = longDatabase->order;
for (int i = 0; i < indexesLen; ++i) {
scores[order[indexes[i]]] = scoresCpu[indexes[i]];
}
free(scoresCpu);
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
deleteQueryProfileGpu(queryProfileGpu);
if (deleteIndexes) {
CUDA_SAFE_CALL(cudaFree(indexesGpu));
}
//**************************************************************************
return NULL;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// GPU KERNELS
__global__ void hwSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus) {
for (int i = blockIdx.x; i < length_; i += gridDim.x) {
hwSolveSingle(indexes[i], codes, starts, lengths, scores, hBus);
}
}
__global__ void nwSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus) {
for (int i = blockIdx.x; i < length_; i += gridDim.x) {
nwSolveSingle(indexes[i], codes, starts, lengths, scores, hBus);
}
}
__global__ void ovSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus) {
for (int i = blockIdx.x; i < length_; i += gridDim.x) {
ovSolveSingle(indexes[i], codes, starts, lengths, scores, hBus);
}
}
__global__ void swSolve(char* codes, int* starts, int* lengths, int* indexes,
int* scores, int2* hBus) {
for (int i = blockIdx.x; i < length_; i += gridDim.x) {
swSolveSingle(indexes[i], codes, starts, lengths, scores, hBus);
}
}
__device__ static int gap(int index) {
return (-gapOpen_ - index * gapExtend_) * (index >= 0);
}
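/*
 * gap(i) is the cumulative boundary penalty -gapOpen - i*gapExtend for
 * i >= 0 and 0 for negative i (e.g. gap(-1) = 0, gap(0) = -gapOpen).
 * As implemented below, the four solvers differ only in initialisation and
 * in where the final score is read: SW is local (cells clamped at 0, max
 * over all cells); NW is global (boundary penalties on both sequences,
 * score read at the last query row in the last column); HW penalises only
 * the query boundary and takes the best score on the last query row; OV
 * uses no boundary penalties and takes the best score on the last row or
 * the last column.
 */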
__device__ void hwSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus) {
__shared__ int scoresShr[MAX_THREADS];
__shared__ int hBusScrShr[MAX_THREADS + 1];
__shared__ int hBusAffShr[MAX_THREADS + 1];
int off = starts[id];
int cols = lengths[id];
int score = SCORE_MIN;
int width = cols * iters_ + 2 * (blockDim.x - 1);
int col = -threadIdx.x;
int row = threadIdx.x * 4;
int iter = 0;
Atom atom;
atom.mch = gap(row - 1);
atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = INT4_SCORE_MIN;
hBusScrShr[threadIdx.x] = 0;
hBusAffShr[threadIdx.x] = SCORE_MIN;
for (int i = 0; i < width; ++i) {
int del;
int valid = col >= 0 && row < rowsPadded_;
if (valid) {
if (iter != 0 && threadIdx.x == 0) {
atom.up = hBus[off + col];
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
char code = codes[off + col];
char4 rowScores = tex2D(qpTexture, code, row >> 2);
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + rowScores.x;
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + rowScores.y;
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + rowScores.z;
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + rowScores.w;
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
if (row + 0 == rows_ - 1) score = max(score, atom.rScr.x);
if (row + 1 == rows_ - 1) score = max(score, atom.rScr.y);
if (row + 2 == rows_ - 1) score = max(score, atom.rScr.z);
if (row + 3 == rows_ - 1) score = max(score, atom.rScr.w);
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (valid) {
if (iter < iters_ - 1 && threadIdx.x == blockDim.x - 1) {
VEC2_ASSIGN(hBus[off + col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
col++;
if (col == cols) {
col = 0;
row += blockDim.x * 4;
iter++;
atom.mch = gap(row - 1);
atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = INT4_SCORE_MIN;
}
__syncthreads();
}
// write all scores
scoresShr[threadIdx.x] = score;
__syncthreads();
// gather scores
if (threadIdx.x == 0) {
for (int i = 1; i < blockDim.x; ++i) {
score = max(score, scoresShr[i]);
}
scores[id] = score;
}
}
__device__ void nwSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus) {
__shared__ int scoresShr[MAX_THREADS];
__shared__ int hBusScrShr[MAX_THREADS + 1];
__shared__ int hBusAffShr[MAX_THREADS + 1];
int off = starts[id];
int cols = lengths[id];
int score = SCORE_MIN;
int width = cols * iters_ + 2 * (blockDim.x - 1);
int col = -threadIdx.x;
int row = threadIdx.x * 4;
int iter = 0;
Atom atom;
atom.mch = gap(row - 1);
atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = INT4_SCORE_MIN;
for (int i = 0; i < width; ++i) {
int del;
int valid = col >= 0 && row < rowsPadded_;
if (valid) {
if (threadIdx.x == 0) {
if (iter == 0) {
atom.up.x = gap(col);
atom.up.y = SCORE_MIN;
} else {
atom.up = hBus[off + col];
}
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
char code = codes[off + col];
char4 rowScores = tex2D(qpTexture, code, row >> 2);
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + rowScores.x;
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + rowScores.y;
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + rowScores.z;
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + rowScores.w;
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (valid) {
if (iter < iters_ - 1 && threadIdx.x == blockDim.x - 1) {
VEC2_ASSIGN(hBus[off + col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
col++;
if (col == cols) {
if (row + 0 == rows_ - 1) score = max(score, atom.lScr.x);
if (row + 1 == rows_ - 1) score = max(score, atom.lScr.y);
if (row + 2 == rows_ - 1) score = max(score, atom.lScr.z);
if (row + 3 == rows_ - 1) score = max(score, atom.lScr.w);
col = 0;
row += blockDim.x * 4;
iter++;
atom.mch = gap(row - 1);
atom.lScr = make_int4(gap(row), gap(row + 1), gap(row + 2), gap(row + 3));
atom.lAff = INT4_SCORE_MIN;
}
__syncthreads();
}
// write all scores
scoresShr[threadIdx.x] = score;
__syncthreads();
// gather scores
if (threadIdx.x == 0) {
for (int i = 1; i < blockDim.x; ++i) {
score = max(score, scoresShr[i]);
}
scores[id] = score;
}
}
__device__ void ovSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus) {
__shared__ int scoresShr[MAX_THREADS];
__shared__ int hBusScrShr[MAX_THREADS + 1];
__shared__ int hBusAffShr[MAX_THREADS + 1];
int off = starts[id];
int cols = lengths[id];
int score = SCORE_MIN;
int width = cols * iters_ + 2 * (blockDim.x - 1);
int col = -threadIdx.x;
int row = threadIdx.x * 4;
int iter = 0;
Atom atom;
atom.mch = 0;
atom.lScr = INT4_ZERO;
atom.lAff = INT4_SCORE_MIN;
hBusScrShr[threadIdx.x] = 0;
hBusAffShr[threadIdx.x] = SCORE_MIN;
for (int i = 0; i < width; ++i) {
int del;
int valid = col >= 0 && row < rowsPadded_;
if (valid) {
if (iter != 0 && threadIdx.x == 0) {
atom.up = hBus[off + col];
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
char code = codes[off + col];
char4 rowScores = tex2D(qpTexture, code, row >> 2);
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + rowScores.x;
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + rowScores.y;
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + rowScores.z;
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + rowScores.w;
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
if (row + 0 == rows_ - 1) score = max(score, atom.rScr.x);
if (row + 1 == rows_ - 1) score = max(score, atom.rScr.y);
if (row + 2 == rows_ - 1) score = max(score, atom.rScr.z);
if (row + 3 == rows_ - 1) score = max(score, atom.rScr.w);
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (valid) {
if (iter < iters_ - 1 && threadIdx.x == blockDim.x - 1) {
VEC2_ASSIGN(hBus[off + col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
col++;
if (col == cols) {
if (row < rows_) {
score = max(score, atom.lScr.x);
score = max(score, atom.lScr.y);
score = max(score, atom.lScr.z);
score = max(score, atom.lScr.w);
}
col = 0;
row += blockDim.x * 4;
iter++;
atom.mch = 0;
atom.lScr = INT4_ZERO;
atom.lAff = INT4_SCORE_MIN;
}
__syncthreads();
}
// write all scores
scoresShr[threadIdx.x] = score;
__syncthreads();
// gather scores
if (threadIdx.x == 0) {
for (int i = 1; i < blockDim.x; ++i) {
score = max(score, scoresShr[i]);
}
scores[id] = score;
}
}
__device__ void swSolveSingle(int id, char* codes, int* starts, int* lengths,
int* scores, int2* hBus) {
__shared__ int scoresShr[MAX_THREADS];
__shared__ int hBusScrShr[MAX_THREADS + 1];
__shared__ int hBusAffShr[MAX_THREADS + 1];
int off = starts[id];
int cols = lengths[id];
int score = 0;
int width = cols * iters_ + 2 * (blockDim.x - 1);
int col = -threadIdx.x;
int row = threadIdx.x * 4;
int iter = 0;
Atom atom;
atom.mch = 0;
atom.lScr = INT4_ZERO;
atom.lAff = INT4_SCORE_MIN;
hBusScrShr[threadIdx.x] = 0;
hBusAffShr[threadIdx.x] = SCORE_MIN;
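// Threads sweep the DP matrix as a staggered anti-diagonal wavefront:
// thread t owns query rows 4*t..4*t+3 of the current band and starts at
// column -t. Each iteration it takes its "up" values from the thread
// covering the rows directly above via shared memory (hBusScrShr/hBusAffShr);
// the last thread of a band spills its bottom row into the global hBus
// buffer, where thread 0 of the next band picks it up.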
for (int i = 0; i < width; ++i) {
int del;
int valid = col >= 0 && row < rowsPadded_;
if (valid) {
if (iter != 0 && threadIdx.x == 0) {
atom.up = hBus[off + col];
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
char code = codes[off + col];
char4 rowScores = tex2D(qpTexture, code, row >> 2);
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + rowScores.x;
atom.rScr.x = MAX4(0, mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + rowScores.y;
atom.rScr.y = MAX4(0, mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + rowScores.z;
atom.rScr.z = MAX4(0, mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + rowScores.w;
atom.rScr.w = MAX4(0, mch, del, ins);
atom.rAff.w = ins;
score = max(score, atom.rScr.x);
score = max(score, atom.rScr.y);
score = max(score, atom.rScr.z);
score = max(score, atom.rScr.w);
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (valid) {
if (iter < iters_ - 1 && threadIdx.x == blockDim.x - 1) {
VEC2_ASSIGN(hBus[off + col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
col++;
if (col == cols) {
col = 0;
row += blockDim.x * 4;
iter++;
atom.mch = 0;
atom.lScr = INT4_ZERO;
atom.lAff = INT4_SCORE_MIN;
}
__syncthreads();
}
// write all scores
scoresShr[threadIdx.x] = score;
__syncthreads();
// gather scores
if (threadIdx.x == 0) {
for (int i = 1; i < blockDim.x; ++i) {
score = max(score, scoresShr[i]);
}
scores[id] = score;
}
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// QUERY PROFILE
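// The profile pads the query length up to a multiple of 8; each char4
// element packs the scores of four consecutive query residues against one
// database code, and the column for the highest code (j == width - 1) is
// left zeroed.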
static QueryProfile* createQueryProfile(Chain* query, Scorer* scorer) {
int rows = chainGetLength(query);
int rowsGpu = rows + (8 - rows % 8) % 8;
int width = scorerGetMaxCode(scorer) + 1;
int height = rowsGpu / 4;
char* row = (char*) malloc(rows * sizeof(char));
chainCopyCodes(query, row);
size_t size = width * height * sizeof(char4);
char4* data = (char4*) malloc(size);
memset(data, 0, size);
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width - 1; ++j) {
char4 scr;
scr.x = i * 4 + 0 >= rows ? 0 : scorerScore(scorer, row[i * 4 + 0], j);
scr.y = i * 4 + 1 >= rows ? 0 : scorerScore(scorer, row[i * 4 + 1], j);
scr.z = i * 4 + 2 >= rows ? 0 : scorerScore(scorer, row[i * 4 + 2], j);
scr.w = i * 4 + 3 >= rows ? 0 : scorerScore(scorer, row[i * 4 + 3], j);
data[i * width + j] = scr;
}
}
free(row);
QueryProfile* queryProfile = (QueryProfile*) malloc(sizeof(QueryProfile));
queryProfile->data = data;
queryProfile->width = width;
queryProfile->height = height;
queryProfile->length = rows;
queryProfile->size = size;
return queryProfile;
}
static void deleteQueryProfile(QueryProfile* queryProfile) {
free(queryProfile->data);
free(queryProfile);
}
static QueryProfileGpu* createQueryProfileGpu(QueryProfile* queryProfile) {
int width = queryProfile->width;
int height = queryProfile->height;
size_t size = queryProfile->size;
char4* data = queryProfile->data;
cudaArray* dataGpu;
CUDA_SAFE_CALL(cudaMallocArray(&dataGpu, &qpTexture.channelDesc, width, height));
CUDA_SAFE_CALL(cudaMemcpyToArray (dataGpu, 0, 0, data, size, TO_GPU));
CUDA_SAFE_CALL(cudaBindTextureToArray(qpTexture, dataGpu));
qpTexture.addressMode[0] = cudaAddressModeClamp;
qpTexture.addressMode[1] = cudaAddressModeClamp;
qpTexture.filterMode = cudaFilterModePoint;
qpTexture.normalized = false;
size_t queryProfileGpuSize = sizeof(QueryProfileGpu);
QueryProfileGpu* queryProfileGpu = (QueryProfileGpu*) malloc(queryProfileGpuSize);
queryProfileGpu->data = dataGpu;
return queryProfileGpu;
}
static void deleteQueryProfileGpu(QueryProfileGpu* queryProfileGpu) {
CUDA_SAFE_CALL(cudaFreeArray(queryProfileGpu->data));
CUDA_SAFE_CALL(cudaUnbindTexture(qpTexture));
free(queryProfileGpu);
}
//------------------------------------------------------------------------------
//******************************************************************************
#endif // __CUDACC__
|
ac387512bf5a94532517932d7448d9052b0b063e.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
*
* nullKernelAsync.cu
*
* Microbenchmark for throughput of asynchronous kernel launch.
*
* Build with: nvcc -I ../chLib <options> nullKernelAsync.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include "chTimer.h"
int
main( int argc, char *argv[] )
{
int mem=atoi(argv[1]);
int vergleich=atoi(argv[2]); // pageable malloc (vergleich==1) or pinned host memory (else)
int vergleich2=atoi(argv[3]); // D2H copy (vergleich2==1) or H2D copy (else)
bool *dmem;
bool *hmem;
hipMalloc((void**)&dmem,mem*1000); //Allocate GPU memory
if (vergleich==1) hmem= (bool*) malloc(mem*1000); //Allocate CPU memory
else hipHostMalloc ((void**) &hmem,mem*1000) ;
const int cIterations = 10000;
fflush( stdout );
chTimerTimestamp start, stop;
chTimerGetTime( &start );
for ( int i = 0; i < cIterations; i++ ) {
if (vergleich2==1) hipMemcpy(hmem,dmem,mem*1000,hipMemcpyDeviceToHost);//transfer data
else hipMemcpy(dmem,hmem,mem*1000,hipMemcpyHostToDevice);
}
chTimerGetTime( &stop );
{
double microseconds = 1e6*chTimerElapsedTime( &start, &stop );
double usPerLaunch = microseconds / (float) cIterations;
printf( "%.2f\n", usPerLaunch );
}
hipFree(dmem);
if (vergleich==1) free(hmem);
else hipHostFree(hmem); // pinned memory from hipHostMalloc must be released with hipHostFree
return 0;
}
|
ac387512bf5a94532517932d7448d9052b0b063e.cu
|
/*
*
* nullKernelAsync.cu
*
* Microbenchmark for throughput of asynchronous kernel launch.
*
* Build with: nvcc -I ../chLib <options> nullKernelAsync.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include "chTimer.h"
int
main( int argc, char *argv[] )
{
int mem=atoi(argv[1]);
int vergleich=atoi(argv[2]); // pageable malloc (vergleich==1) or pinned host memory (else)
int vergleich2=atoi(argv[3]); // D2H copy (vergleich2==1) or H2D copy (else)
bool *dmem;
bool *hmem;
cudaMalloc((void**)&dmem,mem*1000); //Allocate GPU memory
if (vergleich==1) hmem= (bool*) malloc(mem*1000); //Allocate CPU memory
else cudaMallocHost ((void**) &hmem,mem*1000) ;
const int cIterations = 10000;
fflush( stdout );
chTimerTimestamp start, stop;
chTimerGetTime( &start );
for ( int i = 0; i < cIterations; i++ ) {
if (vergleich2==1) cudaMemcpy(hmem,dmem,mem*1000,cudaMemcpyDeviceToHost);//transfer data
else cudaMemcpy(dmem,hmem,mem*1000,cudaMemcpyHostToDevice);
}
chTimerGetTime( &stop );
{
double microseconds = 1e6*chTimerElapsedTime( &start, &stop );
double usPerLaunch = microseconds / (float) cIterations;
printf( "%.2f\n", usPerLaunch );
}
cudaFree(dmem);
if (vergleich==1) free(hmem);
else cudaFreeHost(hmem); // pinned memory from cudaMallocHost must be released with cudaFreeHost
return 0;
}
|
ba6b44859095a6224e88e29eb80410e669e577d8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraphTex &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=False $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=1 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=False $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=texture $ cuda.use_worklist_slots=True $ cuda.worklist_type=texture";
#define CAVLEN 256
#define BCLEN 1024
#include "dmrggc.inc"
static const int __tb_refine = TB_SIZE;
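// check_triangles scans elements [start, mesh.nelements) with a grid stride:
// non-deleted, non-segment triangles are (re)classified by angleLT (defined
// in dmrggc.inc, presumably a minimum-angle quality test), and every bad
// triangle is counted into bad_triangles and pushed onto the output worklist.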
__global__ void check_triangles(Mesh mesh, unsigned int * bad_triangles, int start, WorklistT in_wl, WorklistT out_wl)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
if (tid == 0)
in_wl.reset_next_slot();
index_type ele_end;
// FP: "1 -> 2;
uint3* el ;
int count = 0;
// FP: "2 -> 3;
ele_end = ((mesh).nelements);
for (index_type ele = start + tid; ele < ele_end; ele += nthreads)
{
if (ele < mesh.nelements)
{
if (!(mesh.isdel[ele] || IS_SEGMENT(mesh.elements[ele])))
{
if (!mesh.isbad[ele])
{
el = &mesh.elements[ele];
mesh.isbad[ele] = (angleLT(mesh, el->x, el->y, el->z) || angleLT(mesh, el->z, el->x, el->y) || angleLT(mesh, el->y, el->z, el->x));
}
if (mesh.isbad[ele])
{
count++;
(out_wl).push(ele);
}
}
}
}
// FP: "15 -> 16;
atomicAdd(bad_triangles, count);
// FP: "16 -> 17;
}
__global__ void __launch_bounds__(TB_SIZE) refine(Mesh mesh, int debg, uint * nnodes, uint * nelements, WorklistT in_wl, WorklistT out_wl, WorklistT re_wl, ExclusiveLocks _ex, GlobalBarrier gb)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
if (tid == 0)
in_wl.reset_next_slot();
index_type wlele_end;
index_type wlele_rup;
index_type wlele_block_size;
index_type wlele_block_start;
// FP: "1 -> 2;
uint cavity[CAVLEN] ;
uint nc = 0;
uint boundary[BCLEN] ;
uint bc = 0;
uint blnodes[BCLEN/4] ;
bool repush = false;
int stage = 0;
int x = 0;
// FP: "2 -> 3;
wlele_end = *((volatile index_type *) (in_wl).dindex);
wlele_rup = ((0) + roundup(((*((volatile index_type *) (in_wl).dindex)) - (0)), (nthreads)));
wlele_block_size = wlele_rup / nthreads;
wlele_block_start = (0 + tid) * wlele_block_size;
for (index_type wlele = wlele_block_start; wlele < (wlele_block_start + wlele_block_size) && (wlele < wlele_rup); wlele++)
{
FORD cx;
FORD cy;
bool pop;
int ele;
nc = 0;
bc = 0;
repush = false;
stage = 0;
pop = (in_wl).pop_id(wlele, ele);
if (pop && ele < mesh.nelements && mesh.isbad[ele] && !mesh.isdel[ele])
{
uint oldcav;
cavity[nc++] = ele;
do
{
oldcav = cavity[0];
cavity[0] = opposite(mesh, ele);
}
while (cavity[0] != oldcav);
if (!build_cavity(mesh, cavity, nc, CAVLEN, boundary, bc, cx, cy))
{
build_cavity(mesh, cavity, nc, CAVLEN, boundary, bc, cx, cy);
}
}
int nodes_added = 0;
int elems_added = 0;
{
_ex.mark_p1(nc, (int *) cavity, tid);
_ex.mark_p1_iterator(2, bc, 4, (int *) boundary, tid);
gb.Sync();
_ex.mark_p2(nc, (int *) cavity, tid);
_ex.mark_p2_iterator(2, bc, 4, (int *) boundary, tid);
gb.Sync();
int _x = 1;
_x &= _ex.owns(nc, (int *) cavity, tid);
_x &= _ex.owns_iterator(2, bc, 4, (int *) boundary, tid);
if (_x)
{
if (nc > 0)
{
nodes_added = 1;
elems_added = (bc >> 2) + (IS_SEGMENT(mesh.elements[cavity[0]]) ? 2 : 0);
uint cnode ;
uint cseg1 = 0;
uint cseg2 = 0;
uint nelements_added ;
uint oldelements ;
uint newelemndx ;
cnode = add_node(mesh, cx, cy, atomicAdd(nnodes, 1));
nelements_added = elems_added;
oldelements = atomicAdd(nelements, nelements_added);
newelemndx = oldelements;
if (IS_SEGMENT(mesh.elements[cavity[0]]))
{
cseg1 = add_segment(mesh, mesh.elements[cavity[0]].x, cnode, newelemndx++);
cseg2 = add_segment(mesh, cnode, mesh.elements[cavity[0]].y, newelemndx++);
}
for (int i = 0; i < bc; i+=4)
{
uint ntri = add_triangle(mesh, boundary[i], boundary[i+1], cnode, boundary[i+2], boundary[i+3], newelemndx++);
}
assert(oldelements + nelements_added == newelemndx);
setup_neighbours(mesh, oldelements, newelemndx);
repush = true;
for (int i = 0; i < nc; i++)
{
mesh.isdel[cavity[i]] = true;
if (cavity[i] == ele)
{
repush = false;
}
}
}
}
else
{
repush = true;
}
}
gb.Sync();
if (repush)
{
(out_wl).push(ele);
continue;
}
}
}
void refine_mesh(ShMesh& mesh, dim3 blocks, dim3 threads)
{
ExclusiveLocks refine_ex_locks(mesh.maxnelements);
static GlobalBarrierLifetime refine_barrier;
static bool refine_barrier_inited;
PipeContextT<WorklistT> pipe;
// FP: "1 -> 2;
Shared<uint> nbad (1);
Mesh gmesh (mesh);
Shared<uint> nelements (1);
Shared<uint> nnodes (1);
int cnbad ;
bool orig = false;
ggc::Timer t ("total");
// FP: "2 -> 3;
// FP: "3 -> 4;
static const size_t refine_residency = maximum_residency(refine, __tb_refine, 0);
static const size_t refine_blocks = GG_MIN(blocks.x, ggc_get_nSM() * refine_residency);
if(!refine_barrier_inited) { refine_barrier.Setup(refine_blocks); refine_barrier_inited = true;};
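// The refine kernel synchronises across the whole grid through a software
// GlobalBarrier, so its launch is capped at the number of thread blocks that
// can be co-resident on the device (occupancy * number of SMs); launching
// more blocks than can run concurrently could deadlock at the barrier.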
// FP: "4 -> 5;
find_neighbours_cpu(mesh);
gmesh.refresh(mesh);
*(nelements.cpu_wr_ptr(true)) = mesh.nelements;
*(nnodes.cpu_wr_ptr(true)) = mesh.nnodes;
// FP: "5 -> 6;
pipe = PipeContextT<WorklistT>(mesh.nelements);
{
{
int lastnelements = 0;
// FP: "7 -> 8;
*(nbad.cpu_wr_ptr(true)) = 0;
t.start();
// FP: "8 -> 9;
pipe.out_wl().will_write();
hipLaunchKernelGGL(( check_triangles) , dim3(blocks), dim3(threads), 0, 0, gmesh, nbad.gpu_wr_ptr(), 0, pipe.in_wl(), pipe.out_wl());
pipe.in_wl().swap_slots();
pipe.advance2();
// FP: "9 -> 10;
printf("%d initial bad triangles\n", *(nbad.cpu_rd_ptr()) );
// FP: "10 -> 11;
while (pipe.in_wl().nitems())
{
lastnelements = gmesh.nelements;
{
pipe.out_wl().will_write();
pipe.re_wl().will_write();
hipLaunchKernelGGL(( refine) , dim3(refine_blocks), dim3(__tb_refine), 0, 0, gmesh, 32, nnodes.gpu_wr_ptr(), nelements.gpu_wr_ptr(), pipe.in_wl(), pipe.out_wl(), pipe.re_wl(), refine_ex_locks, refine_barrier);
pipe.in_wl().swap_slots();
pipe.retry2();
}
gmesh.nnodes = mesh.nnodes = *(nnodes.cpu_rd_ptr());
gmesh.nelements = mesh.nelements = *(nelements.cpu_rd_ptr());
*(nbad.cpu_wr_ptr(true)) = 0;
printf("checking triangles ...\n");
pipe.out_wl().will_write();
if (orig)
hipLaunchKernelGGL(( check_triangles_orig) , dim3(blocks), dim3(threads), 0, 0, gmesh, nbad.gpu_wr_ptr(), lastnelements, pipe.in_wl(), pipe.out_wl());
else
hipLaunchKernelGGL(( check_triangles) , dim3(blocks), dim3(threads), 0, 0, gmesh, nbad.gpu_wr_ptr(), lastnelements, pipe.in_wl(), pipe.out_wl());
pipe.in_wl().swap_slots();
pipe.advance2();
printf("%d bad triangles\n", *(nbad.cpu_rd_ptr()) );
}
// FP: "18 -> 19;
t.stop();
printf("time: %llu ns\n", t.duration());
// FP: "19 -> 20;
{
*(nbad.cpu_wr_ptr(true)) = 0;
// FP: "21 -> 22;
pipe.out_wl().will_write();
hipLaunchKernelGGL(( check_triangles) , dim3(blocks), dim3(threads), 0, 0, gmesh, nbad.gpu_wr_ptr(), 0, pipe.in_wl(), pipe.out_wl());
pipe.in_wl().swap_slots();
pipe.advance2();
// FP: "22 -> 23;
printf("%d (%d) final bad triangles\n", *(nbad.cpu_rd_ptr()), pipe.in_wl().nitems() );
// FP: "23 -> 24;
}
// FP: "20 -> 21;
}
}
pipe.free();
// FP: "6 -> 7;
}
#include "main.inc"
|
ba6b44859095a6224e88e29eb80410e669e577d8.cu
|
/*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraphTex &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=False $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=1 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=False $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=texture $ cuda.use_worklist_slots=True $ cuda.worklist_type=texture";
#define CAVLEN 256
#define BCLEN 1024
#include "dmrggc.inc"
static const int __tb_refine = TB_SIZE;
__global__ void check_triangles(Mesh mesh, unsigned int * bad_triangles, int start, WorklistT in_wl, WorklistT out_wl)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
if (tid == 0)
in_wl.reset_next_slot();
index_type ele_end;
// FP: "1 -> 2;
uint3* el ;
int count = 0;
// FP: "2 -> 3;
ele_end = ((mesh).nelements);
for (index_type ele = start + tid; ele < ele_end; ele += nthreads)
{
if (ele < mesh.nelements)
{
if (!(mesh.isdel[ele] || IS_SEGMENT(mesh.elements[ele])))
{
if (!mesh.isbad[ele])
{
el = &mesh.elements[ele];
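// An element is flagged bad when any of its three angles fails the angleLT
// quality test (the usual Delaunay-refinement minimum-angle criterion).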
mesh.isbad[ele] = (angleLT(mesh, el->x, el->y, el->z) || angleLT(mesh, el->z, el->x, el->y) || angleLT(mesh, el->y, el->z, el->x));
}
if (mesh.isbad[ele])
{
count++;
(out_wl).push(ele);
}
}
}
}
// FP: "15 -> 16;
atomicAdd(bad_triangles, count);
// FP: "16 -> 17;
}
__global__ void __launch_bounds__(TB_SIZE) refine(Mesh mesh, int debg, uint * nnodes, uint * nelements, WorklistT in_wl, WorklistT out_wl, WorklistT re_wl, ExclusiveLocks _ex, GlobalBarrier gb)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
if (tid == 0)
in_wl.reset_next_slot();
index_type wlele_end;
index_type wlele_rup;
index_type wlele_block_size;
index_type wlele_block_start;
// FP: "1 -> 2;
uint cavity[CAVLEN] ;
uint nc = 0;
uint boundary[BCLEN] ;
uint bc = 0;
uint blnodes[BCLEN/4] ;
bool repush = false;
int stage = 0;
int x = 0;
// FP: "2 -> 3;
wlele_end = *((volatile index_type *) (in_wl).dindex);
wlele_rup = ((0) + roundup(((*((volatile index_type *) (in_wl).dindex)) - (0)), (nthreads)));
wlele_block_size = wlele_rup / nthreads;
wlele_block_start = (0 + tid) * wlele_block_size;
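// Blocked partitioning of the worklist: the item count is rounded up to a
// multiple of nthreads, and each thread then scans one contiguous chunk of
// wlele_block_size entries starting at tid * wlele_block_size.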
for (index_type wlele = wlele_block_start; wlele < (wlele_block_start + wlele_block_size) && (wlele < wlele_rup); wlele++)
{
FORD cx;
FORD cy;
bool pop;
int ele;
nc = 0;
bc = 0;
repush = false;
stage = 0;
pop = (in_wl).pop_id(wlele, ele);
if (pop && ele < mesh.nelements && mesh.isbad[ele] && !mesh.isdel[ele])
{
uint oldcav;
cavity[nc++] = ele;
do
{
oldcav = cavity[0];
cavity[0] = opposite(mesh, ele);
}
while (cavity[0] != oldcav);
if (!build_cavity(mesh, cavity, nc, CAVLEN, boundary, bc, cx, cy))
{
build_cavity(mesh, cavity, nc, CAVLEN, boundary, bc, cx, cy);
}
}
int nodes_added = 0;
int elems_added = 0;
{
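// Two-phase conflict detection: mark the cavity and its boundary entries in
// two rounds separated by global barriers, then proceed only if this thread
// owns every marked element; otherwise the work item is repushed and retried.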
_ex.mark_p1(nc, (int *) cavity, tid);
_ex.mark_p1_iterator(2, bc, 4, (int *) boundary, tid);
gb.Sync();
_ex.mark_p2(nc, (int *) cavity, tid);
_ex.mark_p2_iterator(2, bc, 4, (int *) boundary, tid);
gb.Sync();
int _x = 1;
_x &= _ex.owns(nc, (int *) cavity, tid);
_x &= _ex.owns_iterator(2, bc, 4, (int *) boundary, tid);
if (_x)
{
if (nc > 0)
{
nodes_added = 1;
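// Each boundary edge is stored as 4 uints, so bc >> 2 counts the new
// triangles around the inserted node; splitting a segment adds 2 more elements.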
elems_added = (bc >> 2) + (IS_SEGMENT(mesh.elements[cavity[0]]) ? 2 : 0);
uint cnode ;
uint cseg1 = 0;
uint cseg2 = 0;
uint nelements_added ;
uint oldelements ;
uint newelemndx ;
cnode = add_node(mesh, cx, cy, atomicAdd(nnodes, 1));
nelements_added = elems_added;
oldelements = atomicAdd(nelements, nelements_added);
newelemndx = oldelements;
if (IS_SEGMENT(mesh.elements[cavity[0]]))
{
cseg1 = add_segment(mesh, mesh.elements[cavity[0]].x, cnode, newelemndx++);
cseg2 = add_segment(mesh, cnode, mesh.elements[cavity[0]].y, newelemndx++);
}
for (int i = 0; i < bc; i+=4)
{
uint ntri = add_triangle(mesh, boundary[i], boundary[i+1], cnode, boundary[i+2], boundary[i+3], newelemndx++);
}
assert(oldelements + nelements_added == newelemndx);
setup_neighbours(mesh, oldelements, newelemndx);
repush = true;
for (int i = 0; i < nc; i++)
{
mesh.isdel[cavity[i]] = true;
if (cavity[i] == ele)
{
repush = false;
}
}
}
}
else
{
repush = true;
}
}
gb.Sync();
if (repush)
{
(out_wl).push(ele);
continue;
}
}
}
void refine_mesh(ShMesh& mesh, dim3 blocks, dim3 threads)
{
ExclusiveLocks refine_ex_locks(mesh.maxnelements);
static GlobalBarrierLifetime refine_barrier;
static bool refine_barrier_inited;
PipeContextT<WorklistT> pipe;
// FP: "1 -> 2;
Shared<uint> nbad (1);
Mesh gmesh (mesh);
Shared<uint> nelements (1);
Shared<uint> nnodes (1);
int cnbad ;
bool orig = false;
ggc::Timer t ("total");
// FP: "2 -> 3;
// FP: "3 -> 4;
static const size_t refine_residency = maximum_residency(refine, __tb_refine, 0);
static const size_t refine_blocks = GG_MIN(blocks.x, ggc_get_nSM() * refine_residency);
if(!refine_barrier_inited) { refine_barrier.Setup(refine_blocks); refine_barrier_inited = true;};
// FP: "4 -> 5;
find_neighbours_cpu(mesh);
gmesh.refresh(mesh);
*(nelements.cpu_wr_ptr(true)) = mesh.nelements;
*(nnodes.cpu_wr_ptr(true)) = mesh.nnodes;
// FP: "5 -> 6;
pipe = PipeContextT<WorklistT>(mesh.nelements);
{
{
int lastnelements = 0;
// FP: "7 -> 8;
*(nbad.cpu_wr_ptr(true)) = 0;
t.start();
// FP: "8 -> 9;
pipe.out_wl().will_write();
check_triangles <<<blocks, threads>>>(gmesh, nbad.gpu_wr_ptr(), 0, pipe.in_wl(), pipe.out_wl());
pipe.in_wl().swap_slots();
pipe.advance2();
// FP: "9 -> 10;
printf("%d initial bad triangles\n", *(nbad.cpu_rd_ptr()) );
// FP: "10 -> 11;
while (pipe.in_wl().nitems())
{
lastnelements = gmesh.nelements;
{
pipe.out_wl().will_write();
pipe.re_wl().will_write();
refine <<<refine_blocks, __tb_refine>>>(gmesh, 32, nnodes.gpu_wr_ptr(), nelements.gpu_wr_ptr(), pipe.in_wl(), pipe.out_wl(), pipe.re_wl(), refine_ex_locks, refine_barrier);
pipe.in_wl().swap_slots();
pipe.retry2();
}
gmesh.nnodes = mesh.nnodes = *(nnodes.cpu_rd_ptr());
gmesh.nelements = mesh.nelements = *(nelements.cpu_rd_ptr());
*(nbad.cpu_wr_ptr(true)) = 0;
printf("checking triangles ...\n");
pipe.out_wl().will_write();
if (orig)
check_triangles_orig <<<blocks, threads>>>(gmesh, nbad.gpu_wr_ptr(), lastnelements, pipe.in_wl(), pipe.out_wl());
else
check_triangles <<<blocks, threads>>>(gmesh, nbad.gpu_wr_ptr(), lastnelements, pipe.in_wl(), pipe.out_wl());
pipe.in_wl().swap_slots();
pipe.advance2();
printf("%d bad triangles\n", *(nbad.cpu_rd_ptr()) );
}
// FP: "18 -> 19;
t.stop();
printf("time: %llu ns\n", t.duration());
// FP: "19 -> 20;
{
*(nbad.cpu_wr_ptr(true)) = 0;
// FP: "21 -> 22;
pipe.out_wl().will_write();
check_triangles <<<blocks, threads>>>(gmesh, nbad.gpu_wr_ptr(), 0, pipe.in_wl(), pipe.out_wl());
pipe.in_wl().swap_slots();
pipe.advance2();
// FP: "22 -> 23;
printf("%d (%d) final bad triangles\n", *(nbad.cpu_rd_ptr()), pipe.in_wl().nitems() );
// FP: "23 -> 24;
}
// FP: "20 -> 21;
}
}
pipe.free();
// FP: "6 -> 7;
}
#include "main.inc"
|
8773a82bb51d3bc38de17b54d551a0ad5f68caaa.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cumlHandle.hpp>
#include <common/cuml_comms_int.hpp>
#include <common/device_buffer.hpp>
#include <cuda_utils.cuh>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/linear_model/preprocess_mg.hpp>
#include <cuml/linear_model/ridge_mg.hpp>
#include <linalg/add.cuh>
#include <linalg/gemm.cuh>
#include <matrix/math.cuh>
#include <matrix/matrix.cuh>
#include <opg/linalg/mv_aTb.hpp>
#include <opg/linalg/svd.hpp>
#include <opg/stats/mean.hpp>
using namespace MLCommon;
namespace ML {
namespace Ridge {
namespace opg {
template <typename T>
void ridgeSolve(const cumlHandle &handle, T *S, T *V,
std::vector<Matrix::Data<T> *> &U,
const Matrix::PartDescriptor &UDesc,
const std::vector<Matrix::Data<T> *> &b, const T *alpha,
const int n_alpha, T *w, hipStream_t *streams, int n_streams,
bool verbose) {
auto cublasH = handle.getImpl().getCublasHandle();
auto cusolverH = handle.getImpl().getcusolverDnHandle();
const MLCommon::cumlCommunicator &comm = handle.getImpl().getCommunicator();
const std::shared_ptr<deviceAllocator> allocator =
handle.getImpl().getDeviceAllocator();
// Implements this: w = V * inv(S^2 + λ*I) * S * U^T * b
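// Derivation sketch (standard ridge-via-SVD algebra): for the thin SVD
// A = U S V^T, the ridge solution w = (A^T A + λ*I)^{-1} A^T b reduces to
// w = V (S^2 + λ*I)^{-1} S U^T b = V * diag(S / (S^2 + λ)) * (U^T b).
// Below, λ is alpha[0]; S and V are rescaled in place and U^T b is formed
// by mv_aTb before the final gemm.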
T *S_nnz;
T alp = T(1);
T beta = T(0);
T thres = T(1e-10);
Matrix::setSmallValuesZero(S, UDesc.N, streams[0], thres);
// TO-DO: Update to use `device_buffer` here
// Tracking issue: https://github.com/rapidsai/cuml/issues/2524
allocate(S_nnz, UDesc.N, true);
copy(S_nnz, S, UDesc.N, streams[0]);
Matrix::power(S_nnz, UDesc.N, streams[0]);
LinAlg::addScalar(S_nnz, S_nnz, alpha[0], UDesc.N, streams[0]);
Matrix::matrixVectorBinaryDivSkipZero(S, S_nnz, size_t(1), UDesc.N, false,
true, streams[0], true);
Matrix::matrixVectorBinaryMult(V, S, UDesc.N, UDesc.N, false, true,
streams[0]);
Matrix::Data<T> S_nnz_data;
S_nnz_data.totalSize = UDesc.N;
S_nnz_data.ptr = S_nnz;
LinAlg::opg::mv_aTb(S_nnz_data, U, UDesc, b, comm, allocator, streams,
n_streams, cublasH);
LinAlg::gemm(V, UDesc.N, UDesc.N, S_nnz, w, UDesc.N, 1, HIPBLAS_OP_N,
HIPBLAS_OP_N, alp, beta, cublasH, streams[0]);
CUDA_CHECK(hipFree(S_nnz));
}
template <typename T>
void ridgeEig(cumlHandle &handle, const std::vector<Matrix::Data<T> *> &A,
const Matrix::PartDescriptor &ADesc,
const std::vector<Matrix::Data<T> *> &b, const T *alpha,
const int n_alpha, T *coef, hipStream_t *streams, int n_streams,
bool verbose) {
const MLCommon::cumlCommunicator &comm = handle.getImpl().getCommunicator();
const hipblasHandle_t cublas_handle = handle.getImpl().getCublasHandle();
const hipsolverDnHandle_t cusolver_handle =
handle.getImpl().getcusolverDnHandle();
const std::shared_ptr<deviceAllocator> allocator =
handle.getImpl().getDeviceAllocator();
int rank = comm.getRank();
device_buffer<T> S(allocator, streams[0], ADesc.N);
device_buffer<T> V(allocator, streams[0], ADesc.N * ADesc.N);
std::vector<Matrix::Data<T> *> U;
std::vector<Matrix::Data<T>> U_temp;
std::vector<Matrix::RankSizePair *> partsToRanks = ADesc.blocksOwnedBy(rank);
size_t total_size = 0;
for (int i = 0; i < partsToRanks.size(); i++) {
total_size += partsToRanks[i]->size;
}
total_size = total_size * ADesc.N;
device_buffer<T> U_parts(allocator, streams[0], total_size);
T *curr_ptr = U_parts.data();
for (int i = 0; i < partsToRanks.size(); i++) {
Matrix::Data<T> d;
d.totalSize = partsToRanks[i]->size;
d.ptr = curr_ptr;
curr_ptr = curr_ptr + (partsToRanks[i]->size * ADesc.N);
U_temp.push_back(d);
}
for (int i = 0; i < A.size(); i++) {
U.push_back(&(U_temp[i]));
}
LinAlg::opg::svdEig(A, ADesc, U, S.data(), V.data(), comm, allocator, streams,
n_streams, cublas_handle, cusolver_handle);
ridgeSolve(handle, S.data(), V.data(), U, ADesc, b, alpha, n_alpha, coef,
streams, n_streams, verbose);
}
template <typename T>
void fit_impl(cumlHandle &handle, std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *alpha, int n_alpha,
T *coef, T *intercept, bool fit_intercept, bool normalize,
int algo, hipStream_t *streams, int n_streams, bool verbose) {
const std::shared_ptr<deviceAllocator> allocator =
handle.getImpl().getDeviceAllocator();
device_buffer<T> mu_input(allocator, streams[0]);
device_buffer<T> norm2_input(allocator, streams[0]);
device_buffer<T> mu_labels(allocator, streams[0]);
if (fit_intercept) {
mu_input.resize(input_desc.N, streams[0]);
mu_labels.resize(1, streams[0]);
if (normalize) {
norm2_input.resize(input_desc.N, streams[0]);
}
GLM::opg::preProcessData(handle, input_data, input_desc, labels,
mu_input.data(), mu_labels.data(),
norm2_input.data(), fit_intercept, normalize,
streams, n_streams, verbose);
}
if (algo == 0 || input_desc.N == 1) {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
} else if (algo == 1) {
ridgeEig(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
streams, n_streams, verbose);
} else {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
}
if (fit_intercept) {
GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef,
intercept, mu_input.data(), mu_labels.data(),
norm2_input.data(), fit_intercept, normalize,
streams, n_streams, verbose);
} else {
*intercept = T(0);
}
}
/**
* @brief performs MNMG fit operation for the ridge regression
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param n_rows: number of rows of the input data
* @input param n_cols: number of cols of the input data
* @input param labels: labels data
* @input param alpha: ridge parameter
* @input param n_alpha: number of ridge parameters. Only one parameter is supported right now.
* @output param coef: learned regression coefficients
* @output param intercept: intercept value
* @input param fit_intercept: fit intercept or not
* @input param normalize: normalize the data or not
* @input param verbose
*/
template <typename T>
void fit_impl(cumlHandle &handle, std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *alpha, int n_alpha,
T *coef, T *intercept, bool fit_intercept, bool normalize,
int algo, bool verbose) {
int rank = handle.getImpl().getCommunicator().getRank();
// TODO: These streams should come from cumlHandle
// Tracking issue: https://github.com/rapidsai/cuml/issues/2470
int n_streams = input_desc.blocksOwnedBy(rank).size();
hipStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamCreate(&streams[i]));
}
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, streams, n_streams,
verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamDestroy(streams[i]));
}
}
template <typename T>
void predict_impl(cumlHandle &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc, T *coef, T intercept,
std::vector<Matrix::Data<T> *> &preds, hipStream_t *streams,
int n_streams, bool verbose) {
std::vector<Matrix::RankSizePair *> local_blocks = input_desc.partsToRanks;
T alpha = T(1);
T beta = T(0);
for (int i = 0; i < input_data.size(); i++) {
int si = i % n_streams;
LinAlg::gemm(input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef,
preds[i]->ptr, local_blocks[i]->size, size_t(1), HIPBLAS_OP_N,
HIPBLAS_OP_N, alpha, beta, handle.getImpl().getCublasHandle(),
streams[si]);
LinAlg::addScalar(preds[i]->ptr, preds[i]->ptr, intercept,
local_blocks[i]->size, streams[si]);
}
}
template <typename T>
void predict_impl(cumlHandle &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<T> **input, size_t n_rows,
size_t n_cols, T *coef, T intercept, Matrix::Data<T> **preds,
bool verbose) {
int rank = handle.getImpl().getCommunicator().getRank();
std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes,
rank_sizes + n_parts);
std::vector<Matrix::Data<T> *> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T> *> preds_data(preds, preds + n_parts);
// TODO: These streams should come from cumlHandle
int n_streams = n_parts;
hipStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamCreate(&streams[i]));
}
predict_impl(handle, input_data, input_desc, coef, intercept, preds_data,
streams, n_streams, verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamDestroy(streams[i]));
}
}
void fit(cumlHandle &handle, std::vector<Matrix::Data<float> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<float> *> &labels, float *alpha, int n_alpha,
float *coef, float *intercept, bool fit_intercept, bool normalize,
int algo, bool verbose) {
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, verbose);
}
void fit(cumlHandle &handle, std::vector<Matrix::Data<double> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<double> *> &labels, double *alpha,
int n_alpha, double *coef, double *intercept, bool fit_intercept,
bool normalize, int algo, bool verbose) {
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, verbose);
}
void predict(cumlHandle &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<float> **input, size_t n_rows,
size_t n_cols, float *coef, float intercept,
Matrix::Data<float> **preds, bool verbose) {
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
intercept, preds, verbose);
}
void predict(cumlHandle &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<double> **input, size_t n_rows,
size_t n_cols, double *coef, double intercept,
Matrix::Data<double> **preds, bool verbose) {
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
intercept, preds, verbose);
}
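// Minimal calling sketch for the public fit() above (hypothetical names:
// X_parts, y_parts, ranksAndSizes, d_coef are placeholders; assumes the
// cumlHandle's communicator is already initialized):
//   std::vector<Matrix::Data<float>*> X_parts, y_parts;  // per-rank blocks
//   Matrix::PartDescriptor desc(n_rows, n_cols, ranksAndSizes, rank);
//   float alpha = 1.0f, intercept = 0.0f;
//   fit(handle, X_parts, desc, y_parts, &alpha, 1, d_coef, &intercept,
//       /*fit_intercept=*/true, /*normalize=*/false, /*algo=*/1, /*verbose=*/false);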
} // namespace opg
} // namespace Ridge
} // namespace ML
|
8773a82bb51d3bc38de17b54d551a0ad5f68caaa.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cumlHandle.hpp>
#include <common/cuml_comms_int.hpp>
#include <common/device_buffer.hpp>
#include <cuda_utils.cuh>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/linear_model/preprocess_mg.hpp>
#include <cuml/linear_model/ridge_mg.hpp>
#include <linalg/add.cuh>
#include <linalg/gemm.cuh>
#include <matrix/math.cuh>
#include <matrix/matrix.cuh>
#include <opg/linalg/mv_aTb.hpp>
#include <opg/linalg/svd.hpp>
#include <opg/stats/mean.hpp>
using namespace MLCommon;
namespace ML {
namespace Ridge {
namespace opg {
template <typename T>
void ridgeSolve(const cumlHandle &handle, T *S, T *V,
std::vector<Matrix::Data<T> *> &U,
const Matrix::PartDescriptor &UDesc,
const std::vector<Matrix::Data<T> *> &b, const T *alpha,
const int n_alpha, T *w, cudaStream_t *streams, int n_streams,
bool verbose) {
auto cublasH = handle.getImpl().getCublasHandle();
auto cusolverH = handle.getImpl().getcusolverDnHandle();
const MLCommon::cumlCommunicator &comm = handle.getImpl().getCommunicator();
const std::shared_ptr<deviceAllocator> allocator =
handle.getImpl().getDeviceAllocator();
// Implements this: w = V * inv(S^2 + λ*I) * S * U^T * b
T *S_nnz;
T alp = T(1);
T beta = T(0);
T thres = T(1e-10);
Matrix::setSmallValuesZero(S, UDesc.N, streams[0], thres);
// TO-DO: Update to use `device_buffer` here
// Tracking issue: https://github.com/rapidsai/cuml/issues/2524
allocate(S_nnz, UDesc.N, true);
copy(S_nnz, S, UDesc.N, streams[0]);
Matrix::power(S_nnz, UDesc.N, streams[0]);
LinAlg::addScalar(S_nnz, S_nnz, alpha[0], UDesc.N, streams[0]);
Matrix::matrixVectorBinaryDivSkipZero(S, S_nnz, size_t(1), UDesc.N, false,
true, streams[0], true);
Matrix::matrixVectorBinaryMult(V, S, UDesc.N, UDesc.N, false, true,
streams[0]);
Matrix::Data<T> S_nnz_data;
S_nnz_data.totalSize = UDesc.N;
S_nnz_data.ptr = S_nnz;
LinAlg::opg::mv_aTb(S_nnz_data, U, UDesc, b, comm, allocator, streams,
n_streams, cublasH);
LinAlg::gemm(V, UDesc.N, UDesc.N, S_nnz, w, UDesc.N, 1, CUBLAS_OP_N,
CUBLAS_OP_N, alp, beta, cublasH, streams[0]);
CUDA_CHECK(cudaFree(S_nnz));
}
template <typename T>
void ridgeEig(cumlHandle &handle, const std::vector<Matrix::Data<T> *> &A,
const Matrix::PartDescriptor &ADesc,
const std::vector<Matrix::Data<T> *> &b, const T *alpha,
const int n_alpha, T *coef, cudaStream_t *streams, int n_streams,
bool verbose) {
const MLCommon::cumlCommunicator &comm = handle.getImpl().getCommunicator();
const cublasHandle_t cublas_handle = handle.getImpl().getCublasHandle();
const cusolverDnHandle_t cusolver_handle =
handle.getImpl().getcusolverDnHandle();
const std::shared_ptr<deviceAllocator> allocator =
handle.getImpl().getDeviceAllocator();
int rank = comm.getRank();
device_buffer<T> S(allocator, streams[0], ADesc.N);
device_buffer<T> V(allocator, streams[0], ADesc.N * ADesc.N);
std::vector<Matrix::Data<T> *> U;
std::vector<Matrix::Data<T>> U_temp;
std::vector<Matrix::RankSizePair *> partsToRanks = ADesc.blocksOwnedBy(rank);
size_t total_size = 0;
for (int i = 0; i < partsToRanks.size(); i++) {
total_size += partsToRanks[i]->size;
}
total_size = total_size * ADesc.N;
device_buffer<T> U_parts(allocator, streams[0], total_size);
T *curr_ptr = U_parts.data();
for (int i = 0; i < partsToRanks.size(); i++) {
Matrix::Data<T> d;
d.totalSize = partsToRanks[i]->size;
d.ptr = curr_ptr;
curr_ptr = curr_ptr + (partsToRanks[i]->size * ADesc.N);
U_temp.push_back(d);
}
for (int i = 0; i < A.size(); i++) {
U.push_back(&(U_temp[i]));
}
LinAlg::opg::svdEig(A, ADesc, U, S.data(), V.data(), comm, allocator, streams,
n_streams, cublas_handle, cusolver_handle);
ridgeSolve(handle, S.data(), V.data(), U, ADesc, b, alpha, n_alpha, coef,
streams, n_streams, verbose);
}
template <typename T>
void fit_impl(cumlHandle &handle, std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *alpha, int n_alpha,
T *coef, T *intercept, bool fit_intercept, bool normalize,
int algo, cudaStream_t *streams, int n_streams, bool verbose) {
const std::shared_ptr<deviceAllocator> allocator =
handle.getImpl().getDeviceAllocator();
device_buffer<T> mu_input(allocator, streams[0]);
device_buffer<T> norm2_input(allocator, streams[0]);
device_buffer<T> mu_labels(allocator, streams[0]);
if (fit_intercept) {
mu_input.resize(input_desc.N, streams[0]);
mu_labels.resize(1, streams[0]);
if (normalize) {
norm2_input.resize(input_desc.N, streams[0]);
}
GLM::opg::preProcessData(handle, input_data, input_desc, labels,
mu_input.data(), mu_labels.data(),
norm2_input.data(), fit_intercept, normalize,
streams, n_streams, verbose);
}
if (algo == 0 || input_desc.N == 1) {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
} else if (algo == 1) {
ridgeEig(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
streams, n_streams, verbose);
} else {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
}
if (fit_intercept) {
GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef,
intercept, mu_input.data(), mu_labels.data(),
norm2_input.data(), fit_intercept, normalize,
streams, n_streams, verbose);
} else {
*intercept = T(0);
}
}
/**
* @brief performs MNMG fit operation for the ridge regression
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param n_rows: number of rows of the input data
* @input param n_cols: number of cols of the input data
* @input param labels: labels data
* @input param alpha: ridge parameter
* @input param n_alpha: number of ridge parameters. Only one parameter is supported right now.
* @output param coef: learned regression coefficients
* @output param intercept: intercept value
* @input param fit_intercept: fit intercept or not
* @input param normalize: normalize the data or not
* @input param verbose
*/
template <typename T>
void fit_impl(cumlHandle &handle, std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *alpha, int n_alpha,
T *coef, T *intercept, bool fit_intercept, bool normalize,
int algo, bool verbose) {
int rank = handle.getImpl().getCommunicator().getRank();
// TODO: These streams should come from cumlHandle
// Tracking issue: https://github.com/rapidsai/cuml/issues/2470
int n_streams = input_desc.blocksOwnedBy(rank).size();
cudaStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamCreate(&streams[i]));
}
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, streams, n_streams,
verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
}
template <typename T>
void predict_impl(cumlHandle &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc, T *coef, T intercept,
std::vector<Matrix::Data<T> *> &preds, cudaStream_t *streams,
int n_streams, bool verbose) {
std::vector<Matrix::RankSizePair *> local_blocks = input_desc.partsToRanks;
T alpha = T(1);
T beta = T(0);
for (int i = 0; i < input_data.size(); i++) {
int si = i % n_streams;
LinAlg::gemm(input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef,
preds[i]->ptr, local_blocks[i]->size, size_t(1), CUBLAS_OP_N,
CUBLAS_OP_N, alpha, beta, handle.getImpl().getCublasHandle(),
streams[si]);
LinAlg::addScalar(preds[i]->ptr, preds[i]->ptr, intercept,
local_blocks[i]->size, streams[si]);
}
}
template <typename T>
void predict_impl(cumlHandle &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<T> **input, size_t n_rows,
size_t n_cols, T *coef, T intercept, Matrix::Data<T> **preds,
bool verbose) {
int rank = handle.getImpl().getCommunicator().getRank();
std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes,
rank_sizes + n_parts);
std::vector<Matrix::Data<T> *> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T> *> preds_data(preds, preds + n_parts);
// TODO: These streams should come from cumlHandle
int n_streams = n_parts;
cudaStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamCreate(&streams[i]));
}
predict_impl(handle, input_data, input_desc, coef, intercept, preds_data,
streams, n_streams, verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
}
void fit(cumlHandle &handle, std::vector<Matrix::Data<float> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<float> *> &labels, float *alpha, int n_alpha,
float *coef, float *intercept, bool fit_intercept, bool normalize,
int algo, bool verbose) {
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, verbose);
}
void fit(cumlHandle &handle, std::vector<Matrix::Data<double> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<double> *> &labels, double *alpha,
int n_alpha, double *coef, double *intercept, bool fit_intercept,
bool normalize, int algo, bool verbose) {
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, verbose);
}
void predict(cumlHandle &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<float> **input, size_t n_rows,
size_t n_cols, float *coef, float intercept,
Matrix::Data<float> **preds, bool verbose) {
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
intercept, preds, verbose);
}
void predict(cumlHandle &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<double> **input, size_t n_rows,
size_t n_cols, double *coef, double intercept,
Matrix::Data<double> **preds, bool verbose) {
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
intercept, preds, verbose);
}
} // namespace opg
} // namespace Ridge
} // namespace ML
|
c6ce58356812854af9376d7bad3efd6d9800a661.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <dacrt/dacrt.h>
#include <util/cutimer.h>
extern "C"
__global__ void dacrtBruteForce(TriangleArray dev_triangles, int num_triangles, RayArray dev_rays, int num_rays,
int* tri_idx_array, int tricnt, int* ray_idx_array, int raycnt, float* maxts, int* hitids);
extern "C" __global__ void updateMinKernel(int* ray_id, float* min_hits, int* minhit_ids, float* global_min, int* global_hits, int num_rays);
extern "C" __global__ void rayPartitionKernel(float3* o, float3* dir, int* ray_ids, int num_rays, float3 bmin, float3 bmax, int* occupy);
|
c6ce58356812854af9376d7bad3efd6d9800a661.cu
|
#include <dacrt/dacrt.h>
#include <util/cutimer.h>
extern "C"
__global__ void dacrtBruteForce(TriangleArray dev_triangles, int num_triangles, RayArray dev_rays, int num_rays,
int* tri_idx_array, int tricnt, int* ray_idx_array, int raycnt, float* maxts, int* hitids);
extern "C" __global__ void updateMinKernel(int* ray_id, float* min_hits, int* minhit_ids, float* global_min, int* global_hits, int num_rays);
extern "C" __global__ void rayPartitionKernel(float3* o, float3* dir, int* ray_ids, int num_rays, float3 bmin, float3 bmax, int* occupy);
|
1d04cfa6d5ee0cbf49ff78765e082cf9f8929c12.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <vhashing.h>
#include <unordered_map>
#include <utility>
#include <random>
#include <hip/hip_runtime.h>
#include <glog/logging.h>
using std::vector;
using std::default_random_engine;
struct Voxel {
float sdf;
};
struct VoxelBlock {
Voxel voxels[8*8*8];
};
bool operator==(const VoxelBlock &a, const VoxelBlock &b) {
for (int i=0; i<8*8*8; i++) {
if (a.voxels[i].sdf != b.voxels[i].sdf) {
return false;
}
}
return true;
}
struct BlockHasher {
__device__ __host__
size_t operator()(int3 patch) const {
const size_t p[] = {
73856093,
19349669,
83492791
};
return ((size_t)patch.x * p[0]) ^
((size_t)patch.y * p[1]) ^
((size_t)patch.z * p[2]);
}
};
struct BlockEqual {
__device__ __host__
bool operator()(int3 patch1, int3 patch2) const {
return patch1.x == patch2.x &&
patch1.y == patch2.y &&
patch1.z == patch2.z;
}
};
typedef vhashing::HashTableBase<int3, VoxelBlock, BlockHasher, BlockEqual> HTBase;
__global__
void kernel(int3 *keys,
VoxelBlock *values,
int n,
vhashing::HashTableBase<int3, VoxelBlock, BlockHasher, BlockEqual> bm) {
int base = blockDim.x * blockIdx.x + threadIdx.x;
if (base >= n) {
return;
}
bm[keys[base]] = values[base];
}
struct set_minus_one_if_x_is_even {
__device__ __host__
void operator() (
const int3 &key,
VoxelBlock &value)
{
if (key.x % 2 == 0) {
value.voxels[0].sdf = -1;
}
}
};
/**
Creates a HashTable with capacity of < 20000.
insert voxels to hashtable until close to capacity.
Tests the INSERT on the linked-list implementation
*/
int main() {
vhashing::HashTable<int3, VoxelBlock, BlockHasher, BlockEqual, vhashing::device_memspace>
blocks(10000, 2, 19997, int3{999999, 999999, 999999});
vector< int3 > keys;
vector< VoxelBlock > values;
default_random_engine dre;
for (int i=0; i<19000; i++) {
int3 k = make_int3( dre() % 80000, dre() % 80000, dre() % 80000 );
VoxelBlock d;
for (int j=0; j<8*8*8; j++) {
d.voxels[j].sdf = dre();
}
values.push_back(d);
keys.push_back(k);
}
printf("Generated values\n");
// insert into blockmap
{
int3 *dkeys;
VoxelBlock *dvalues;
cudaSafeCall(hipMalloc(&dkeys, sizeof(int3) * keys.size()));
cudaSafeCall(hipMalloc(&dvalues, sizeof(VoxelBlock) * keys.size()));
cudaSafeCall(hipMemcpy(dkeys, &keys[0], sizeof(int3) * keys.size(), hipMemcpyHostToDevice));
cudaSafeCall(hipMemcpy(dvalues, &values[0], sizeof(VoxelBlock) * keys.size(), hipMemcpyHostToDevice));
printf("Running kernel\n");
int numJobs = keys.size();
int tpb = 16;
int numBlocks = (numJobs + (tpb-1)) / tpb;
hipLaunchKernelGGL(( kernel), dim3(numBlocks), dim3(tpb), 0, 0, dkeys, dvalues, keys.size(), blocks);
cudaSafeCall(hipDeviceSynchronize());
}
printf("Copying back\n");
{
// Apply function to all blocks...
blocks.Apply(set_minus_one_if_x_is_even());
}
// stream in
vhashing::HashTable<int3, VoxelBlock, BlockHasher, BlockEqual, vhashing::std_memspace>
bmh(blocks);
// check
for (int i=0; i<keys.size(); i++) {
int3 key = keys[i];
VoxelBlock &value = bmh[key];
if (key.x % 2 == 0) {
CHECK(value.voxels[0].sdf == -1);
}
else {
CHECK(value.voxels[0].sdf != -1);
}
}
return 0;
}
|
1d04cfa6d5ee0cbf49ff78765e082cf9f8929c12.cu
|
#include <vhashing.h>
#include <unordered_map>
#include <utility>
#include <random>
#include <cuda_runtime.h>
#include <glog/logging.h>
using std::vector;
using std::default_random_engine;
struct Voxel {
float sdf;
};
struct VoxelBlock {
Voxel voxels[8*8*8];
};
bool operator==(const VoxelBlock &a, const VoxelBlock &b) {
for (int i=0; i<8*8*8; i++) {
if (a.voxels[i].sdf != b.voxels[i].sdf) {
return false;
}
}
return true;
}
struct BlockHasher {
__device__ __host__
size_t operator()(int3 patch) const {
const size_t p[] = {
73856093,
19349669,
83492791
};
return ((size_t)patch.x * p[0]) ^
((size_t)patch.y * p[1]) ^
((size_t)patch.z * p[2]);
}
};
struct BlockEqual {
__device__ __host__
bool operator()(int3 patch1, int3 patch2) const {
return patch1.x == patch2.x &&
patch1.y == patch2.y &&
patch1.z == patch2.z;
}
};
typedef vhashing::HashTableBase<int3, VoxelBlock, BlockHasher, BlockEqual> HTBase;
__global__
void kernel(int3 *keys,
VoxelBlock *values,
int n,
vhashing::HashTableBase<int3, VoxelBlock, BlockHasher, BlockEqual> bm) {
int base = blockDim.x * blockIdx.x + threadIdx.x;
if (base >= n) {
return;
}
bm[keys[base]] = values[base];
}
struct set_minus_one_if_x_is_even {
__device__ __host__
void operator() (
const int3 &key,
VoxelBlock &value)
{
if (key.x % 2 == 0) {
value.voxels[0].sdf = -1;
}
}
};
/**
Creates a HashTable with capacity of < 20000.
insert voxels to hashtable until close to capacity.
Tests the INSERT on the linked-list implementation
*/
int main() {
vhashing::HashTable<int3, VoxelBlock, BlockHasher, BlockEqual, vhashing::device_memspace>
blocks(10000, 2, 19997, int3{999999, 999999, 999999});
vector< int3 > keys;
vector< VoxelBlock > values;
default_random_engine dre;
for (int i=0; i<19000; i++) {
int3 k = make_int3( dre() % 80000, dre() % 80000, dre() % 80000 );
VoxelBlock d;
for (int j=0; j<8*8*8; j++) {
d.voxels[j].sdf = dre();
}
values.push_back(d);
keys.push_back(k);
}
printf("Generated values\n");
// insert into blockmap
{
int3 *dkeys;
VoxelBlock *dvalues;
cudaSafeCall(cudaMalloc(&dkeys, sizeof(int3) * keys.size()));
cudaSafeCall(cudaMalloc(&dvalues, sizeof(VoxelBlock) * keys.size()));
cudaSafeCall(cudaMemcpy(dkeys, &keys[0], sizeof(int3) * keys.size(), cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(dvalues, &values[0], sizeof(VoxelBlock) * keys.size(), cudaMemcpyHostToDevice));
printf("Running kernel\n");
int numJobs = keys.size();
int tpb = 16;
int numBlocks = (numJobs + (tpb-1)) / tpb;
kernel<<<numBlocks, tpb>>>(dkeys, dvalues, keys.size(), blocks);
cudaSafeCall(cudaDeviceSynchronize());
}
printf("Copying back\n");
{
// Apply function to all blocks...
blocks.Apply(set_minus_one_if_x_is_even());
}
// stream in
vhashing::HashTable<int3, VoxelBlock, BlockHasher, BlockEqual, vhashing::std_memspace>
bmh(blocks);
// check
for (int i=0; i<keys.size(); i++) {
int3 key = keys[i];
VoxelBlock &value = bmh[key];
if (key.x % 2 == 0) {
CHECK(value.voxels[0].sdf == -1);
}
else {
CHECK(value.voxels[0].sdf != -1);
}
}
return 0;
}
|
f62e40bc81dfb0aed9389ddd065e14c9e238c6eb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include "gpu.h"
#include "io.h"
#include "neuron.h"
#include "syn.h"
static const char * dynParamFile = "dynParam.bin";
static const char * dynStateFile = "dynState.bin";
int neuronNew(neuron_t * neuron){
// parameters
const float * dynParam;
dynParam = (const float *) calloc(
neuron->numNeurons * DYN_PARAM_LEN,
sizeof(float)
);
if(!dynParam) return -1;
// state
float * dynState;
dynState = (float *) calloc(
neuron->numNeurons * DYN_STATE_LEN,
sizeof(float)
);
if(!dynState) return -1;
// write back
neuron->dynParam = dynParam;
neuron->dynState = dynState;
return 0;
}
int neuronCopyToGPU(neuron_t * neuron){
int error;
// neuronal parameters
const float * dynParam;
error = gpuCopyTo(
neuron->numNeurons * DYN_PARAM_LEN * sizeof(float),
(const void *) neuron->dynParam,
(void **) &dynParam
);
if(error){
printf("Could not copy neuron parameters to GPU\n");
return -1;
}
// neuronal state
float * dynState;
error = gpuCopyTo(
neuron->numNeurons * DYN_STATE_LEN * sizeof(float),
(const void *) neuron->dynState,
(void **) &dynState
);
if(error){
printf("Could not copy neuron states to GPU\n");
return -1;
}
// write back
neuron->dynParam = dynParam;
neuron->dynState = dynState;
return 0;
}
int neuronRead(neuron_t * neuron){
int error;
// read dynamics parameters
error = ioReadMat(
dynParamFile,
neuron->numNeurons,
DYN_PARAM_LEN,
(float *) neuron->dynParam
);
if(error){
printf("Failed to read %s\n", dynParamFile);
return -1;
}
// read dynamics state matrix
error = ioReadMat(
dynStateFile,
neuron->numNeurons,
DYN_STATE_LEN,
neuron->dynState
);
if(error){
printf("Failed to read %s\n", dynStateFile);
return -1;
}
return 0;
}
int neuronReadSize(int * pNumNeurons){
int error;
int rows;
int cols;
// check neuron parameters
error = ioReadMatSize(dynParamFile, &rows, &cols);
if(error) return -1;
if(cols != DYN_PARAM_LEN){
printf("Invalid column count in %s\n", dynParamFile);
return -1;
}
// this should be a constant
const int numNeurons = rows;
// check neuron state
error = ioReadMatSize(dynStateFile, &rows, &cols);
if(error) return -1;
if(rows != numNeurons){
printf("Invalid rows count in %s\n", dynStateFile);
return -1;
}
if(cols != DYN_STATE_LEN){
printf("Invalid column count in %s\n", dynStateFile);
return -1;
}
// write back
*pNumNeurons = numNeurons;
// report success
return 0;
}
// Faraday constant (C / mol)
#define C_F 96485.34f
// ideal gas constant (V C / K mol)
#define C_R 8.31446f
// temperature (K)
#define C_T 295.0f
// xi
#define C_xi (96485.34f / (8.31446f * 295.0f))
// internal Na concentration (mol / m^3)
#define C_cNaI 14.0f
// external Na concentration (mol / m^3)
#define C_cNaO 114.0f
// internal K concentration (mol / m^3)
#define C_cKI 120.0f
// external K concentration (mol / m^3)
#define C_cKO 2.5f
// leakage reversal potential (V)
#define C_eL -70e-3f
// excitatory reversal potential (V)
#define C_eExc 0.0f
// inhibitory reversal potential (V)
#define C_eInh -65e-3f
// membrane capacitance (F / m^2)
#define C_Cm 7e-12f
// membrane area (C / mol)
#define C_A 100e-12f
__global__ void neuronUpdateKernel(
const int numNeurons,
const float * __restrict__ cond,
const float * __restrict__ dynParam,
float * __restrict__ dynState,
float * __restrict__ firingVec
){
// neuron id
const int nId = blockDim.x * blockIdx.x + threadIdx.x;
// let's not exaggerate
if(nId >= numNeurons) return;
// current state
float v = dynState[DYN_STATE_V * numNeurons + nId];
float m = dynState[DYN_STATE_M * numNeurons + nId];
float h = dynState[DYN_STATE_H * numNeurons + nId];
float n = dynState[DYN_STATE_N * numNeurons + nId];
// parameters
const float gL = dynParam[DYN_PARAM_GL * numNeurons + nId];
const float pNa = dynParam[DYN_PARAM_PNA * numNeurons + nId];
const float pK = dynParam[DYN_PARAM_PK * numNeurons + nId];
const float type = dynParam[DYN_PARAM_TYPE * numNeurons + nId];
// conductances
const float gExc = cond[SYN_TYPE_EXC * numNeurons + nId];
const float gInh = cond[SYN_TYPE_INH * numNeurons + nId];
// total current (A / m^2)
float Itotal;
// stimulation current (A / m^2)
float Istim = 0.0f;
// add stimulation
if(type < 0.5f){
// excitatory neuron
Istim = 5.5e-12f;
}else{
// inhibitory neuron
Istim = 10e-12f;
}
float dt = 1e-6f;
float firing = 0.0f;
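// Forward-Euler integration: 1000 steps of dt = 1 us, i.e. 1 ms of
// simulated time per kernel invocation.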
for(int i = 0; i < 1000; i++){
float expVal = expf(v * C_xi);
/*
** TODO
** This is a very crude way to prevent a division by zero.
** Possible solutions:
** - Check for zero voltage before expVal
** - Try to use de l'Hôpital's rule
*/
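/*
** Ina and Ik below follow the Goldman-Hodgkin-Katz current form, gated by
** m^2*h and n^2 respectively. By Taylor expansion, 1 - expf(v*C_xi) is
** approximately -C_xi*v near v = 0, so the exact limit of
** v / (1 - expf(v*C_xi)) is -1/C_xi; the 1/(1 - C_xi) factor used in the
** expVal == 1 branch is only a stand-in of similar magnitude.
*/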
float Ina;
Ina = C_A * C_F * C_xi * m * m * h * pNa;
Ina *= C_cNaO - C_cNaI * expVal;
float Ik;
Ik = C_A * C_F * C_xi * n * n * pK;
Ik *= C_cKO - C_cKI * expVal;
/*
** Avoid division by zero and use de l'Hôpital's rule
** to calculate Ina and Ik.
*/
if(expVal == 1.0f){
Ina *= 1.0f / (1.0f - C_xi);
Ik *= 1.0f / (1.0f - C_xi);
}else{
Ina *= v / (1.0f - expVal);
Ik *= v / (1.0f - expVal);
}
// add stimulation current
Itotal = Istim;
// add leakage, Na, and K current
Itotal -= gL * (v - C_eL);
// Na current
Itotal -= Ina;
// K+ current
Itotal -= Ik;
// add synaptic currents
Itotal -= gExc * (v - C_eExc);
Itotal -= gInh * (v - C_eInh);
// membrane voltage
float dv = dt / C_Cm * Itotal;
// Na activation
float dm = dt * (
// aight
(1 - m) * 60000 * (v + 0.033f)
/ (1 - expf(-(v + 0.033f) / 0.003f))
// yes!
+ m * 70000 * (v + 0.042f)
/ (1 - expf((v + 0.042f) / 0.02f))
);
// Na inactivation
float dh = dt * (
- (1 - h) * 50000 * (v + 0.065f)
/ (1 - expf((v + 0.065f) / 0.006f))
- h * 2250
/ (1 + expf(-(v + 0.01f) / 0.01f))
);
// K activation
float dn = dt * (
// wumbaba
(1 - n) * 16000 * (v + 0.01f)
/ (1 - expf(-(v + 0.01f) / 0.01f))
+ n * 40000 * (v + 0.035f)
/ (1 - expf((v + 0.035f) / 0.01f))
);
// always update membrane voltage
v += dv;
// we should try to avoid this
if(isnan(dm) || isnan(dh) || isnan(dn)){
// nothing
}else{
m += dm;
h += dh;
n += dn;
}
// check for action potential
if(v >= -35e-3f){
firing = 1.0f;
}
}
// write back dynamics state
dynState[DYN_STATE_V * numNeurons + nId] = v;
dynState[DYN_STATE_I * numNeurons + nId] = Itotal;
dynState[DYN_STATE_M * numNeurons + nId] = m;
dynState[DYN_STATE_H * numNeurons + nId] = h;
dynState[DYN_STATE_N * numNeurons + nId] = n;
// write firing
firingVec[nId] = firing;
}
/*
** Benchmarking on a GeForce GTX 580 showed that best performance
** is achieved with 32 threads per warp and 20 warps per block.
** See commit 569c50a3eab78bd089a25d7c04d79a1103279a7e
*/
#define NUM_WARPS 20
int neuronUpdate(
const float * cond,
neuron_t * neuron,
float * firing
){
// reset CUDA error
hipGetLastError();
// block size
int blockSize = 32 * NUM_WARPS;
// update neurons
dim3 threads(blockSize);
dim3 grid((int) ceil(
(double) neuron->numNeurons / blockSize
));
// launch kernel
hipLaunchKernelGGL(( neuronUpdateKernel), dim3(grid), dim3(threads), 0, 0,
neuron->numNeurons,
cond,
neuron->dynParam,
neuron->dynState,
firing
);
// check for error
hipError_t error = hipGetLastError();
if(error != hipSuccess){
printf("Could not update neuron states. Error:\n");
printf("%s", hipGetErrorString(error));
return -1;
}
return 0;
}
|
f62e40bc81dfb0aed9389ddd065e14c9e238c6eb.cu
|
#include <math.h>
#include <stdio.h>
#include "gpu.h"
#include "io.h"
#include "neuron.h"
#include "syn.h"
static const char * dynParamFile = "dynParam.bin";
static const char * dynStateFile = "dynState.bin";
int neuronNew(neuron_t * neuron){
// parameters
const float * dynParam;
dynParam = (const float *) calloc(
neuron->numNeurons * DYN_PARAM_LEN,
sizeof(float)
);
if(!dynParam) return -1;
// state
float * dynState;
dynState = (float *) calloc(
neuron->numNeurons * DYN_STATE_LEN,
sizeof(float)
);
if(!dynState) return -1;
// write back
neuron->dynParam = dynParam;
neuron->dynState = dynState;
return 0;
}
int neuronCopyToGPU(neuron_t * neuron){
int error;
// neuronal parameters
const float * dynParam;
error = gpuCopyTo(
neuron->numNeurons * DYN_PARAM_LEN * sizeof(float),
(const void *) neuron->dynParam,
(void **) &dynParam
);
if(error){
printf("Could not copy neuron parameters to GPU\n");
return -1;
}
// neuronal state
float * dynState;
error = gpuCopyTo(
neuron->numNeurons * DYN_STATE_LEN * sizeof(float),
(const void *) neuron->dynState,
(void **) &dynState
);
if(error){
printf("Could not copy neuron states to GPU\n");
return -1;
}
// write back
neuron->dynParam = dynParam;
neuron->dynState = dynState;
return 0;
}
int neuronRead(neuron_t * neuron){
int error;
// read dynamics parameters
error = ioReadMat(
dynParamFile,
neuron->numNeurons,
DYN_PARAM_LEN,
(float *) neuron->dynParam
);
if(error){
printf("Failed to read %s\n", dynParamFile);
return -1;
}
// read dynamics state matrix
error = ioReadMat(
dynStateFile,
neuron->numNeurons,
DYN_STATE_LEN,
neuron->dynState
);
if(error){
printf("Failed to read %s\n", dynStateFile);
return -1;
}
return 0;
}
int neuronReadSize(int * pNumNeurons){
int error;
int rows;
int cols;
// check neuron parameters
error = ioReadMatSize(dynParamFile, &rows, &cols);
if(error) return -1;
if(cols != DYN_PARAM_LEN){
printf("Invalid column count in %s\n", dynParamFile);
return -1;
}
// this should be a constant
const int numNeurons = rows;
// check neuron state
error = ioReadMatSize(dynStateFile, &rows, &cols);
if(error) return -1;
if(rows != numNeurons){
printf("Invalid rows count in %s\n", dynStateFile);
return -1;
}
if(cols != DYN_STATE_LEN){
printf("Invalid column count in %s\n", dynStateFile);
return -1;
}
// write back
*pNumNeurons = numNeurons;
// report success
return 0;
}
// Faraday constant (C / mol)
#define C_F 96485.34f
// ideal gas constant (V C / K mol)
#define C_R 8.31446f
// temperature (K)
#define C_T 295.0f
// xi
#define C_xi (96485.34f / (8.31446f * 295.0f))
// internal Na concentration (mol / m^3)
#define C_cNaI 14.0f
// external Na concentration (mol / m^3)
#define C_cNaO 114.0f
// internal K concentration (mol / m^3)
#define C_cKI 120.0f
// external K concentration (mol / m^3)
#define C_cKO 2.5f
// leakage reversal potential (V)
#define C_eL -70e-3f
// excitatory reversal potential (V)
#define C_eExc 0.0f
// inhibitory reversal potential (V)
#define C_eInh -65e-3f
// membrane capacitance (F / m^2)
#define C_Cm 7e-12f
// membrane area (C / mol)
#define C_A 100e-12f
__global__ void neuronUpdateKernel(
const int numNeurons,
const float * __restrict__ cond,
const float * __restrict__ dynParam,
float * __restrict__ dynState,
float * __restrict__ firingVec
){
// neuron id
const int nId = blockDim.x * blockIdx.x + threadIdx.x;
// let's not exaggerate
if(nId >= numNeurons) return;
// current state
float v = dynState[DYN_STATE_V * numNeurons + nId];
float m = dynState[DYN_STATE_M * numNeurons + nId];
float h = dynState[DYN_STATE_H * numNeurons + nId];
float n = dynState[DYN_STATE_N * numNeurons + nId];
// parameters
const float gL = dynParam[DYN_PARAM_GL * numNeurons + nId];
const float pNa = dynParam[DYN_PARAM_PNA * numNeurons + nId];
const float pK = dynParam[DYN_PARAM_PK * numNeurons + nId];
const float type = dynParam[DYN_PARAM_TYPE * numNeurons + nId];
// conductances
const float gExc = cond[SYN_TYPE_EXC * numNeurons + nId];
const float gInh = cond[SYN_TYPE_INH * numNeurons + nId];
// total current (A / m^2)
float Itotal;
// stimulation current (A / m^2)
float Istim = 0.0f;
// add stimulation
if(type < 0.5f){
// excitatory neuron
Istim = 5.5e-12f;
}else{
// inhibitory neuron
Istim = 10e-12f;
}
float dt = 1e-6f;
float firing = 0.0f;
for(int i = 0; i < 1000; i++){
float expVal = expf(v * C_xi);
/*
** TODO
** This is a very crude way to prevent a division by zero.
** Possible solutions:
** - Check for zero voltage before expVal
** - Try to use de l'Hôpital's rule
*/
float Ina;
Ina = C_A * C_F * C_xi * m * m * h * pNa;
Ina *= C_cNaO - C_cNaI * expVal;
float Ik;
Ik = C_A * C_F * C_xi * n * n * pK;
Ik *= C_cKO - C_cKI * expVal;
/*
** Avoid division by zero and use de l'Hôpital's rule
** to calculate Ina and Ik.
*/
if(expVal == 1.0f){
Ina *= 1.0f / (1.0f - C_xi);
Ik *= 1.0f / (1.0f - C_xi);
}else{
Ina *= v / (1.0f - expVal);
Ik *= v / (1.0f - expVal);
}
// add stimulation current
Itotal = Istim;
// add leakage, Na, and K current
Itotal -= gL * (v - C_eL);
// Na current
Itotal -= Ina;
// K+ current
Itotal -= Ik;
// add synaptic currents
Itotal -= gExc * (v - C_eExc);
Itotal -= gInh * (v - C_eInh);
// membrane voltage
float dv = dt / C_Cm * Itotal;
// Na activation
float dm = dt * (
// aight
(1 - m) * 60000 * (v + 0.033f)
/ (1 - expf(-(v + 0.033f) / 0.003f))
// yes!
+ m * 70000 * (v + 0.042f)
/ (1 - expf((v + 0.042f) / 0.02f))
);
// Na inactivation
float dh = dt * (
- (1 - h) * 50000 * (v + 0.065f)
/ (1 - expf((v + 0.065f) / 0.006f))
- h * 2250
/ (1 + expf(-(v + 0.01f) / 0.01f))
);
// K activation
float dn = dt * (
// wumbaba
(1 - n) * 16000 * (v + 0.01f)
/ (1 - expf(-(v + 0.01f) / 0.01f))
+ n * 40000 * (v + 0.035f)
/ (1 - expf((v + 0.035f) / 0.01f))
);
// always update membrane voltage
v += dv;
// we should try to avoid this
if(isnan(dm) || isnan(dh) || isnan(dn)){
// nothing
}else{
m += dm;
h += dh;
n += dn;
}
// check for action potential
if(v >= -35e-3f){
firing = 1.0f;
}
}
// write back dynamics state
dynState[DYN_STATE_V * numNeurons + nId] = v;
dynState[DYN_STATE_I * numNeurons + nId] = Itotal;
dynState[DYN_STATE_M * numNeurons + nId] = m;
dynState[DYN_STATE_H * numNeurons + nId] = h;
dynState[DYN_STATE_N * numNeurons + nId] = n;
// write firing
firingVec[nId] = firing;
}
/*
** Benchmarking on a GeForce GTX 580 showed that best performance
** is achieved with 32 threads per warp and 20 warps per block.
** See commit 569c50a3eab78bd089a25d7c04d79a1103279a7e
*/
#define NUM_WARPS 20
int neuronUpdate(
const float * cond,
neuron_t * neuron,
float * firing
){
// reset CUDA error
cudaGetLastError();
// block size
int blockSize = 32 * NUM_WARPS;
// update neurons
dim3 threads(blockSize);
dim3 grid((int) ceil(
(double) neuron->numNeurons / blockSize
));
// launch kernel
neuronUpdateKernel<<<grid, threads>>>(
neuron->numNeurons,
cond,
neuron->dynParam,
neuron->dynState,
firing
);
// check for error
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess){
printf("Could not update neuron states. Error:\n");
printf("%s", cudaGetErrorString(error));
return -1;
}
return 0;
}
|
6c9d99a9f95593b704b5b0d6536dd1448d457ecc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zcaxpycp.cu mixed zc -> ds, Fri Jan 30 19:00:07 2015
*/
#include "common_magma.h"
#define NB 64
// adds x += r (including conversion to double) --and--
// copies w = b
// each thread does one index, x[i] and w[i]
__global__ void
dsaxpycp_kernel(
int m, float *r, double *x,
const double *b, double *w )
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_D_ADD( x[i], (double)( r[i] ) );
w[i] = b[i];
}
}
// adds x += r --and--
// copies r = b
// each thread does one index, x[i] and r[i]
__global__ void
daxpycp_kernel(
int m, double *r, double *x,
const double *b)
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_D_ADD( x[i], r[i] );
r[i] = b[i];
}
}
// ----------------------------------------------------------------------
// adds x += r (including conversion to double) --and--
// copies w = b
extern "C" void
magmablas_dsaxpycp_q(
magma_int_t m,
magmaFloat_ptr r,
magmaDouble_ptr x,
magmaDouble_const_ptr b,
magmaDouble_ptr w,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
hipLaunchKernelGGL(( dsaxpycp_kernel) , dim3(grid), dim3(threads), 0, queue , m, r, x, b, w );
}
extern "C" void
magmablas_dsaxpycp(
magma_int_t m,
magmaFloat_ptr r,
magmaDouble_ptr x,
magmaDouble_const_ptr b,
magmaDouble_ptr w)
{
magmablas_dsaxpycp_q( m, r, x, b, w, magma_stream );
}
// ----------------------------------------------------------------------
// adds x += r --and--
// copies r = b
extern "C" void
magmablas_daxpycp_q(
magma_int_t m,
magmaDouble_ptr r,
magmaDouble_ptr x,
magmaDouble_const_ptr b,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
hipLaunchKernelGGL(( daxpycp_kernel) , dim3(grid), dim3(threads), 0, queue , m, r, x, b );
}
extern "C" void
magmablas_daxpycp(
magma_int_t m,
magmaDouble_ptr r,
magmaDouble_ptr x,
magmaDouble_const_ptr b)
{
magmablas_daxpycp_q( m, r, x, b, magma_stream );
}
|
6c9d99a9f95593b704b5b0d6536dd1448d457ecc.cu
|
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zcaxpycp.cu mixed zc -> ds, Fri Jan 30 19:00:07 2015
*/
#include "common_magma.h"
#define NB 64
// adds x += r (including conversion to double) --and--
// copies w = b
// each thread does one index, x[i] and w[i]
__global__ void
dsaxpycp_kernel(
int m, float *r, double *x,
const double *b, double *w )
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_D_ADD( x[i], (double)( r[i] ) );
w[i] = b[i];
}
}
// adds x += r --and--
// copies r = b
// each thread does one index, x[i] and r[i]
__global__ void
daxpycp_kernel(
int m, double *r, double *x,
const double *b)
{
const int i = threadIdx.x + blockIdx.x*NB;
if ( i < m ) {
x[i] = MAGMA_D_ADD( x[i], r[i] );
r[i] = b[i];
}
}
// ----------------------------------------------------------------------
// adds x += r (including conversion to double) --and--
// copies w = b
extern "C" void
magmablas_dsaxpycp_q(
magma_int_t m,
magmaFloat_ptr r,
magmaDouble_ptr x,
magmaDouble_const_ptr b,
magmaDouble_ptr w,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
dsaxpycp_kernel <<< grid, threads, 0, queue >>> ( m, r, x, b, w );
}
extern "C" void
magmablas_dsaxpycp(
magma_int_t m,
magmaFloat_ptr r,
magmaDouble_ptr x,
magmaDouble_const_ptr b,
magmaDouble_ptr w)
{
magmablas_dsaxpycp_q( m, r, x, b, w, magma_stream );
}
// ----------------------------------------------------------------------
// adds x += r --and--
// copies r = b
extern "C" void
magmablas_daxpycp_q(
magma_int_t m,
magmaDouble_ptr r,
magmaDouble_ptr x,
magmaDouble_const_ptr b,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
daxpycp_kernel <<< grid, threads, 0, queue >>> ( m, r, x, b );
}
extern "C" void
magmablas_daxpycp(
magma_int_t m,
magmaDouble_ptr r,
magmaDouble_ptr x,
magmaDouble_const_ptr b)
{
magmablas_daxpycp_q( m, r, x, b, magma_stream );
}
|
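Both wrappers above size the launch the same way: a one-dimensional grid of ceil(m/NB) blocks of NB threads, one element per thread. Below is a minimal standalone sketch of that pattern against the plain CUDA runtime; the kernel name axpy_copy_kernel and the zero-initialised buffers are illustrative assumptions, not MAGMA API.
#include <cuda_runtime.h>
#include <cstdio>

#define NB 64

// same per-thread work as dsaxpycp_kernel: x += r (promoted to double), w = b
__global__ void axpy_copy_kernel(int m, const float *r, double *x,
                                 const double *b, double *w)
{
    const int i = threadIdx.x + blockIdx.x * NB;
    if (i < m) {
        x[i] += (double) r[i];
        w[i] = b[i];
    }
}

int main()
{
    const int m = 1000;
    float *r;
    double *x, *b, *w;
    cudaMalloc((void**)&r, m * sizeof(float));
    cudaMalloc((void**)&x, m * sizeof(double));
    cudaMalloc((void**)&b, m * sizeof(double));
    cudaMalloc((void**)&w, m * sizeof(double));
    cudaMemset(r, 0, m * sizeof(float));
    cudaMemset(x, 0, m * sizeof(double));
    cudaMemset(b, 0, m * sizeof(double));
    // ceiling division so the grid covers all m elements, exactly as above
    dim3 threads(NB);
    dim3 grid((m + NB - 1) / NB);
    axpy_copy_kernel<<<grid, threads>>>(m, r, x, b, w);
    cudaDeviceSynchronize();
    printf("launched %u block(s) of %d threads for m = %d\n", grid.x, NB, m);
    cudaFree(r); cudaFree(x); cudaFree(b); cudaFree(w);
    return 0;
}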
a70b49ecf2c2d1bd4f84a6ba80c899997d4e1365.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "pcl/gpu/utils/device/block.hpp"
#include "pcl/gpu/utils/device/limits.hpp"
#include "pcl/gpu/utils/device/vector_math.hpp"
#include "pcl/gpu/utils/device/functional.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "thrust/transform.h"
#include "thrust/device_ptr.h"
namespace pcl
{
namespace device
{
//[spinimage][angles] = [0..FSize][..FSize]
extern __shared__ float simage_angles[];
template<class It> __device__ __forceinline__ float3 fetch(It ptr, int index) { return *(float3*)&ptr[index]; }
//template<class It> __device__ __forceinline__ float3 fetch(It ptr, int index) { return tr(ptr[index]); }
struct UseCustomAxis
{
float3 rotation_axis;
__device__ __forceinline__ float3 getRotationAxes(int /*index*/, const float3& /*normal*/) const { return rotation_axis; }
};
struct UseCustomAxesCloud
{
const NormalType* rotation_axes_cloud;
__device__ __forceinline__ float3 getRotationAxes(int index, const float3& /*normal*/) const { return fetch(rotation_axes_cloud, index); }
};
struct UseOriginNormal
{
__device__ __forceinline__ float3 getRotationAxes(int /*index*/, const float3& normal) const { return normal; }
};
struct Div12eps
{
__device__ __forceinline__ float operator()(float v1, float v2) const { return (float)(v1 / ( v2 + numeric_limits<double>::epsilon() )); }
};
struct DivValIfNonZero
{
float val;
__device__ __forceinline__ DivValIfNonZero(float value) : val(value) {}
__device__ __forceinline__ float operator()(float v) const { return val == 0 ? v : v/val; }
};
template<bool radial, bool angular, typename AxesStrategy>
struct SpinImpl : public AxesStrategy
{
enum
{
CTA_SIZE = 192
};
int work_size;
const int* indices;
const PointType* input_cloud;
const NormalType* input_normals;
const PointType* surface;
const NormalType* normals;
PtrStep<int> neighbor_indices;
const int* neighbor_indices_sizes;
float support_angle_cos;
int min_neighb;
int image_width;
float bin_size;
int FSize;
mutable PtrStep<float> output;
static __device__ __host__ __forceinline__ int computeFSize(int image_width)
{
int cols = 1 + image_width * 2;
int rows = 1 + image_width;
return cols * rows;
}
__device__ __forceinline__ void operator()() const
{
int i_input = blockIdx.x + gridDim.x * blockIdx.y;
int index = indices[i_input];
int neighb_count = neighbor_indices_sizes[i_input];
const int *ginds = neighbor_indices.ptr (i_input);
if (neighb_count < min_neighb)
return;
//set zeros to spin image
Block::fill(simage_angles, simage_angles + FSize, 0.f);
if (angular) //set zeros to angles
Block::fill(simage_angles + FSize, simage_angles + FSize + FSize, 0.f);
__syncthreads();
float3 origin_point = fetch(input_cloud, index);
float3 origin_normal = input_normals ? fetch(input_normals, index) : make_float3(0.f, 0.f, 0.f);
origin_normal = normalized_safe(origin_normal); //normalize if non-zero
float3 rotation_axis = AxesStrategy::getRotationAxes(index, origin_normal);
rotation_axis = normalized_safe(rotation_axis); //normalize if non-zero
const float eps = numeric_limits<float>::epsilon ();
for(int i_neighb = threadIdx.x; i_neighb < neighb_count; i_neighb += CTA_SIZE)
{
int neighb_index = ginds[i_neighb];
// first, skip the points with distant normals
float cos_between_normals = -2.f;
if (angular || support_angle_cos > 0.f) // not bogus
{
float3 normal = normalized(fetch(normals, neighb_index));
cos_between_normals = dot(origin_normal, normal);
cos_between_normals = fmax (-1.f, fmin (1.f, cos_between_normals));
if (fabs(cos_between_normals) < support_angle_cos) // allow counter-directed normals
continue;
cos_between_normals = fabs(cos_between_normals); // the normal is not used explicitly from now
}
// now compute the coordinate in cylindric coordinate system associated with the origin point
float3 direction = fetch(surface, neighb_index) - origin_point;
float direction_norm = norm (direction);
// ignore the point itself; it does not contribute really
if (direction_norm < 10 * eps)
continue;
// the angle between the normal vector and the direction to the point
float cos_dir_axis = dot(direction, rotation_axis) / direction_norm;
cos_dir_axis = fmax(-1.f, fmin(1.f, cos_dir_axis));
// compute coordinates w.r.t. the reference frame
float beta = numeric_limits<float>::quiet_NaN();
float alpha = numeric_limits<float>::quiet_NaN();
if (radial) // radial spin image structure
{
beta = asinf(cos_dir_axis); // yes, arc sine! to get the angle against tangent, not normal!
alpha = direction_norm;
}
else // rectangular spin-image structure
{
beta = direction_norm * cos_dir_axis;
alpha = direction_norm * sqrt (1.0 - cos_dir_axis*cos_dir_axis);
if (fabs (beta) >= bin_size * image_width || alpha >= bin_size * image_width)
continue; // outside the cylinder
}
// bilinear interpolation
float beta_bin_size = radial ? (PI*0.5f/image_width) : bin_size;
int beta_bin = floorf(beta / beta_bin_size) + image_width;
int alpha_bin = floorf(alpha / bin_size);
//alpha_bin = min(simage_cols, max(0, alpha_bin));
//beta_bin = min(simage_rows, max(0, beta_bin));
if (alpha_bin == image_width) // border points
{
alpha_bin--;
// HACK: to prevent a > 1
alpha = bin_size * (alpha_bin + 1) - eps;
}
if (beta_bin == 2*image_width ) // border points
{
beta_bin--;
// HACK: to prevent b > 1
beta = beta_bin_size * (beta_bin - image_width + 1) - eps;
}
float a = alpha/bin_size - alpha_bin;
float b = beta/beta_bin_size - float(beta_bin-image_width);
incSpinI(alpha_bin, beta_bin, (1-a) * (1-b));
incSpinI(alpha_bin+1, beta_bin, a * (1-b));
incSpinI(alpha_bin, beta_bin+1, (1-a) * b );
incSpinI(alpha_bin+1, beta_bin+1, a * b );
if (angular)
{
float angle_between_normals = acos(cos_between_normals);
incAngle(alpha_bin, beta_bin, angle_between_normals * (1-a) * (1-b));
incAngle(alpha_bin+1, beta_bin, angle_between_normals * a * (1-b));
incAngle(alpha_bin, beta_bin+1, angle_between_normals * (1-a) * b );
incAngle(alpha_bin+1, beta_bin+1, angle_between_normals * a * b );
}
} /* for(int i_neighb = threadIdx.x; i_neighb < neighb_count; i_neighb += CTA_SIZE) */
__syncthreads();
if (angular)
{
//transform the sum into an average by dividing angle/spinimage element-wise.
const float *angles_beg = simage_angles + FSize;
const float *angles_end = angles_beg + FSize;
const float *images_beg = simage_angles;
Block::transform(angles_beg, angles_end, images_beg, output.ptr(i_input), Div12eps());
////Block::copy(angles_beg, angles_end, output.ptr(i_input));
//Block::copy(images_beg, images_beg + FSize, output.ptr(i_input));
}
else
{
// copy to compute sum
Block::copy(simage_angles, simage_angles + FSize, simage_angles + FSize);
__syncthreads();
//compute sum
Block::reduce_n(simage_angles + FSize, FSize, pcl::device::plus<float>());
__syncthreads();
float sum = simage_angles[FSize];
Block::transform(simage_angles, simage_angles + FSize, output.ptr(i_input), DivValIfNonZero(sum));
}
}
__device__ __forceinline__ void incSpinI(int y, int x, float value) const { atomicAdd(simage_angles + y * (2*image_width + 1) + x, value); }
__device__ __forceinline__ void incAngle(int y, int x, float value) const { atomicAdd(simage_angles+FSize + y * (2*image_width + 1) + x, value); }
};
template<typename Impl>
__global__ void computeSpinKernel(const Impl impl) { impl(); }
template<typename Impl>
inline void computeSpinImages_caller(Impl& impl, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
impl.work_size = (int)indices.size();
impl.indices = indices;
impl.input_cloud = input_cloud;
impl.input_normals = input_normals;
impl.surface = surface;
impl.normals = normals;
impl.neighbor_indices = neighbours;
impl.neighbor_indices_sizes = neighbours.sizes;
impl.min_neighb = min_neighb;
impl.image_width = image_width;
impl.bin_size = bin_size;
impl.support_angle_cos = support_angle_cos;
impl.FSize = Impl::computeFSize(image_width);
impl.output = output;
const int total = (int)indices.size();
const int max_grid_dim = 65535;
const int smem_size = 2 * Impl::computeFSize(image_width) * sizeof(float);
dim3 block(Impl::CTA_SIZE);
dim3 grid(min(total, max_grid_dim), divUp(total, max_grid_dim));
hipLaunchKernelGGL(( computeSpinKernel<Impl>), dim3(grid), dim3(block), smem_size, 0, impl);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
template<bool radial, bool angular>
void computeSpinImagesOriginNormalEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
SpinImpl<radial, angular, UseOriginNormal> si;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
template<bool radial, bool angular>
void computeSpinImagesCustomAxesEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, const float3& rotation_axis, PtrStep<float> output)
{
SpinImpl<radial, angular, UseCustomAxis> si;
si.rotation_axis = rotation_axis;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
template<bool radial, bool angular>
void computeSpinImagesCustomAxesCloudEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, const Normals& rotation_axes_cloud, PtrStep<float> output)
{
SpinImpl<radial, angular, UseCustomAxesCloud> si;
si.rotation_axes_cloud = rotation_axes_cloud;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
}
}
void pcl::device::computeSpinImagesOrigigNormal(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
typedef void (*originNormal)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int , int , float, PtrStep<float>);
const originNormal table[2][2] =
{
{ computeSpinImagesOriginNormalEx<false, false>, computeSpinImagesOriginNormalEx<false, true> },
{ computeSpinImagesOriginNormalEx<true, false>, computeSpinImagesOriginNormalEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
void pcl::device::computeSpinImagesCustomAxes(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const float3& rotation_axis, PtrStep<float> output)
{
typedef void (*customAxes)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int, int, float, const float3&, PtrStep<float>);
const customAxes table[2][2] =
{
{ computeSpinImagesCustomAxesEx<false, false>, computeSpinImagesCustomAxesEx<false, true> },
{ computeSpinImagesCustomAxesEx<true, false>, computeSpinImagesCustomAxesEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, rotation_axis, output);
}
void pcl::device::computeSpinImagesCustomAxesCloud(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const Normals& rotation_axes_cloud, PtrStep<float> output)
{
typedef void (*customAxesCloud)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int, int, float, const Normals&, PtrStep<float>);
const customAxesCloud table[2][2] =
{
{ computeSpinImagesCustomAxesCloudEx<false, false>, computeSpinImagesCustomAxesCloudEx<false, true> },
{ computeSpinImagesCustomAxesCloudEx<true, false>, computeSpinImagesCustomAxesCloudEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, rotation_axes_cloud, output);
};
namespace pcl
{
namespace device
{
struct GtThan
{
int val;
GtThan(int value) : val(value) {}
__device__ __forceinline__ unsigned char operator()(int size) const { return size > val ? 1 : 0; }
};
}
}
void pcl::device::computeMask(const NeighborIndices& neighbours, int min_neighb, DeviceArray<unsigned char>& mask)
{
thrust::device_ptr<int> beg((int*)neighbours.sizes.ptr());
thrust::device_ptr<int> end = beg + neighbours.sizes.size();
thrust::device_ptr<unsigned char> out(mask.ptr());
thrust::transform(beg, end, out, GtThan(min_neighb));
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
|
a70b49ecf2c2d1bd4f84a6ba80c899997d4e1365.cu
|
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "pcl/gpu/utils/device/block.hpp"
#include "pcl/gpu/utils/device/limits.hpp"
#include "pcl/gpu/utils/device/vector_math.hpp"
#include "pcl/gpu/utils/device/functional.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "thrust/transform.h"
#include "thrust/device_ptr.h"
namespace pcl
{
namespace device
{
//[spinimage][angles] = [0..FSize][..FSize]
extern __shared__ float simage_angles[];
template<class It> __device__ __forceinline__ float3 fetch(It ptr, int index) { return *(float3*)&ptr[index]; }
//template<class It> __device__ __forceinline__ float3 fetch(It ptr, int index) { return tr(ptr[index]); }
struct UseCustomAxis
{
float3 rotation_axis;
__device__ __forceinline__ float3 getRotationAxes(int /*index*/, const float3& /*normal*/) const { return rotation_axis; }
};
struct UseCustomAxesCloud
{
const NormalType* rotation_axes_cloud;
__device__ __forceinline__ float3 getRotationAxes(int index, const float3& /*normal*/) const { return fetch(rotation_axes_cloud, index); }
};
struct UseOriginNormal
{
__device__ __forceinline__ float3 getRotationAxes(int /*index*/, const float3& normal) const { return normal; }
};
struct Div12eps
{
__device__ __forceinline__ float operator()(float v1, float v2) const { return (float)(v1 / ( v2 + numeric_limits<double>::epsilon() )); }
};
struct DivValIfNonZero
{
float val;
__device__ __forceinline__ DivValIfNonZero(float value) : val(value) {}
__device__ __forceinline__ float operator()(float v) const { return val == 0 ? v : v/val; }
};
template<bool radial, bool angular, typename AxesStrategy>
struct SpinImpl : public AxesStrategy
{
enum
{
CTA_SIZE = 192
};
int work_size;
const int* indices;
const PointType* input_cloud;
const NormalType* input_normals;
const PointType* surface;
const NormalType* normals;
PtrStep<int> neighbor_indices;
const int* neighbor_indices_sizes;
float support_angle_cos;
int min_neighb;
int image_width;
float bin_size;
int FSize;
mutable PtrStep<float> output;
static __device__ __host__ __forceinline__ int computeFSize(int image_width)
{
int cols = 1 + image_width * 2;
int rows = 1 + image_width;
return cols * rows;
}
__device__ __forceinline__ void operator()() const
{
int i_input = blockIdx.x + gridDim.x * blockIdx.y;
int index = indices[i_input];
int neighb_count = neighbor_indices_sizes[i_input];
const int *ginds = neighbor_indices.ptr (i_input);
if (neighb_count < min_neighb)
return;
//set zeros to spin image
Block::fill(simage_angles, simage_angles + FSize, 0.f);
if (angular) //set zeros to angles
Block::fill(simage_angles + FSize, simage_angles + FSize + FSize, 0.f);
__syncthreads();
float3 origin_point = fetch(input_cloud, index);
float3 origin_normal = input_normals ? fetch(input_normals, index) : make_float3(0.f, 0.f, 0.f);
origin_normal = normalized_safe(origin_normal); //normalize if non-zero
float3 rotation_axis = AxesStrategy::getRotationAxes(index, origin_normal);
rotation_axis = normalized_safe(rotation_axis); //normalize if non-zero
const float eps = numeric_limits<float>::epsilon ();
for(int i_neighb = threadIdx.x; i_neighb < neighb_count; i_neighb += CTA_SIZE)
{
int neighb_index = ginds[i_neighb];
// first, skip the points with distant normals
float cos_between_normals = -2.f;
if (angular || support_angle_cos > 0.f) // not bogus
{
float3 normal = normalized(fetch(normals, neighb_index));
cos_between_normals = dot(origin_normal, normal);
cos_between_normals = fmax (-1.f, fmin (1.f, cos_between_normals));
if (fabs(cos_between_normals) < support_angle_cos) // allow counter-directed normals
continue;
cos_between_normals = fabs(cos_between_normals); // the normal is not used explicitly from now
}
// now compute the coordinate in cylindric coordinate system associated with the origin point
float3 direction = fetch(surface, neighb_index) - origin_point;
float direction_norm = norm (direction);
// ignore the point itself; it does not contribute really
if (direction_norm < 10 * eps)
continue;
// the angle between the normal vector and the direction to the point
float cos_dir_axis = dot(direction, rotation_axis) / direction_norm;
cos_dir_axis = fmax(-1.f, fmin(1.f, cos_dir_axis));
// compute coordinates w.r.t. the reference frame
float beta = numeric_limits<float>::quiet_NaN();
float alpha = numeric_limits<float>::quiet_NaN();
if (radial) // radial spin image structure
{
beta = asinf(cos_dir_axis); // yes, arc sine! to get the angle against tangent, not normal!
alpha = direction_norm;
}
else // rectangular spin-image structure
{
beta = direction_norm * cos_dir_axis;
alpha = direction_norm * sqrt (1.0 - cos_dir_axis*cos_dir_axis);
if (fabs (beta) >= bin_size * image_width || alpha >= bin_size * image_width)
continue; // outside the cylinder
}
// bilinear interpolation
float beta_bin_size = radial ? (PI*0.5f/image_width) : bin_size;
int beta_bin = floorf(beta / beta_bin_size) + image_width;
int alpha_bin = floorf(alpha / bin_size);
//alpha_bin = min(simage_cols, max(0, alpha_bin));
//beta_bin = min(simage_rows, max(0, beta_bin));
if (alpha_bin == image_width) // border points
{
alpha_bin--;
// HACK: to prevent a > 1
alpha = bin_size * (alpha_bin + 1) - eps;
}
if (beta_bin == 2*image_width ) // border points
{
beta_bin--;
// HACK: to prevent b > 1
beta = beta_bin_size * (beta_bin - image_width + 1) - eps;
}
float a = alpha/bin_size - alpha_bin;
float b = beta/beta_bin_size - float(beta_bin-image_width);
incSpinI(alpha_bin, beta_bin, (1-a) * (1-b));
incSpinI(alpha_bin+1, beta_bin, a * (1-b));
incSpinI(alpha_bin, beta_bin+1, (1-a) * b );
incSpinI(alpha_bin+1, beta_bin+1, a * b );
if (angular)
{
float angle_between_normals = acos(cos_between_normals);
incAngle(alpha_bin, beta_bin, angle_between_normals * (1-a) * (1-b));
incAngle(alpha_bin+1, beta_bin, angle_between_normals * a * (1-b));
incAngle(alpha_bin, beta_bin+1, angle_between_normals * (1-a) * b );
incAngle(alpha_bin+1, beta_bin+1, angle_between_normals * a * b );
}
} /* for(int i_neighb = threadIdx.x; i_neighb < neighb_count; i_neighb += CTA_SIZE) */
__syncthreads();
if (angular)
{
//transform the sum into an average by dividing angle/spinimage element-wise.
const float *angles_beg = simage_angles + FSize;
const float *angles_end = angles_beg + FSize;
const float *images_beg = simage_angles;
Block::transform(angles_beg, angles_end, images_beg, output.ptr(i_input), Div12eps());
////Block::copy(angles_beg, angles_end, output.ptr(i_input));
//Block::copy(images_beg, images_beg + FSize, output.ptr(i_input));
}
else
{
// copy to compute sum
Block::copy(simage_angles, simage_angles + FSize, simage_angles + FSize);
__syncthreads();
//compute sum
Block::reduce_n(simage_angles + FSize, FSize, pcl::device::plus<float>());
__syncthreads();
float sum = simage_angles[FSize];
Block::transform(simage_angles, simage_angles + FSize, output.ptr(i_input), DivValIfNonZero(sum));
}
}
__device__ __forceinline__ void incSpinI(int y, int x, float value) const { atomicAdd(simage_angles + y * (2*image_width + 1) + x, value); }
__device__ __forceinline__ void incAngle(int y, int x, float value) const { atomicAdd(simage_angles+FSize + y * (2*image_width + 1) + x, value); }
};
template<typename Impl>
__global__ void computeSpinKernel(const Impl impl) { impl(); }
template<typename Impl>
inline void computeSpinImages_caller(Impl& impl, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
impl.work_size = (int)indices.size();
impl.indices = indices;
impl.input_cloud = input_cloud;
impl.input_normals = input_normals;
impl.surface = surface;
impl.normals = normals;
impl.neighbor_indices = neighbours;
impl.neighbor_indices_sizes = neighbours.sizes;
impl.min_neighb = min_neighb;
impl.image_width = image_width;
impl.bin_size = bin_size;
impl.support_angle_cos = support_angle_cos;
impl.FSize = Impl::computeFSize(image_width);
impl.output = output;
const int total = (int)indices.size();
const int max_grid_dim = 65535;
const int smem_size = 2 * Impl::computeFSize(image_width) * sizeof(float);
dim3 block(Impl::CTA_SIZE);
dim3 grid(min(total, max_grid_dim), divUp(total, max_grid_dim));
computeSpinKernel<Impl><<<grid, block, smem_size>>>(impl);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
template<bool radial, bool angular>
void computeSpinImagesOriginNormalEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
SpinImpl<radial, angular, UseOriginNormal> si;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
template<bool radial, bool angular>
void computeSpinImagesCustomAxesEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, const float3& rotation_axis, PtrStep<float> output)
{
SpinImpl<radial, angular, UseCustomAxis> si;
si.rotation_axis = rotation_axis;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
template<bool radial, bool angular>
void computeSpinImagesCustomAxesCloudEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, const Normals& rotation_axes_cloud, PtrStep<float> output)
{
SpinImpl<radial, angular, UseCustomAxesCloud> si;
si.rotation_axes_cloud = rotation_axes_cloud;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
}
}
void pcl::device::computeSpinImagesOrigigNormal(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
typedef void (*originNormal)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int , int , float, PtrStep<float>);
const originNormal table[2][2] =
{
{ computeSpinImagesOriginNormalEx<false, false>, computeSpinImagesOriginNormalEx<false, true> },
{ computeSpinImagesOriginNormalEx<true, false>, computeSpinImagesOriginNormalEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
void pcl::device::computeSpinImagesCustomAxes(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const float3& rotation_axis, PtrStep<float> output)
{
typedef void (*customAxes)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int, int, float, const float3&, PtrStep<float>);
const customAxes table[2][2] =
{
{ computeSpinImagesCustomAxesEx<false, false>, computeSpinImagesCustomAxesEx<false, true> },
{ computeSpinImagesCustomAxesEx<true, false>, computeSpinImagesCustomAxesEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, rotation_axis, output);
}
void pcl::device::computeSpinImagesCustomAxesCloud(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const Normals& rotation_axes_cloud, PtrStep<float> output)
{
typedef void (*customAxesCloud)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int, int, float, const Normals&, PtrStep<float>);
const customAxesCloud table[2][2] =
{
{ computeSpinImagesCustomAxesCloudEx<false, false>, computeSpinImagesCustomAxesCloudEx<false, true> },
{ computeSpinImagesCustomAxesCloudEx<true, false>, computeSpinImagesCustomAxesCloudEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, rotation_axes_cloud, output);
};
namespace pcl
{
namespace device
{
struct GtThan
{
int val;
GtThan(int value) : val(value) {}
__device__ __forceinline__ unsigned char operator()(int size) const { return size > val ? 1 : 0; }
};
}
}
void pcl::device::computeMask(const NeighborIndices& neighbours, int min_neighb, DeviceArray<unsigned char>& mask)
{
thrust::device_ptr<int> beg((int*)neighbours.sizes.ptr());
thrust::device_ptr<int> end = beg + neighbours.sizes.size();
thrust::device_ptr<unsigned char> out(mask.ptr());
thrust::transform(beg, end, out, GtThan(min_neighb));
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
|
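The bilinear soft-binning inside SpinImpl::operator() spreads each contribution over four neighbouring bins with weights (1-a)(1-b), a(1-b), (1-a)b and ab, which sum to 1. A small host-only sketch of that weight computation follows, with made-up values for bin_size, image_width and the (alpha, beta) sample; it is illustrative only, not PCL code.
#include <cmath>
#include <cstdio>

int main()
{
    const float bin_size = 0.5f;
    const int   image_width = 4;            // beta_bin is shifted by image_width

    float alpha = 1.3f, beta = -0.2f;

    int alpha_bin = (int) std::floor(alpha / bin_size);                // 2
    int beta_bin  = (int) std::floor(beta / bin_size) + image_width;   // 3

    // fractional positions inside the bin, both in [0,1)
    float a = alpha / bin_size - alpha_bin;
    float b = beta  / bin_size - (float)(beta_bin - image_width);

    float w00 = (1 - a) * (1 - b), w10 = a * (1 - b);
    float w01 = (1 - a) * b,       w11 = a * b;

    printf("bins (%d,%d)  weights %.3f %.3f %.3f %.3f  sum %.3f\n",
           alpha_bin, beta_bin, w00, w10, w01, w11, w00 + w10 + w01 + w11);
    return 0;
}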
20dcc0bfa34897da3757d7c189e43cf910b19ab2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <device_launch_parameters.h>
using namespace std;
__global__ void init(float *a, float *b, int n, int m)
{
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
float sum = 0;
if (row == 0 || row == n - 1 || col == 0 || col == n - 1)
return;
// iterate the 3x3 neighbourhood with loop variables that do not shadow the
// kernel parameters n (width, used as the row stride) and m (height);
// the border test above assumes a square grid (n == m)
for (int r = row - 1; r <= row + 1; r++)
for (int c = col - 1; c <= col + 1; c++) {
sum += a[r*n + c];
}
sum /= 9;
b[row*n + col] = sum;
}
int main()
{
int N = 512;
int M = 512;
float *a, *b;
a = new float[N*M];
b = new float[N*M];
//initializare matrice
for (int i = 0; i < N; i++)
{
for (int j = 0; j < M; j++)
{
a[i*N + j] = (i + j) % 2;
}
}
float *a_d, *b_d;
//alocare device
hipMalloc((void**)&a_d, N*M * sizeof(float));
hipMalloc((void**)&b_d, N*M * sizeof(float));
dim3 nBlock(N / 32, N / 32, 1);
dim3 nThreadsBlock(32, 32, 1);
hipMemcpy(a_d, a, N * M * sizeof(float), hipMemcpyHostToDevice);
// launch order is <<<grid, block>>>: nBlock blocks of nThreadsBlock threads
init << <nBlock, nThreadsBlock >> > (a_d, b_d, N, M);
hipMemcpy(b, b_d, N*M * sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
for (int j = 0; j < M; j++)
{
cout << b[i + j * N] << " ";
}
cout << "\n";
}
return 0;
}
|
20dcc0bfa34897da3757d7c189e43cf910b19ab2.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <device_launch_parameters.h>
using namespace std;
__global__ void init(float *a, float *b, int n, int m)
{
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
float sum = 0;
if (row == 0 || row == n - 1 || col == 0 || col == n - 1)
return;
// iterate the 3x3 neighbourhood with loop variables that do not shadow the
// kernel parameters n (width, used as the row stride) and m (height);
// the border test above assumes a square grid (n == m)
for (int r = row - 1; r <= row + 1; r++)
for (int c = col - 1; c <= col + 1; c++) {
sum += a[r*n + c];
}
sum /= 9;
b[row*n + col] = sum;
}
int main()
{
int N = 512;
int M = 512;
float *a, *b;
a = new float[N*M];
b = new float[N*M];
//initializare matrice
for (int i = 0; i < N; i++)
{
for (int j = 0; j < M; j++)
{
a[i*N + j] = (i + j) % 2;
}
}
float *a_d, *b_d;
//alocare device
cudaMalloc((void**)&a_d, N*M * sizeof(float));
cudaMalloc((void**)&b_d, N*M * sizeof(float));
dim3 nBlock(N / 32, N / 32, 1);
dim3 nThreadsBlock(32, 32, 1);
cudaMemcpy(a_d, a, N * M * sizeof(float), cudaMemcpyHostToDevice);
// launch order is <<<grid, block>>>: nBlock blocks of nThreadsBlock threads
init << <nBlock, nThreadsBlock >> > (a_d, b_d, N, M);
cudaMemcpy(b, b_d, N*M * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
for (int j = 0; j < M; j++)
{
cout << b[i + j * N] << " ";
}
cout << "\n";
}
return 0;
}
|
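For contrast with the kernel above, here is a self-contained sketch of the same 3x3 box filter written with explicit row-major indexing (row * width + column) and the conventional <<<grid, block>>> launch order; the kernel and variable names are illustrative, not taken from the file above.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void box3x3(const float *in, float *out, int width, int height)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < 1 || col < 1 || row > height - 2 || col > width - 2)
        return;                                   // skip the border
    float sum = 0.0f;
    for (int r = row - 1; r <= row + 1; ++r)      // distinct loop variables, so
        for (int c = col - 1; c <= col + 1; ++c)  // width/height are never shadowed
            sum += in[r * width + c];             // row-major: row * stride + column
    out[row * width + col] = sum / 9.0f;
}

int main()
{
    const int W = 512, H = 512;
    float *in_d, *out_d;
    cudaMalloc((void**)&in_d,  W * H * sizeof(float));
    cudaMalloc((void**)&out_d, W * H * sizeof(float));
    cudaMemset(in_d,  0, W * H * sizeof(float));
    cudaMemset(out_d, 0, W * H * sizeof(float));
    dim3 block(32, 32);
    dim3 grid((W + block.x - 1) / block.x, (H + block.y - 1) / block.y);
    box3x3<<<grid, block>>>(in_d, out_d, W, H);   // grid first, then block
    cudaDeviceSynchronize();
    printf("filtered a %dx%d image\n", W, H);
    cudaFree(in_d); cudaFree(out_d);
    return 0;
}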
47fdb883a706cfbc3c527c609cbe3662a858f5b4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
static __global__ void _finalizeTree(kd_tree tree);
#define FINALIZE_CHUNK_SIZE 256
void finalizeTree() {
// grab the tree header
kd_tree t;
CUDA_SAFE_CALL(hipMemcpy(&t, tree, sizeof(kd_tree), hipMemcpyDeviceToHost));
int roundedNodeCount = ((t.nodeCount - 1) / FINALIZE_CHUNK_SIZE + 1);
hipLaunchKernelGGL(( _finalizeTree), dim3(roundedNodeCount), dim3(FINALIZE_CHUNK_SIZE), 0, 0, t);
}
static __global__ void _finalizeTree(kd_tree tree) {
int myNodeIdx = threadIdx.x + blockDim.x * blockIdx.x;
if (myNodeIdx >= tree.nodeCount) return;
node myNode = tree.nodeArray[myNodeIdx];
int parent = myNode.parent;
myNode.max_val = INF;
myNode.min_val = -INF;
// now we walk up the tree, figuring out the bounds in the cut dimension
DEBUG(printf("At node %i, walking upwards\n", myNodeIdx));
while (parent >= 0) {
DEBUG(printf(" retrieving node %i\n", parent));
node currentNode = tree.nodeArray[parent];
if (currentNode.cut_dim == myNode.cut_dim) {
if (myNode.cut_val < currentNode.cut_val) {
myNode.max_val = fminf(myNode.max_val, currentNode.cut_val);
} else {
myNode.min_val = fmaxf(myNode.min_val, currentNode.cut_val);
}
}
parent = currentNode.parent;
}
// now write the node back
tree.nodeArray[myNodeIdx].min_val = myNode.min_val;
tree.nodeArray[myNodeIdx].max_val = myNode.max_val;
}
|
47fdb883a706cfbc3c527c609cbe3662a858f5b4.cu
|
static __global__ void _finalizeTree(kd_tree tree);
#define FINALIZE_CHUNK_SIZE 256
void finalizeTree() {
// grab the tree header
kd_tree t;
CUDA_SAFE_CALL(cudaMemcpy(&t, tree, sizeof(kd_tree), cudaMemcpyDeviceToHost));
int roundedNodeCount = ((t.nodeCount - 1) / FINALIZE_CHUNK_SIZE + 1);
_finalizeTree<<<roundedNodeCount, FINALIZE_CHUNK_SIZE>>>(t);
}
static __global__ void _finalizeTree(kd_tree tree) {
int myNodeIdx = threadIdx.x + blockDim.x * blockIdx.x;
if (myNodeIdx >= tree.nodeCount) return;
node myNode = tree.nodeArray[myNodeIdx];
int parent = myNode.parent;
myNode.max_val = INF;
myNode.min_val = -INF;
// now we walk up the tree, figuring out the bounds in the cut dimension
DEBUG(printf("At node %i, walking upwards\n", myNodeIdx));
while (parent >= 0) {
DEBUG(printf(" retrieving node %i\n", parent));
node currentNode = tree.nodeArray[parent];
if (currentNode.cut_dim == myNode.cut_dim) {
if (myNode.cut_val < currentNode.cut_val) {
myNode.max_val = fminf(myNode.max_val, currentNode.cut_val);
} else {
myNode.min_val = fmaxf(myNode.min_val, currentNode.cut_val);
}
}
parent = currentNode.parent;
}
// now write the node back
tree.nodeArray[myNodeIdx].min_val = myNode.min_val;
tree.nodeArray[myNodeIdx].max_val = myNode.max_val;
}
|
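The core of _finalizeTree is the upward walk that tightens a node's [min_val, max_val] in its own cut dimension using every ancestor that cuts along the same dimension. A host-side sketch of that walk is below; the Node struct here is a minimal stand-in assumed for illustration, not the project's real kd_tree/node definitions.
#include <vector>
#include <cmath>
#include <cstdio>

struct Node {
    int   parent;     // -1 for the root
    int   cut_dim;
    float cut_val;
    float min_val, max_val;
};

void finalizeBounds(std::vector<Node>& nodes)
{
    for (std::size_t i = 0; i < nodes.size(); ++i) {
        Node& n = nodes[i];
        n.min_val = -INFINITY;
        n.max_val =  INFINITY;
        // walk up the ancestors, exactly like the kernel's parent loop
        for (int p = n.parent; p >= 0; p = nodes[p].parent) {
            const Node& anc = nodes[p];
            if (anc.cut_dim != n.cut_dim) continue;
            if (n.cut_val < anc.cut_val)
                n.max_val = std::fmin(n.max_val, anc.cut_val);  // on the low side
            else
                n.min_val = std::fmax(n.min_val, anc.cut_val);  // on the high side
        }
    }
}

int main()
{
    // tiny tree: root cuts dim 0 at 5.0, its child cuts dim 0 at 2.0
    std::vector<Node> nodes = {
        { -1, 0, 5.0f, 0.0f, 0.0f },
        {  0, 0, 2.0f, 0.0f, 0.0f },
    };
    finalizeBounds(nodes);
    printf("child bounds in dim 0: [%g, %g]\n", nodes[1].min_val, nodes[1].max_val);
    return 0;
}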
dab1c3a6b37ae58198e9d6828bbe17a3cc28757a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FillCandidates.cuh"
__device__ void fill_candidates_impl(
short* h0_candidates,
short* h2_candidates,
const uint* module_hitStarts,
const uint* module_hitNums,
const float* hit_Phis,
const uint hit_offset
) {
// Notation is m0, m1, m2 in reverse order for each module
// A hit in those is h0, h1, h2 respectively
// Assign a h1 to each threadIdx.x
const auto module_index = blockIdx.y + 2; // 48 blocks y
const auto m1_hitNums = module_hitNums[module_index];
for (auto i=0; i<(m1_hitNums + blockDim.x - 1) / blockDim.x; ++i) {
const auto h1_rel_index = i*blockDim.x + threadIdx.x;
if (h1_rel_index < m1_hitNums) {
// Find for module module_index, hit h1_rel_index the candidates
const auto m0_hitStarts = module_hitStarts[module_index+2] - hit_offset;
const auto m2_hitStarts = module_hitStarts[module_index-2] - hit_offset;
const auto m0_hitNums = module_hitNums[module_index+2];
const auto m2_hitNums = module_hitNums[module_index-2];
const auto h1_index = module_hitStarts[module_index] + h1_rel_index - hit_offset;
// Calculate phi limits
const float h1_phi = hit_Phis[h1_index];
// Find candidates
bool first_h0_found = false, last_h0_found = false;
bool first_h2_found = false, last_h2_found = false;
// Add h0 candidates
for (auto h0_rel_index=0; h0_rel_index < m0_hitNums; ++h0_rel_index) {
const unsigned short h0_index = m0_hitStarts + h0_rel_index;
const auto h0_phi = hit_Phis[h0_index];
const bool tolerance_condition = fabs(h1_phi - h0_phi) < VeloTracking::phi_extrapolation;
if (!first_h0_found && tolerance_condition) {
h0_candidates[2*h1_index] = h0_index;
first_h0_found = true;
}
else if (first_h0_found && !last_h0_found && !tolerance_condition) {
h0_candidates[2*h1_index + 1] = h0_index;
last_h0_found = true;
break;
}
}
if (first_h0_found && !last_h0_found) {
h0_candidates[2*h1_index + 1] = m0_hitStarts + m0_hitNums;
}
// In case of repeated execution, we need to populate
// the candidates with -1 if not found
else if (!first_h0_found) {
h0_candidates[2*h1_index] = -1;
h0_candidates[2*h1_index + 1] = -1;
}
// Add h2 candidates
for (int h2_rel_index=0; h2_rel_index < m2_hitNums; ++h2_rel_index) {
const unsigned short h2_index = m2_hitStarts + h2_rel_index;
const auto h2_phi = hit_Phis[h2_index];
const bool tolerance_condition = fabs(h1_phi - h2_phi) < VeloTracking::phi_extrapolation;
if (!first_h2_found && tolerance_condition) {
h2_candidates[2*h1_index] = h2_index;
first_h2_found = true;
}
else if (first_h2_found && !last_h2_found && !tolerance_condition) {
h2_candidates[2*h1_index + 1] = h2_index;
last_h2_found = true;
break;
}
}
if (first_h2_found && !last_h2_found) {
h2_candidates[2*h1_index + 1] = m2_hitStarts + m2_hitNums;
}
else if (!first_h2_found) {
h2_candidates[2*h1_index] = -1;
h2_candidates[2*h1_index + 1] = -1;
}
}
}
}
__global__ void fill_candidates(
uint32_t* dev_velo_cluster_container,
uint* dev_module_cluster_start,
uint* dev_module_cluster_num,
short* dev_h0_candidates,
short* dev_h2_candidates
) {
/* Data initialization */
// Each event is treated with two blocks, one for each side.
const uint event_number = blockIdx.x;
const uint number_of_events = gridDim.x;
// Pointers to data within the event
const uint number_of_hits = dev_module_cluster_start[VeloTracking::n_modules * number_of_events];
const uint* module_hitStarts = dev_module_cluster_start + event_number * VeloTracking::n_modules;
const uint* module_hitNums = dev_module_cluster_num + event_number * VeloTracking::n_modules;
const uint hit_offset = module_hitStarts[0];
assert((module_hitStarts[52] - module_hitStarts[0]) < VeloTracking::max_number_of_hits_per_event);
// Order has changed since SortByPhi
const float* hit_Phis = (float*) (dev_velo_cluster_container + 4 * number_of_hits + hit_offset);
short* h0_candidates = dev_h0_candidates + 2*hit_offset;
short* h2_candidates = dev_h2_candidates + 2*hit_offset;
fill_candidates_impl(
h0_candidates,
h2_candidates,
module_hitStarts,
module_hitNums,
hit_Phis,
hit_offset
);
}
|
dab1c3a6b37ae58198e9d6828bbe17a3cc28757a.cu
|
#include "FillCandidates.cuh"
__device__ void fill_candidates_impl(
short* h0_candidates,
short* h2_candidates,
const uint* module_hitStarts,
const uint* module_hitNums,
const float* hit_Phis,
const uint hit_offset
) {
// Notation is m0, m1, m2 in reverse order for each module
// A hit in those is h0, h1, h2 respectively
// Assign a h1 to each threadIdx.x
const auto module_index = blockIdx.y + 2; // 48 blocks y
const auto m1_hitNums = module_hitNums[module_index];
for (auto i=0; i<(m1_hitNums + blockDim.x - 1) / blockDim.x; ++i) {
const auto h1_rel_index = i*blockDim.x + threadIdx.x;
if (h1_rel_index < m1_hitNums) {
// Find for module module_index, hit h1_rel_index the candidates
const auto m0_hitStarts = module_hitStarts[module_index+2] - hit_offset;
const auto m2_hitStarts = module_hitStarts[module_index-2] - hit_offset;
const auto m0_hitNums = module_hitNums[module_index+2];
const auto m2_hitNums = module_hitNums[module_index-2];
const auto h1_index = module_hitStarts[module_index] + h1_rel_index - hit_offset;
// Calculate phi limits
const float h1_phi = hit_Phis[h1_index];
// Find candidates
bool first_h0_found = false, last_h0_found = false;
bool first_h2_found = false, last_h2_found = false;
// Add h0 candidates
for (auto h0_rel_index=0; h0_rel_index < m0_hitNums; ++h0_rel_index) {
const unsigned short h0_index = m0_hitStarts + h0_rel_index;
const auto h0_phi = hit_Phis[h0_index];
const bool tolerance_condition = fabs(h1_phi - h0_phi) < VeloTracking::phi_extrapolation;
if (!first_h0_found && tolerance_condition) {
h0_candidates[2*h1_index] = h0_index;
first_h0_found = true;
}
else if (first_h0_found && !last_h0_found && !tolerance_condition) {
h0_candidates[2*h1_index + 1] = h0_index;
last_h0_found = true;
break;
}
}
if (first_h0_found && !last_h0_found) {
h0_candidates[2*h1_index + 1] = m0_hitStarts + m0_hitNums;
}
// In case of repeated execution, we need to populate
// the candidates with -1 if not found
else if (!first_h0_found) {
h0_candidates[2*h1_index] = -1;
h0_candidates[2*h1_index + 1] = -1;
}
// Add h2 candidates
for (int h2_rel_index=0; h2_rel_index < m2_hitNums; ++h2_rel_index) {
const unsigned short h2_index = m2_hitStarts + h2_rel_index;
const auto h2_phi = hit_Phis[h2_index];
const bool tolerance_condition = fabs(h1_phi - h2_phi) < VeloTracking::phi_extrapolation;
if (!first_h2_found && tolerance_condition) {
h2_candidates[2*h1_index] = h2_index;
first_h2_found = true;
}
else if (first_h2_found && !last_h2_found && !tolerance_condition) {
h2_candidates[2*h1_index + 1] = h2_index;
last_h2_found = true;
break;
}
}
if (first_h2_found && !last_h2_found) {
h2_candidates[2*h1_index + 1] = m2_hitStarts + m2_hitNums;
}
else if (!first_h2_found) {
h2_candidates[2*h1_index] = -1;
h2_candidates[2*h1_index + 1] = -1;
}
}
}
}
__global__ void fill_candidates(
uint32_t* dev_velo_cluster_container,
uint* dev_module_cluster_start,
uint* dev_module_cluster_num,
short* dev_h0_candidates,
short* dev_h2_candidates
) {
/* Data initialization */
// Each event is treated with two blocks, one for each side.
const uint event_number = blockIdx.x;
const uint number_of_events = gridDim.x;
// Pointers to data within the event
const uint number_of_hits = dev_module_cluster_start[VeloTracking::n_modules * number_of_events];
const uint* module_hitStarts = dev_module_cluster_start + event_number * VeloTracking::n_modules;
const uint* module_hitNums = dev_module_cluster_num + event_number * VeloTracking::n_modules;
const uint hit_offset = module_hitStarts[0];
assert((module_hitStarts[52] - module_hitStarts[0]) < VeloTracking::max_number_of_hits_per_event);
// Order has changed since SortByPhi
const float* hit_Phis = (float*) (dev_velo_cluster_container + 4 * number_of_hits + hit_offset);
short* h0_candidates = dev_h0_candidates + 2*hit_offset;
short* h2_candidates = dev_h2_candidates + 2*hit_offset;
fill_candidates_impl(
h0_candidates,
h2_candidates,
module_hitStarts,
module_hitNums,
hit_Phis,
hit_offset
);
}
|
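fill_candidates_impl records, for each h1 hit, the index of the first candidate inside the phi tolerance window and one past the last, or -1/-1 if none qualifies. A host-side sketch of that window search on a plain array follows; the function and variable names are illustrative only.
#include <cmath>
#include <cstdio>

void findWindow(const float* phis, int n, float ref_phi, float tol,
                int& first, int& last)
{
    first = -1; last = -1;
    bool found_first = false;
    for (int i = 0; i < n; ++i) {
        bool inside = std::fabs(ref_phi - phis[i]) < tol;
        if (!found_first && inside) { first = i; found_first = true; }
        else if (found_first && !inside) { last = i; return; }  // end of the run
    }
    if (found_first) last = n;   // the run extends to the end of the module
}

int main()
{
    float phis[] = { -1.2f, -0.4f, -0.1f, 0.05f, 0.6f, 1.4f };
    int first, last;
    findWindow(phis, 6, 0.0f, 0.3f, first, last);
    printf("candidate window: [%d, %d)\n", first, last);   // expected [2, 4)
    return 0;
}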
d4d50649e5cb5e30dd550417140407f87d552a09.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iostream>
#include "Dependencies\glew\glew.h"
#include "Dependencies\freeglut\freeglut.h"
#include <stdlib.h>
#include <random>
#include <stdio.h>
using namespace std;
#define WIDTH 1024
#define HEIGHT 768
#define SIZE 786432
#define X_SIZE 0.001953125 // 2/1024
#define Y_SIZE 0.002604166 // 2/768
int numOfSpecies;
float** color = new float*[10];
bool* species;
bool* tmp;
bool* dev_species = 0;
bool* dev_tmp = 0;
void glutTimer(int value);
void initializeColor();
void initializeGrid();
void keyboard(unsigned char key, int x, int y);
void display();
void draw();
void gameOfLife();
__global__ void countNeighbors(bool* dev_s, bool* temp, int type)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
int numOfNeighbors = 0;
if (i >= WIDTH)
{
if (i%WIDTH != 0)
{
if (dev_s[type*SIZE + i - WIDTH - 1]) // Top-Left Corner
numOfNeighbors++;
}
if (dev_s[type*SIZE + i - WIDTH]) // Top-Center Edge
numOfNeighbors++;
if (i%WIDTH != WIDTH - 1)
{
if (dev_s[type*SIZE + i - WIDTH + 1]) // Top-Right Corner
numOfNeighbors++;
}
}
if (i < WIDTH*(HEIGHT - 1))
{
if (i%WIDTH != 0)
{
if (dev_s[type*SIZE + i + WIDTH - 1]) // Bottom-Left Corner
numOfNeighbors++;
}
if (dev_s[type*SIZE + i + WIDTH]) // Bottom-Center Edge
numOfNeighbors++;
if (i%WIDTH != WIDTH - 1)
{
if (dev_s[type*SIZE + i + WIDTH + 1]) // Bottom-Right Corner
numOfNeighbors++;
}
}
if (i%WIDTH != 0)
{
if (dev_s[type*SIZE + i - 1]) // Middle-Left Edge
numOfNeighbors++;
}
if (i%WIDTH != WIDTH - 1)
{
if (dev_s[type*SIZE + i + 1]) // Middle-Right Edge
numOfNeighbors++;
}
// Rules of game of life for next state
if (numOfNeighbors < 2) // Less than 2, underpopulated
temp[i + type*SIZE] = false;
else if ((numOfNeighbors == 2) && (!dev_s[type*SIZE + i])) // 2 neighbors, and currently dead, remain dead
temp[i + type*SIZE] = false;
else if (numOfNeighbors == 3) // 3 neighbors, revive/remain alive
temp[i + type*SIZE] = true;
else if (numOfNeighbors > 3) // More than 3, overpopulated
temp[i + type*SIZE] = false;
}
__global__ void setState(bool* dev_s, bool* temp)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
dev_s[i] = temp[i];
}
int main(int argc, char** argv)
{
cout << "Enter number of species: " << endl;
cin >> numOfSpecies;
if (numOfSpecies < 5)
{
cout << "Number of species is less than 5. Default to 5." << endl;
numOfSpecies = 5;
}
else if (numOfSpecies > 10)
{
cout << "Number of species is greater than 10. Default to 10." << endl;
numOfSpecies = 10;
}
glutInit(&argc, argv);
// Initialize the color schemes and initial grid
initializeColor();
initializeGrid();
gameOfLife();
return 0;
}
// Function to simulate the Game of Life
void gameOfLife()
{
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for two vectors (one input, one output)
cudaStatus = hipMalloc((void**)&dev_species, SIZE * numOfSpecies * sizeof(bool));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_tmp, SIZE * numOfSpecies * sizeof(bool));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_species, species, SIZE * numOfSpecies * sizeof(bool), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_tmp, tmp, SIZE * numOfSpecies * sizeof(bool), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Initialize OpenGL
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
glutInitWindowSize(WIDTH, HEIGHT);
glutInitWindowPosition(0, 0);
glutCreateWindow("Game of Life - Multiple Species");
// Set a 1 ms timer that keeps triggering redisplays
glutTimerFunc(1, glutTimer, 1);
glutKeyboardFunc(keyboard);
// Set display function that will be called
glutDisplayFunc(display);
// Call OpenGL main loop
glutMainLoop();
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
}
Error:
hipFree(dev_species);
hipFree(dev_tmp);
}
void keyboard(unsigned char key, int x, int y)
{
switch (key) {
case(27):
hipFree(dev_species);
hipFree(dev_tmp);
exit(0);
}
}
void glutTimer(int value)
{
glutPostRedisplay();
glutTimerFunc(1, glutTimer, 1);
}
void initializeColor()
{
for (int i = 0; i < 10; i++)
{
color[i] = new float[3];
}
color[0][0] = 1.0, color[0][1] = 0.0, color[0][2] = 0.0;
color[1][0] = 0.0, color[1][1] = 1.0, color[1][2] = 0.0;
color[2][0] = 0.0, color[2][1] = 0.0, color[2][2] = 1.0;
color[3][0] = 1.0, color[3][1] = 1.0, color[3][2] = 0.0;
color[4][0] = 0.0, color[4][1] = 1.0, color[4][2] = 1.0;
color[5][0] = 1.0, color[5][1] = 0.0, color[5][2] = 1.0;
color[6][0] = 1.0, color[6][1] = 1.0, color[6][2] = 1.0;
color[7][0] = 0.5, color[7][1] = 0.75, color[7][2] = 0.33;
color[8][0] = 0.33, color[8][1] = 0.5, color[8][2] = 0.75;
color[9][0] = 0.75, color[9][1] = 0.33, color[9][2] = 0.5;
}
void initializeGrid()
{
species = new bool[numOfSpecies*SIZE];
tmp = new bool[SIZE*numOfSpecies];
for (int i = 0; i < numOfSpecies*SIZE; i++)
{
species[i] = false;
tmp[i] = false;
}
// Random number generation
default_random_engine generator;
uniform_int_distribution<int> distribution(1, numOfSpecies);
uniform_int_distribution<int> state_distribution(0, 1);
for (int i = 0; i < SIZE; i++)
{
int type = distribution(generator); // Species
if (state_distribution(generator) == 1)
species[(type - 1)*SIZE + i] = true; // Alive
//else dead
}
}
void display()
{
// Call draw function to display grid
draw();
glutSwapBuffers();
//system("pause");
hipError_t cudaStatus;
// Call threads for each species to check conditions
for (int i = 0; i < numOfSpecies; i++)
{
hipLaunchKernelGGL(( countNeighbors), dim3(1024), dim3(768), 0, 0, dev_species, dev_tmp, i);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
}
}
hipLaunchKernelGGL(( setState), dim3(2048*numOfSpecies), dim3(384), 0, 0, dev_species, dev_tmp);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
}
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
}
cudaStatus = hipMemcpy(species, dev_species, SIZE * numOfSpecies * sizeof(bool), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
}
}
void draw()
{
glClear(GL_COLOR_BUFFER_BIT);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); // GL_FILL is the valid fill mode; GL_QUADS is a primitive type, not a polygon mode
// Variables used to draw each pixel and define color
GLfloat x;
GLfloat y = 1.0;
GLfloat red = 0.0;
GLfloat blue = 0.0;
GLfloat green = 0.0;
float factor;
for (int i = 0; i<HEIGHT; i++)
{
x = -1.0;
for (int j = 0; j<WIDTH; j++)
{
factor = 0.0;
red = 0.0;
blue = 0.0;
green = 0.0;
glBegin(GL_POLYGON);
//Choose color
for (int k = 0; k < numOfSpecies; k++)
{
if (species[k*SIZE + i*WIDTH + j])
{
// Increase the factor based on number of live species on current pixel
factor++;
red += color[k][0];
green += color[k][1];
blue += color[k][2];
}
}
if (factor != 0)
glColor3f(red / factor, green / factor, blue / factor);
else
glColor3f(red, green, blue); //black (all components are zero here)
glVertex2f(x, y - Y_SIZE);
glVertex2f(x, y);
glVertex2f(x + X_SIZE, y);
glVertex2f(x + X_SIZE, y - Y_SIZE);
glEnd();
x += X_SIZE;
}
y -= Y_SIZE;
}
}
|
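The effective per-cell rule in countNeighbors is the usual Game of Life rule: survive with 2 or 3 live neighbours, be born with exactly 3, die otherwise; the 2-neighbour case keeps the current state because temp retains the previous generation's value. A tiny host-side sketch of that rule, purely illustrative:
#include <cstdio>

bool nextState(bool alive, int liveNeighbours)
{
    if (liveNeighbours < 2)  return false;   // underpopulated
    if (liveNeighbours == 2) return alive;   // keep the current state
    if (liveNeighbours == 3) return true;    // survive or be born
    return false;                            // overpopulated
}

int main()
{
    printf("alive, 1 neighbour  -> %d\n", nextState(true, 1));   // 0
    printf("alive, 2 neighbours -> %d\n", nextState(true, 2));   // 1
    printf("dead,  3 neighbours -> %d\n", nextState(false, 3));  // 1
    printf("alive, 4 neighbours -> %d\n", nextState(true, 4));   // 0
    return 0;
}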
d4d50649e5cb5e30dd550417140407f87d552a09.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iostream>
#include "Dependencies\glew\glew.h"
#include "Dependencies\freeglut\freeglut.h"
#include <stdlib.h>
#include <random>
#include <stdio.h>
using namespace std;
#define WIDTH 1024
#define HEIGHT 768
#define SIZE 786432
#define X_SIZE 0.001953125 // 2/1024
#define Y_SIZE 0.002604166 // 2/768
int numOfSpecies;
float** color = new float*[10];
bool* species;
bool* tmp;
bool* dev_species = 0;
bool* dev_tmp = 0;
void glutTimer(int value);
void initializeColor();
void initializeGrid();
void keyboard(unsigned char key, int x, int y);
void display();
void draw();
void gameOfLife();
__global__ void countNeighbors(bool* dev_s, bool* temp, int type)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
int numOfNeighbors = 0;
if (i >= WIDTH)
{
if (i%WIDTH != 0)
{
if (dev_s[type*SIZE + i - WIDTH - 1]) // Top-Left Corner
numOfNeighbors++;
}
if (dev_s[type*SIZE + i - WIDTH]) // Top-Center Edge
numOfNeighbors++;
if (i%WIDTH != WIDTH - 1)
{
if (dev_s[type*SIZE + i - WIDTH + 1]) // Top-Right Corner
numOfNeighbors++;
}
}
if (i < WIDTH*(HEIGHT - 1))
{
if (i%WIDTH != 0)
{
if (dev_s[type*SIZE + i + WIDTH - 1]) // Bottom-Left Corner
numOfNeighbors++;
}
if (dev_s[type*SIZE + i + WIDTH]) // Bottom-Center Edge
numOfNeighbors++;
if (i%WIDTH != WIDTH - 1)
{
if (dev_s[type*SIZE + i + WIDTH + 1]) // Bottom-Right Corner
numOfNeighbors++;
}
}
if (i%WIDTH != 0)
{
if (dev_s[type*SIZE + i - 1]) // Middle-Left Edge
numOfNeighbors++;
}
if (i%WIDTH != WIDTH - 1)
{
if (dev_s[type*SIZE + i + 1]) // Middle-Right Edge
numOfNeighbors++;
}
// Rules of game of life for next state
if (numOfNeighbors < 2) // Less than 2, underpopulated
temp[i + type*SIZE] = false;
else if ((numOfNeighbors == 2) && (!dev_s[type*SIZE + i])) // 2 neighbors, and currently dead, remain dead
temp[i + type*SIZE] = false;
else if (numOfNeighbors == 3) // 3 neighbors, revive/remain alive
temp[i + type*SIZE] = true;
else if (numOfNeighbors > 3) // More than 3, overpopulated
temp[i + type*SIZE] = false;
}
__global__ void setState(bool* dev_s, bool* temp)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
dev_s[i] = temp[i];
}
int main(int argc, char** argv)
{
cout << "Enter number of species: " << endl;
cin >> numOfSpecies;
if (numOfSpecies < 5)
{
cout << "Number of species is less than 5. Default to 5." << endl;
numOfSpecies = 5;
}
else if (numOfSpecies > 10)
{
cout << "Number of species is greater than 10. Default to 10." << endl;
numOfSpecies = 10;
}
glutInit(&argc, argv);
// Initialize the color schemes and initial grid
initializeColor();
initializeGrid();
gameOfLife();
return 0;
}
// Function to simulate the Game of Life
void gameOfLife()
{
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for two vectors (one input, one output)
cudaStatus = cudaMalloc((void**)&dev_species, SIZE * numOfSpecies * sizeof(bool));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_tmp, SIZE * numOfSpecies * sizeof(bool));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_species, species, SIZE * numOfSpecies * sizeof(bool), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_tmp, tmp, SIZE * numOfSpecies * sizeof(bool), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Initialize OpenGL
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); // double-buffered so glutSwapBuffers() in display() presents each frame
glutInitWindowSize(WIDTH, HEIGHT);
glutInitWindowPosition(0, 0);
glutCreateWindow("Game of Life - Multiple Species");
// Set timer to request a redisplay every 1 ms (i.e. as fast as GLUT allows)
glutTimerFunc(1, glutTimer, 1);
glutKeyboardFunc(keyboard);
// Set display function that will be called
glutDisplayFunc(display);
// Call OpenGL main loop
glutMainLoop();
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
}
Error:
cudaFree(dev_species);
cudaFree(dev_tmp);
}
void keyboard(unsigned char key, int x, int y)
{
switch (key) {
case(27):
cudaFree(dev_species);
cudaFree(dev_tmp);
exit(0);
}
}
void glutTimer(int value)
{
glutPostRedisplay();
glutTimerFunc(1, glutTimer, 1);
}
void initializeColor()
{
for (int i = 0; i < 10; i++)
{
color[i] = new float[3];
}
color[0][0] = 1.0, color[0][1] = 0.0, color[0][2] = 0.0;
color[1][0] = 0.0, color[1][1] = 1.0, color[1][2] = 0.0;
color[2][0] = 0.0, color[2][1] = 0.0, color[2][2] = 1.0;
color[3][0] = 1.0, color[3][1] = 1.0, color[3][2] = 0.0;
color[4][0] = 0.0, color[4][1] = 1.0, color[4][2] = 1.0;
color[5][0] = 1.0, color[5][1] = 0.0, color[5][2] = 1.0;
color[6][0] = 1.0, color[6][1] = 1.0, color[6][2] = 1.0;
color[7][0] = 0.5, color[7][1] = 0.75, color[7][2] = 0.33;
color[8][0] = 0.33, color[8][1] = 0.5, color[8][2] = 0.75;
color[9][0] = 0.75, color[9][1] = 0.33, color[9][2] = 0.5;
}
void initializeGrid()
{
species = new bool[numOfSpecies*SIZE];
tmp = new bool[SIZE*numOfSpecies];
for (int i = 0; i < numOfSpecies*SIZE; i++)
{
species[i] = false;
tmp[i] = false;
}
// Random number generation
default_random_engine generator;
uniform_int_distribution<int> distribution(1, numOfSpecies);
uniform_int_distribution<int> state_distribution(0, 1);
for (int i = 0; i < SIZE; i++)
{
int type = distribution(generator); // Species
if (state_distribution(generator) == 1)
species[(type - 1)*SIZE + i] = true; // Alive
//else dead
}
}
void display()
{
// Call draw function to display grid
draw();
glutSwapBuffers();
//system("pause");
cudaError_t cudaStatus;
// Call threads for each species to check conditions
for (int i = 0; i < numOfSpecies; i++)
{
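// 1024 blocks x 768 threads = WIDTH*HEIGHT = SIZE threads: one thread per cell of this species' grid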
countNeighbors<<<1024, 768>>>(dev_species, dev_tmp, i);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
}
}
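// 2048*numOfSpecies blocks x 384 threads = SIZE*numOfSpecies threads: one thread per cell of every species' grid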
setState<<<2048*numOfSpecies, 384>>>(dev_species, dev_tmp);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
}
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
}
cudaStatus = cudaMemcpy(species, dev_species, SIZE * numOfSpecies * sizeof(bool), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
}
}
void draw()
{
glClear(GL_COLOR_BUFFER_BIT);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); // draw each cell as a filled quad
// Variables used to draw each pixel and define color
GLfloat x;
GLfloat y = 1.0;
GLfloat red = 0.0;
GLfloat blue = 0.0;
GLfloat green = 0.0;
float factor;
for (int i = 0; i<HEIGHT; i++)
{
x = -1.0;
for (int j = 0; j<WIDTH; j++)
{
factor = 0.0;
red = 0.0;
blue = 0.0;
green = 0.0;
glBegin(GL_POLYGON);
//Choose color
for (int k = 0; k < numOfSpecies; k++)
{
if (species[k*SIZE + i*WIDTH + j])
{
// Increase the factor based on number of live species on current pixel
factor++;
red += color[k][0];
green += color[k][1];
blue += color[k][2];
}
}
if (factor != 0)
glColor3f(red / factor, green / factor, blue / factor);
else
glColor3f(red, green, blue); //black (all components are zero here)
glVertex2f(x, y - Y_SIZE);
glVertex2f(x, y);
glVertex2f(x + X_SIZE, y);
glVertex2f(x + X_SIZE, y - Y_SIZE);
glEnd();
x += X_SIZE;
}
y -= Y_SIZE;
}
}
|
48975997f0095c198039f05952d8a0aba9e0ed68.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<iostream>
using namespace std;
__global__ void add_neighbour(int *A, int offset) {
int tid = threadIdx.x;
/* int a = A[tid + offset];
printf("%d, %d\n", tid, a);
__syncthreads();
A[tid + offset] = A[tid];
__syncthreads();
A[tid] = A[tid + offset];
printf("%d, %d\n",tid ,A[tid]);*/
A[tid] += A[tid];//A[tid + offset];
}
const int N = 2;
const int threadsPerBlock = 2;
const int blockPerGrid = 1;
int main(){
int* A, *devA;
A = new int[N];
hipMalloc((void **) &devA, sizeof(int) * N);
for (int i = 0; i < N; i++)
A[i] = i;
hipMemcpy(devA, A, N * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add_neighbour), dim3(blockPerGrid), dim3(threadsPerBlock), 0, 0, devA, N);
hipMemcpy(A, devA, N * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0 ; i < N; i++)
printf("%d \n", A[i]);
hipFree(devA);
delete[] A;
return 0;
}
|
48975997f0095c198039f05952d8a0aba9e0ed68.cu
|
#include<stdio.h>
#include<iostream>
using namespace std;
__global__ void add_neighbour(int *A, int offset) {
int tid = threadIdx.x;
/* int a = A[tid + offset];
printf("%d, %d\n", tid, a);
__syncthreads();
A[tid + offset] = A[tid];
__syncthreads();
A[tid] = A[tid + offset];
printf("%d, %d\n",tid ,A[tid]);*/
A[tid] += A[tid];//A[tid + offset];
}
const int N = 2;
const int threadsPerBlock = 2;
const int blockPerGrid = 1;
int main(){
int* A, *devA;
A = new int[N];
cudaMalloc((void **) &devA, sizeof(int) * N);
for (int i = 0; i < N; i++)
A[i] = i;
cudaMemcpy(devA, A, N * sizeof(int), cudaMemcpyHostToDevice);
add_neighbour<<<blockPerGrid, threadsPerBlock>>>(devA, N);
cudaMemcpy(A, devA, N * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0 ; i < N; i++)
printf("%d \n", A[i]);
cudaFree(devA);
delete[] A;
return 0;
}
|
25788436927461d198f71ca854f81aad85029be1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void query_ball_point_gpu(int b, int n, int m, const float *radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
pts_cnt += m*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius[0]) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
pts_cnt[j] = cnt;
}
}
|
25788436927461d198f71ca854f81aad85029be1.cu
|
#include "includes.h"
__global__ void query_ball_point_gpu(int b, int n, int m, const float *radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
pts_cnt += m*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius[0]) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
pts_cnt[j] = cnt;
}
}
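// --- Added illustration (not part of the original source) ---
// A plausible host-side launch, assuming device buffers are already populated:
// xyz1 holds b*n*3 floats, xyz2 holds b*m*3 floats, radius holds 1 float,
// idx holds b*m*nsample ints and pts_cnt holds b*m ints. The kernel expects one
// block per batch element; threads in a block stride over the m query points.
// The block size of 256 is an assumption, not taken from the original code.
static void launch_query_ball_point(int b, int n, int m, const float* d_radius, int nsample, const float* d_xyz1, const float* d_xyz2, int* d_idx, int* d_pts_cnt) {
query_ball_point_gpu<<<b, 256>>>(b, n, m, d_radius, nsample, d_xyz1, d_xyz2, d_idx, d_pts_cnt);
cudaDeviceSynchronize(); // surface any launch or runtime errors before returning
}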
|
7ba6d92ed29da2a3c58d25e4d26d5579123ffc31.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void ge_erfc (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(erfc)(a[offset_a + gid_0 + gid_1 * ld_a]);
}
}
|
7ba6d92ed29da2a3c58d25e4d26d5579123ffc31.cu
|
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void ge_erfc (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(erfc)(a[offset_a + gid_0 + gid_1 * ld_a]);
}
}
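// --- Added illustration (not part of the original source) ---
// A plausible launch for an sd x fd column-major matrix stored with leading
// dimension ld = sd and zero offsets; the (32, 8) block shape is an assumption.
static void launch_ge_erfc(const int sd, const int fd, const REAL* d_a, REAL* d_b) {
dim3 block(32, 8);
dim3 grid((sd + block.x - 1) / block.x, (fd + block.y - 1) / block.y);
ge_erfc<<<grid, block>>>(sd, fd, d_a, 0, sd, d_b, 0, sd);
}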
|
930c7a042f00ccbeef917d47884aed9e52a6286c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "apply_grad.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
float *grad = NULL;
hipMalloc(&grad, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
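// hipFree(0) is a no-op commonly used to force context/runtime initialization before timing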
hipFree(0);
hipLaunchKernelGGL(( apply_grad), dim3(gridBlock), dim3(threadBlock), 0, 0, output, grad, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( apply_grad), dim3(gridBlock), dim3(threadBlock), 0, 0, output, grad, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( apply_grad), dim3(gridBlock), dim3(threadBlock), 0, 0, output, grad, N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
930c7a042f00ccbeef917d47884aed9e52a6286c.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "apply_grad.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
float *grad = NULL;
cudaMalloc(&grad, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
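// cudaFree(0) is a no-op commonly used to force CUDA context initialization before timing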
cudaFree(0);
apply_grad<<<gridBlock,threadBlock>>>(output,grad,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
apply_grad<<<gridBlock,threadBlock>>>(output,grad,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
apply_grad<<<gridBlock,threadBlock>>>(output,grad,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
4d5964ffed762889ec969ef7fd3a48b9050f693d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Kernel
__global__ void bit_shift_test()
{
unsigned long long one = 1;
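// Note: shifting a 64-bit value by 64 (the i == 64 iteration) is undefined behavior in C++, so that line's output is implementation-specific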
for (int i = 0; i <= 64; i++) {
printf("(%llu << %d) - 1 = %llu\n", one, i, (one << i) - 1);
}
unsigned long long max = 18446744073709551615ULL;
for (int i = 0; i <= 64; i++) {
printf("%llu >> %d = %llu\n", max, i, max >> i);
}
}
// Main program
int main()
{
// Launch kernel
hipLaunchKernelGGL(( bit_shift_test), dim3(1), dim3(1), 0, 0, );
hipError_t cuErrSync = hipGetLastError();
hipError_t cuErrAsync = hipDeviceSynchronize();
if (cuErrSync != hipSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErrSync)); exit(0); }
if (cuErrAsync != hipSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErrAsync)); exit(0); }
return 0;
}
|
4d5964ffed762889ec969ef7fd3a48b9050f693d.cu
|
#include <stdio.h>
// Kernel
__global__ void bit_shift_test()
{
unsigned long long one = 1;
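// Note: shifting a 64-bit value by 64 (the i == 64 iteration) is undefined behavior in C++, so that line's output is implementation-specific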
for (int i = 0; i <= 64; i++) {
printf("(%llu << %d) - 1 = %llu\n", one, i, (one << i) - 1);
}
unsigned long long max = 18446744073709551615ULL;
for (int i = 0; i <= 64; i++) {
printf("%llu >> %d = %llu\n", max, i, max >> i);
}
}
// Main program
int main()
{
// Launch kernel
bit_shift_test<<<1, 1>>>();
cudaError_t cuErrSync = cudaGetLastError();
cudaError_t cuErrAsync = cudaDeviceSynchronize();
if (cuErrSync != cudaSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErrSync)); exit(0); }
if (cuErrAsync != cudaSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErrAsync)); exit(0); }
return 0;
}
|
c6868b6f0ed20116b1ebd7c3c2db4a5f698fb8dd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
template <typename T, int D>
__global__ void _Transpose(
const int nthreads,
const int num_dims,
const SimpleArray<int, D> x_strides,
const SimpleArray<int, D> y_dims,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
int xi = 0, tmp = yi;
for (int d = num_dims - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(y_dims.data[d], tmp, &tmp, &r);
xi += r * x_strides.data[d];
}
y[yi] = x[xi];
}
}
template <typename T, int D>
__global__ void _TransposeGrad(
const int nthreads,
const int num_dims,
const SimpleArray<int, D> x_strides,
const SimpleArray<int, D> y_dims,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
int xi = 0, tmp = yi;
for (int d = num_dims - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(y_dims.data[d], tmp, &tmp, &r);
xi += r * x_strides.data[d];
}
dx[xi] = dy[yi];
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(name, T) \
template <> \
void name<T, CUDAContext>( \
const int num_dims, \
const int64_t* x_strides, \
const int64_t* y_dims, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
CUDA_TENSOR_DIMS_CHECK(num_dims); \
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> X_strides, Y_dims; \
const auto nthreads = std::accumulate( \
y_dims, y_dims + num_dims, 1, std::multiplies<int64_t>()); \
for (int i = 0; i < num_dims; ++i) { \
X_strides.data[i] = x_strides[i]; \
Y_dims.data[i] = y_dims[i]; \
} \
hipLaunchKernelGGL(( _##name), dim3(CUDA_BLOCKS(nthreads)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
nthreads, num_dims, X_strides, Y_dims, x, y); \
}
DEFINE_KERNEL_LAUNCHER(Transpose, bool);
DEFINE_KERNEL_LAUNCHER(Transpose, int8_t);
DEFINE_KERNEL_LAUNCHER(Transpose, uint8_t);
DEFINE_KERNEL_LAUNCHER(Transpose, int);
DEFINE_KERNEL_LAUNCHER(Transpose, int64_t);
DEFINE_KERNEL_LAUNCHER(Transpose, float16);
DEFINE_KERNEL_LAUNCHER(Transpose, float);
DEFINE_KERNEL_LAUNCHER(Transpose, double);
DEFINE_KERNEL_LAUNCHER(TransposeGrad, float16);
DEFINE_KERNEL_LAUNCHER(TransposeGrad, float);
DEFINE_KERNEL_LAUNCHER(TransposeGrad, double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_ROCM
|
c6868b6f0ed20116b1ebd7c3c2db4a5f698fb8dd.cu
|
#ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
template <typename T, int D>
__global__ void _Transpose(
const int nthreads,
const int num_dims,
const SimpleArray<int, D> x_strides,
const SimpleArray<int, D> y_dims,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
int xi = 0, tmp = yi;
for (int d = num_dims - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(y_dims.data[d], tmp, &tmp, &r);
xi += r * x_strides.data[d];
}
y[yi] = x[xi];
}
}
template <typename T, int D>
__global__ void _TransposeGrad(
const int nthreads,
const int num_dims,
const SimpleArray<int, D> x_strides,
const SimpleArray<int, D> y_dims,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
int xi = 0, tmp = yi;
for (int d = num_dims - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(y_dims.data[d], tmp, &tmp, &r);
xi += r * x_strides.data[d];
}
dx[xi] = dy[yi];
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(name, T) \
template <> \
void name<T, CUDAContext>( \
const int num_dims, \
const int64_t* x_strides, \
const int64_t* y_dims, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
CUDA_TENSOR_DIMS_CHECK(num_dims); \
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> X_strides, Y_dims; \
const auto nthreads = std::accumulate( \
y_dims, y_dims + num_dims, 1, std::multiplies<int64_t>()); \
for (int i = 0; i < num_dims; ++i) { \
X_strides.data[i] = x_strides[i]; \
Y_dims.data[i] = y_dims[i]; \
} \
_##name<<<CUDA_BLOCKS(nthreads), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
nthreads, num_dims, X_strides, Y_dims, x, y); \
}
DEFINE_KERNEL_LAUNCHER(Transpose, bool);
DEFINE_KERNEL_LAUNCHER(Transpose, int8_t);
DEFINE_KERNEL_LAUNCHER(Transpose, uint8_t);
DEFINE_KERNEL_LAUNCHER(Transpose, int);
DEFINE_KERNEL_LAUNCHER(Transpose, int64_t);
DEFINE_KERNEL_LAUNCHER(Transpose, float16);
DEFINE_KERNEL_LAUNCHER(Transpose, float);
DEFINE_KERNEL_LAUNCHER(Transpose, double);
DEFINE_KERNEL_LAUNCHER(TransposeGrad, float16);
DEFINE_KERNEL_LAUNCHER(TransposeGrad, float);
DEFINE_KERNEL_LAUNCHER(TransposeGrad, double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_CUDA
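// --- Added illustration (not part of the kernel sources) ---
// Host-side reference of the index mapping used by _Transpose/_TransposeGrad,
// assuming x_strides[d] is the stride in x of the x-axis that feeds axis d of y.
// Example: transposing a row-major 2x3 matrix gives y_dims = {3, 2} and
// x_strides = {1, 3}, so output index yi = 2*r0 + r1 maps to input xi = r0 + 3*r1.
static int transpose_input_index(int yi, int num_dims, const int* x_strides, const int* y_dims) {
int xi = 0, tmp = yi;
for (int d = num_dims - 1; d >= 0; --d) {
int r = tmp % y_dims[d]; // coordinate of yi along axis d of y
tmp /= y_dims[d];
xi += r * x_strides[d]; // step by the matching stride in x
}
return xi;
}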
|
ad33e8c9fcc341c156ffefcf34bde6900850302a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <ctime>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt
#include <roctracer/roctx.h>
const int n_elements = 32 * 1024 * 1024; // number of elements to reduce
using namespace std;
struct DIMS
{
dim3 dimBlock;
dim3 dimGrid;
};
struct DIMS2
{
int dimThreads;
int dimBlocks;
};
#define CUDA(call) do { \
hipError_t e = (call); \
if (e == hipSuccess) break; \
fprintf(stderr, __FILE__":%d: %s (%d)\n", \
__LINE__, hipGetErrorString(e), e); \
exit(1); \
} while (0)
inline unsigned divup(unsigned n, unsigned div)
{
return (n + div - 1) / div;
}
double diffclock( clock_t clock1, clock_t clock2 )
{
double diffticks = clock1 - clock2;
double diffms = diffticks / ( CLOCKS_PER_SEC / 1000.0);
return diffms;
}
// Check errors
void postprocess(const int *ref, const int *res, int n)
{
bool passed = true;
for (int i = 0; i < n; i++)
{
if (res[i] != ref[i])
{
printf("ID:%d \t Res:%d \t Ref:%d\n", i, res[i], ref[i]);
printf("%25s\n", "*** FAILED ***");
passed = false;
break;
}
}
if(passed)
printf("Post process PASSED!!!\n");
}
void preprocess(float *res, float *dev_res, int n)
{
for (int i = 0; i < n; i++)
{
res[i] = -1;
}
hipMemset(dev_res, -1, n * sizeof(float));
}
__global__ void copyKernel(const int* __restrict__ const a,
int* __restrict__ const b)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // index
b[i] = a[i]; // copy
}
static int reduce_cpu(int *data, int n)
{
int sum = 0;
for (int i = 0; i < n; i++)
sum += data[i];
return sum;
}
// INTERLEAVED ADDRESSING
// TODO put your kernel here
__global__ void reduce_stage0(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//Copy input data to shared memory
//Reduce within block
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
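// --- Added illustration (not part of the exercise skeleton) ---
// One possible interleaved-addressing body for stage 0, shown as a separate kernel
// so the skeleton above stays untouched. It assumes blockDim.x is a power of two
// and that the launch supplies blockDim.x * sizeof(int) bytes of dynamic shared
// memory plus enough blocks to cover all n elements.
__global__ void reduce_stage0_sketch(int* d_idata, int* d_odata, int n)
{
extern __shared__ int smem[];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
smem[threadIdx.x] = (idx < n) ? d_idata[idx] : 0; // copy input data to shared memory
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2) // interleaved addressing (divergent branches)
{
if (threadIdx.x % (2 * s) == 0)
smem[threadIdx.x] += smem[threadIdx.x + s];
__syncthreads();
}
if (threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0]; // one partial sum per block
}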
// INTERLEAVED ADDRESSING NON DIVERGENT
// TODO put your kernel here
__global__ void reduce_stage1(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = 0;
//Copy input data to shared memory
//Reduce within block with coalesced indexing pattern
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
// WARP MANAGEMENT WITHOUT BANK CONFLICT
// TODO put your kernel here
__global__ void reduce_stage2(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = 0;
//Copy input data to shared memory
//Reduce within block with coalesced indexing pattern and avoid bank conflicts
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
// ADD DURING LOAD - USE HALF THE BLOCKS
// TODO put your kernel here
const int stage3_TILE = 2;
__global__ void reduce_stage3(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = 0; //EACH BLOCK DOES WORK OF stage3_TILE*blockDim.x NO. OF ELEMENTS
//Copy input data to shared memory. Add on load.
//Reduce within block with coalesced indexing pattern and avoid bank conflicts
//HINT: This part is same as stage2
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
// WARP LOOP UNROLLING
// TODO put your kernel here
__device__ void stage4_warpReduce(volatile int* smem, int tid)
{
//Write code for warp reduce here
}
const int stage4_TILE = 2; //Tune this
__global__ void reduce_stage4(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = 0; //EACH BLOCK DOES WORK OF stage4_TILE*blockDim.x NO. OF ELEMENTS
//Copy input data to shared memory. Add on load.
//Reduce within block with coalesced indexing pattern and avoid bank conflicts
//HINT: This part is similar to stage3, is different in terms of warp reduction
//Use stage4_warpReduce
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
// COMPLETELY UNROLLED BLOCKS - USING TEMPLATES
template <unsigned int blockSize>
__device__ void warpReduce(volatile int* smem, int tid)
{
//Write code for warp reduce here. Same has stage 4 warp reduce
}
// TODO put your kernel here
const int stage5_TILE = 2; //Tune this
template<unsigned int blockSize>
__global__ void reduce_stage5(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = 0; //EACH BLOCK DOES WORK OF stage4_TILE*blockDim.x NO. OF ELEMENTS
//Copy input data to shared memory. Add on load.
//Reduce within block with coalesced indexing pattern and avoid bank conflicts
//HINT: Explicity unroll the loop
//Call the correct warpReduce
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
int main()
{
//Run Memcpy benchmarks
auto cudaBenchmark = roctxRangeStart("CUDA Memcpy Benchmark");
#if defined WIN64
system(".\\..\\bin\\cudaBenchmark.exe");
#elif defined LINUX
system("./bin/cudaBenchmark");
#endif
roctxRangeStop(cudaBenchmark);
///////////////////////////////////////////////////////////////////////////////////////////////
//Allocate memory and initialize elements
unsigned bytes = n_elements * sizeof(int);
int *h_idata;
CUDA( hipHostMalloc((void**)&h_idata, bytes) ); //Using Pinned Memory
for (int i=0; i < n_elements; i++)
h_idata[i] = (int)(rand() & 0xFF);
// copy data to device memory
int *d_idata = NULL;
CUDA(hipMalloc((void **) &d_idata, bytes));
CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
//Compute Gold Standard using CPU
const int gold_result = reduce_cpu(h_idata, n_elements);
// Create CUDA events for timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
#define CPU_REDUCE
#ifdef CPU_REDUCE
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***CPU Reduce***" << endl;
{
// start the timer
int cpu_result = -1;
auto cpuBenchmark = roctxRangeStart("CPU Reduce Benchmark");
clock_t begin = clock();
int iters = 100;
for (int k=0; k<iters; k++)
{
cpu_result = reduce_cpu(h_idata, n_elements);
}
// stop the timer
clock_t end = clock();
roctxRangeStop(cpuBenchmark);
float time = 0.0f;
time = (float)diffclock(end, begin);
// print out the time required for the kernel to finish the transpose operation
//double Bandwidth = (double)iters*2.0*1000.0*(double)(bytes) / (1000.0*1000.0*1000.0*time);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
//cout << "Bandwidth (GB/s) = " << Bandwidth << endl;
//Check Result
if(cpu_result == gold_result)
cout << "Post process check PASSED!!!" << endl;
else
cout << "Post process check FAILED:-(" << endl;
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
//Compute Pinned Memory Copy Benchmark
cout << "******************************************" << endl;
cout << "***Device To Device Copy***" << endl;
{
// Assign a 1D distribution of threads per blocks
// Calculate number of blocks required
DIMS2 dims;
dims.dimThreads = 1024;
dims.dimBlocks = divup(n_elements, dims.dimThreads);
// start the timer
auto d2dBenchmark = roctxRangeStart("Device to Device Copy");
hipEventRecord( start, 0);
int *d_odata;
CUDA( hipMalloc((void **) &d_odata, bytes) );
int iters = 100;
for (int i=0; i<iters; i++)
{
// Launch the GPU kernel
hipLaunchKernelGGL(( copyKernel), dim3(dims.dimBlocks), dim3(dims.dimThreads), 0, 0, d_idata, d_odata);
}
// stop the timer
hipEventRecord( stop, 0);
hipEventSynchronize( stop );
roctxRangeStop(d2dBenchmark);
float time = 0.0f;
hipEventElapsedTime( &time, start, stop);
// print out the time required for the kernel to finish the transpose operation
double Bandwidth = (double)iters*2.0*1000.0*(double)(bytes) /
(1000.0*1000.0*1000.0*time); //3.0 for read of A and read and write of B
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
cout << "Bandwidth (GB/s) = " << Bandwidth << endl;
// copy the answer back to the host (CPU) from the device (GPU)
int *h_odata; CUDA( hipHostMalloc((void**)&h_odata, bytes) );
hipMemcpy(h_odata, d_odata, bytes, hipMemcpyDeviceToHost);
postprocess(h_idata, h_odata, n_elements);
CUDA( hipHostFree(h_odata) );
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#if 0
//Compute GPU Reduce Benchmarks
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 0***" << endl;
{
//Calculate Threads per block and total blocks required
//HINT: Look at copy kernel dims computed above
DIMS2 dims;
dims.dimThreads = 1; //Start with any (preferable 2^n) threads per block. Then tune once working.
dims.dimBlocks = 1;
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(hipMalloc((void**)&d_odata, block_bytes));
CUDA(hipMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
hipLaunchKernelGGL(( reduce_stage0), dim3(dims.dimBlocks), dim3(dims.dimThreads), 0 /*Declare appropriate shared memory space*/, 0, d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(hipEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
hipLaunchKernelGGL(( reduce_stage0), dim3(dims.dimBlocks), dim3(dims.dimThreads), 0 /*Declare appropriate shared memory space*/, 0, d_idata, d_odata, n_elements);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(hipEventRecord(stop, 0));
CUDA(hipEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(hipEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
hipFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
#if 0
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 1***" << endl;
{
//Calculate Threads per block and total blocks required
DIMS2 dims;
dims.dimThreads = 1; //Start with any (preferable 2^n) threads per block. Then tune once working.
dims.dimBlocks = 1;
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(hipMalloc((void**)&d_odata, block_bytes));
CUDA(hipMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
hipLaunchKernelGGL(( reduce_stage1), dim3(dims.dimBlocks), dim3(dims.dimThreads), 0 /*Declare appropriate shared memory space*/, 0, d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(hipEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
hipLaunchKernelGGL(( reduce_stage1), dim3(dims.dimBlocks), dim3(dims.dimThreads), 0 /*Declare appropriate shared memory space*/, 0, d_idata, d_odata, n_elements);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(hipEventRecord(stop, 0));
CUDA(hipEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(hipEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
hipFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
#if 0
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 2***" << endl;
{
//Calculate Threads per block and total blocks required
DIMS2 dims;
dims.dimThreads = 1; //Start with any (preferable 2^n) threads per block. Then tune once working.
dims.dimBlocks = 1;
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(hipMalloc((void**)&d_odata, block_bytes));
CUDA(hipMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
hipLaunchKernelGGL(( reduce_stage2), dim3(dims.dimBlocks), dim3(dims.dimThreads), 0 /*Declare appropriate shared memory space*/, 0, d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(hipEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
hipLaunchKernelGGL(( reduce_stage2), dim3(dims.dimBlocks), dim3(dims.dimThreads), 0 /*Declare appropriate shared memory space*/, 0, d_idata, d_odata, n_elements);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(hipEventRecord(stop, 0));
CUDA(hipEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(hipEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
hipFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
#if 0
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 3***" << endl;
{
//Calculate Threads per block and total blocks required
DIMS2 dims;
dims.dimThreads = 1; //Start with any (preferable 2^n) threads per block. Then tune once working.
dims.dimBlocks = 1; //Don't forget to take TILE into account while computing blocks
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(hipMalloc((void**)&d_odata, block_bytes));
CUDA(hipMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
hipLaunchKernelGGL(( reduce_stage3), dim3(dims.dimBlocks), dim3(dims.dimThreads), 0 /*Declare appropriate shared memory space*/, 0, d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(hipEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
hipLaunchKernelGGL(( reduce_stage3), dim3(dims.dimBlocks), dim3(dims.dimThreads), 0 /*Declare appropriate shared memory space*/, 0, d_idata, d_odata, n_elements);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(hipEventRecord(stop, 0));
CUDA(hipEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(hipEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
hipFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
#if 0
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 4***" << endl;
{
//Calculate Threads per block and total blocks required
DIMS2 dims;
dims.dimThreads = 1; //Start with any (preferable 2^n) threads per block. Then tune once working.
dims.dimBlocks = 1; //Don't forget to take TILE into account while computing blocks
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(hipMalloc((void**)&d_odata, block_bytes));
CUDA(hipMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
hipLaunchKernelGGL(( reduce_stage4), dim3(dims.dimBlocks), dim3(dims.dimThreads), 0 /*Declare appropriate shared memory space*/, 0, d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(hipEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
hipLaunchKernelGGL(( reduce_stage4), dim3(dims.dimBlocks), dim3(dims.dimThreads), 0 /*Declare appropriate shared memory space*/, 0, d_idata, d_odata, n_elements);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(hipEventRecord(stop, 0));
CUDA(hipEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(hipEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
hipFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
#if 0
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 5***" << endl;
{
//Calculate Threads per block and total blocks required
DIMS2 dims;
const int threads = 1; //We can use this in templates
dims.dimThreads = threads;
dims.dimBlocks = 1; //Don't forget to take TILE into account while computing blocks
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(hipMalloc((void**)&d_odata, block_bytes));
CUDA(hipMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
//Don't forget to add the template
//reduce_stage5<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(hipEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
//Don't forget to add the template
//reduce_stage5<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
CUDA( hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(hipEventRecord(stop, 0));
CUDA(hipEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(hipEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
hipFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
////////////////////////////////////////////////////////////
//CLEANUP
CUDA( hipEventDestroy(start) );
CUDA( hipEventDestroy(stop ) );
CUDA( hipHostFree(h_idata) );
CUDA( hipFree(d_idata) );
system("pause");
return 0;
}
|
ad33e8c9fcc341c156ffefcf34bde6900850302a.cu
|
#include <stdio.h>
#include <iostream>
#include <ctime>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt
#include <nvToolsExt.h>
const int n_elements = 32 * 1024 * 1024; // number of elements to reduce
using namespace std;
struct DIMS
{
dim3 dimBlock;
dim3 dimGrid;
};
struct DIMS2
{
int dimThreads;
int dimBlocks;
};
#define CUDA(call) do { \
cudaError_t e = (call); \
if (e == cudaSuccess) break; \
fprintf(stderr, __FILE__":%d: %s (%d)\n", \
__LINE__, cudaGetErrorString(e), e); \
exit(1); \
} while (0)
inline unsigned divup(unsigned n, unsigned div)
{
return (n + div - 1) / div;
}
double diffclock( clock_t clock1, clock_t clock2 )
{
double diffticks = clock1 - clock2;
double diffms = diffticks / ( CLOCKS_PER_SEC / 1000.0);
return diffms;
}
// Check errors
void postprocess(const int *ref, const int *res, int n)
{
bool passed = true;
for (int i = 0; i < n; i++)
{
if (res[i] != ref[i])
{
printf("ID:%d \t Res:%d \t Ref:%d\n", i, res[i], ref[i]);
printf("%25s\n", "*** FAILED ***");
passed = false;
break;
}
}
if(passed)
printf("Post process PASSED!!!\n");
}
void preprocess(float *res, float *dev_res, int n)
{
for (int i = 0; i < n; i++)
{
res[i] = -1;
}
cudaMemset(dev_res, -1, n * sizeof(float));
}
__global__ void copyKernel(const int* __restrict__ const a,
int* __restrict__ const b)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // index
b[i] = a[i]; // copy
}
static int reduce_cpu(int *data, int n)
{
int sum = 0;
for (int i = 0; i < n; i++)
sum += data[i];
return sum;
}
// INTERLEAVED ADDRESSING
// TODO put your kernel here
__global__ void reduce_stage0(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//Copy input data to shared memory
//Reduce within block
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
// INTERLEAVED ADDRESSING NON DIVERGENT
// TODO put your kernel here
__global__ void reduce_stage1(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = 0;
//Copy input data to shared memory
//Reduce within block with coalesced indexing pattern
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
// WARP MANAGEMENT WITHOUT BANK CONFLICT
// TODO put your kernel here
__global__ void reduce_stage2(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = 0;
//Copy input data to shared memory
//Reduce within block with coalesced indexing pattern and avoid bank conflicts
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
// ADD DURING LOAD - USE HALF THE BLOCKS
// TODO put your kernel here
const int stage3_TILE = 2;
__global__ void reduce_stage3(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = 0; //EACH BLOCK DOES WORK OF stage3_TILE*blockDim.x NO. OF ELEMENTS
//Copy input data to shared memory. Add on load.
//Reduce within block with coalesced indexing pattern and avoid bank conflicts
//HINT: This part is same as stage2
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
// WARP LOOP UNROLLING
// TODO put your kernel here
__device__ void stage4_warpReduce(volatile int* smem, int tid)
{
//Write code for warp reduce here
}
const int stage4_TILE = 2; //Tune this
__global__ void reduce_stage4(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = 0; //EACH BLOCK DOES WORK OF stage4_TILE*blockDim.x NO. OF ELEMENTS
//Copy input data to shared memory. Add on load.
//Reduce within block with coalesced indexing pattern and avoid bank conflicts
//HINT: This part is similar to stage3, is different in terms of warp reduction
//Use stage4_warpReduce
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
// COMPLETELY UNROLLED BLOCKS - USING TEMPLATES
template <unsigned int blockSize>
__device__ void warpReduce(volatile int* smem, int tid)
{
//Write code for warp reduce here. Same has stage 4 warp reduce
}
// TODO put your kernel here
const int stage5_TILE = 2; //Tune this
template<unsigned int blockSize>
__global__ void reduce_stage5(int* d_idata, int* d_odata, int n)
{
//Dynamic allocation of shared memory - See kernel call in host code
extern __shared__ int smem[];
//Calculate index
int idx = 0; //EACH BLOCK DOES WORK OF stage4_TILE*blockDim.x NO. OF ELEMENTS
//Copy input data to shared memory. Add on load.
//Reduce within block with coalesced indexing pattern and avoid bank conflicts
//HINT: Explicity unroll the loop
//Call the correct warpReduce
//Copy result of reduction to global memory
if(threadIdx.x == 0)
d_odata[blockIdx.x] = smem[0];
}
int main()
{
//Run Memcpy benchmarks
nvtxRangeId_t cudaBenchmark = nvtxRangeStart("CUDA Memcpy Benchmark");
#if defined WIN64
system(".\\..\\bin\\cudaBenchmark.exe");
#elif defined LINUX
system("./bin/cudaBenchmark");
#endif
nvtxRangeEnd(cudaBenchmark);
///////////////////////////////////////////////////////////////////////////////////////////////
//Allocate memory and initialize elements
unsigned bytes = n_elements * sizeof(int);
int *h_idata;
CUDA( cudaMallocHost((void**)&h_idata, bytes) ); //Using Pinned Memory
for (int i=0; i < n_elements; i++)
h_idata[i] = (int)(rand() & 0xFF);
// copy data to device memory
int *d_idata = NULL;
CUDA(cudaMalloc((void **) &d_idata, bytes));
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
//Compute Gold Standard using CPU
const int gold_result = reduce_cpu(h_idata, n_elements);
// Create CUDA events for timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
#define CPU_REDUCE
#ifdef CPU_REDUCE
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***CPU Reduce***" << endl;
{
// start the timer
int cpu_result = -1;
nvtxRangeId_t cpuBenchmark = nvtxRangeStart("CPU Reduce Benchmark");
clock_t begin = clock();
int iters = 100;
for (int k=0; k<iters; k++)
{
cpu_result = reduce_cpu(h_idata, n_elements);
}
// stop the timer
clock_t end = clock();
nvtxRangeEnd(cpuBenchmark);
float time = 0.0f;
time = (float)diffclock(end, begin);
// print out the time required for the kernel to finish the transpose operation
//double Bandwidth = (double)iters*2.0*1000.0*(double)(bytes) / (1000.0*1000.0*1000.0*time);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
//cout << "Bandwidth (GB/s) = " << Bandwidth << endl;
//Check Result
if(cpu_result == gold_result)
cout << "Post process check PASSED!!!" << endl;
else
cout << "Post process check FAILED:-(" << endl;
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
//Compute Pinned Memory Copy Benchmark
cout << "******************************************" << endl;
cout << "***Device To Device Copy***" << endl;
{
// Assign a 1D distribution of threads per blocks
// Calculate number of blocks required
DIMS2 dims;
dims.dimThreads = 1024;
dims.dimBlocks = divup(n_elements, dims.dimThreads);
// start the timer
nvtxRangeId_t d2dBenchmark = nvtxRangeStart("Device to Device Copy");
cudaEventRecord( start, 0);
int *d_odata;
CUDA( cudaMalloc((void **) &d_odata, bytes) );
int iters = 100;
for (int i=0; i<iters; i++)
{
// Launch the GPU kernel
copyKernel<<<dims.dimBlocks, dims.dimThreads>>>(d_idata, d_odata);
}
// stop the timer
cudaEventRecord( stop, 0);
cudaEventSynchronize( stop );
nvtxRangeEnd(d2dBenchmark);
float time = 0.0f;
cudaEventElapsedTime( &time, start, stop);
// print out the time required for the kernel to finish the transpose operation
double Bandwidth = (double)iters*2.0*1000.0*(double)(bytes) /
(1000.0*1000.0*1000.0*time); //3.0 for read of A and read and write of B
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
cout << "Bandwidth (GB/s) = " << Bandwidth << endl;
// copy the answer back to the host (CPU) from the device (GPU)
int *h_odata; CUDA( cudaMallocHost((void**)&h_odata, bytes) );
cudaMemcpy(h_odata, d_odata, bytes, cudaMemcpyDeviceToHost);
postprocess(h_idata, h_odata, n_elements);
CUDA( cudaFreeHost(h_odata) );
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#if 0
//Compute GPU Reduce Benchmarks
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 0***" << endl;
{
//Calculate Threads per block and total blocks required
//HINT: Look at copy kernel dims computed above
DIMS2 dims;
dims.dimThreads = 1; //Start with any (preferable 2^n) threads per block. Then tune once working.
dims.dimBlocks = 1;
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
reduce_stage0<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(cudaEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
reduce_stage0<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
cudaFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
#if 0
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 1***" << endl;
{
//Calculate Threads per block and total blocks required
DIMS2 dims;
dims.dimThreads = 1; //Start with any (preferable 2^n) threads per block. Then tune once working.
dims.dimBlocks = 1;
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
reduce_stage1<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(cudaEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
reduce_stage1<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
cudaFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
#if 0
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 2***" << endl;
{
//Calculate Threads per block and total blocks required
DIMS2 dims;
dims.dimThreads = 1; //Start with any (preferably 2^n) threads per block. Then tune once working.
dims.dimBlocks = 1;
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
reduce_stage2<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(cudaEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
reduce_stage2<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
cudaFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
#if 0
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 3***" << endl;
{
//Calculate Threads per block and total blocks required
DIMS2 dims;
dims.dimThreads = 1; //Start with any (preferably 2^n) threads per block. Then tune once working.
dims.dimBlocks = 1; //Don't forget to take TILE into account while computing blocks
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
reduce_stage3<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(cudaEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
reduce_stage3<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
cudaFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
#if 0
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 4***" << endl;
{
//Calculate Threads per block and total blocks required
DIMS2 dims;
dims.dimThreads = 1; //Start with any (preferably 2^n) threads per block. Then tune once working.
dims.dimBlocks = 1; //Don't forget to take TILE into account while computing blocks
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
reduce_stage4<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(cudaEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
reduce_stage4<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
cudaFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
#if 0
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Reduction Stage 5***" << endl;
{
//Calculate Threads per block and total blocks required
DIMS2 dims;
const int threads = 1; //We can use this in templates
dims.dimThreads = threads;
dims.dimBlocks = 1; //Don't forget to take TILE into account while computing blocks
printf("Elements %u Blocks %u Threads %u\n", n_elements, dims.dimBlocks, dims.dimThreads);
//Do once for error checking
int gpu_result = 0;
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
size_t block_bytes = dims.dimBlocks * sizeof(int);
int *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// TODO call your reduce kernel(s) with the right parameters
// INPUT: d_idata
// OUTPUT: d_odata
// ELEMENTS: n
// (1) reduce across all elements
//Don't forget to add the template
//reduce_stage5<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
// (2) reduce across all blocks -> Choose between CPU/GPU
int *h_blocks = (int *)malloc(block_bytes);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
printf("gpu %u gold %u \n", gpu_result, gold_result);
printf("Post process: ");
printf((gpu_result==gold_result) ? "PASSED!!!\n" : "FAILED:-(\n");
if(gpu_result == gold_result)
{
//Start Benchmark
int iters = 100;
CUDA(cudaEventRecord(start, 0));
for(int i = 0; i < iters; i++)
{
//Don't forget to add the template
//reduce_stage5<<<dims.dimBlocks, dims.dimThreads/*,Declare appropriate shared memory space*/>>>(d_idata, d_odata, n_elements);
CUDA( cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost) );
for (int i = 0; i < dims.dimBlocks; ++i)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
float time_ms;
// that's the time your kernel took to run in ms!
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
double Bandwidth = iters * 1e-9 * bytes / (time_ms / 1e3);
cout << "Elapsed Time for " << iters << " runs = " << time << "ms" << endl;
printf("bandwidth %.2f GB/s\n", Bandwidth);
}
free(h_blocks);
cudaFree(d_odata);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
////////////////////////////////////////////////////////////
//CLEANUP
CUDA( cudaEventDestroy(start) );
CUDA( cudaEventDestroy(stop ) );
CUDA( cudaFreeHost(h_idata) );
CUDA( cudaFree(d_idata) );
system("pause");
return 0;
}
|
e00f278155ad5fda544745097e3701747760d476.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Functions for computing the two hop neighbor pairs of a graph
*
* @file two_hop_neighbors.cu
* ---------------------------------------------------------------------------**/
#include "two_hop_neighbors.cuh"
#include "utilities/error_utils.h"
#include <rmm_utils.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/execution_policy.h>
template<typename IndexType>
gdf_error gdf_get_two_hop_neighbors_impl(IndexType num_verts,
IndexType* offsets,
IndexType* indices,
IndexType** first,
IndexType** second,
IndexType& outputSize) {
// Get the number of edges from the adjacency representation
IndexType num_edges;
hipMemcpy(&num_edges, &offsets[num_verts], sizeof(IndexType), hipMemcpyDefault);
// Allocate memory for temporary stuff
IndexType *exsum_degree = nullptr;
IndexType *first_pair = nullptr;
IndexType *second_pair = nullptr;
IndexType *block_bucket_offsets = nullptr;
ALLOC_MANAGED_TRY(&exsum_degree, sizeof(IndexType) * (num_edges + 1), nullptr);
// Find the degree of the out vertex of each edge
degree_iterator<IndexType> deg_it(offsets);
deref_functor<degree_iterator<IndexType>, IndexType> deref(deg_it);
rmm_temp_allocator allocator(nullptr);
thrust::fill(thrust::hip::par(allocator).on(nullptr), exsum_degree, exsum_degree + 1, 0);
thrust::transform(thrust::hip::par(allocator).on(nullptr),
indices,
indices + num_edges,
exsum_degree + 1,
deref);
// Take the inclusive sum of the degrees
thrust::inclusive_scan(thrust::hip::par(allocator).on(nullptr),
exsum_degree + 1,
exsum_degree + num_edges + 1,
exsum_degree + 1);
// Copy out the last value to get the size of scattered output
IndexType output_size;
hipMemcpy(&output_size, &exsum_degree[num_edges], sizeof(IndexType), hipMemcpyDefault);
// Allocate memory for the scattered output
ALLOC_MANAGED_TRY(&second_pair, sizeof(IndexType) * output_size, nullptr);
ALLOC_MANAGED_TRY(&first_pair, sizeof(IndexType) * output_size, nullptr);
// Figure out number of blocks and allocate memory for block bucket offsets
IndexType num_blocks = (output_size + TWO_HOP_BLOCK_SIZE - 1) / TWO_HOP_BLOCK_SIZE;
ALLOC_MANAGED_TRY(&block_bucket_offsets, sizeof(IndexType) * (num_blocks + 1), nullptr);
// Compute the block bucket offsets
dim3 grid, block;
block.x = 512;
grid.x = min((IndexType) MAXBLOCKS, (num_blocks / 512) + 1);
hipLaunchKernelGGL(( compute_bucket_offsets_kernel), dim3(grid), dim3(block), 0, nullptr, exsum_degree,
block_bucket_offsets,
num_edges,
output_size);
hipMemcpy(&block_bucket_offsets[num_blocks], &num_edges, sizeof(IndexType), hipMemcpyDefault);
// Scatter the expanded edge lists into temp space
grid.x = min((IndexType) MAXBLOCKS, num_blocks);
hipLaunchKernelGGL(( scatter_expand_kernel), dim3(grid), dim3(block), 0, nullptr, exsum_degree,
indices,
offsets,
block_bucket_offsets,
num_verts,
output_size,
num_blocks,
first_pair,
second_pair);
// Remove duplicates and self pairings
auto tuple_start = thrust::make_zip_iterator(thrust::make_tuple(first_pair, second_pair));
auto tuple_end = tuple_start + output_size;
thrust::sort(thrust::hip::par(allocator).on(nullptr), tuple_start, tuple_end);
tuple_end = thrust::copy_if(thrust::hip::par(allocator).on(nullptr),
tuple_start,
tuple_end,
tuple_start,
self_loop_flagger<IndexType>());
tuple_end = thrust::unique(thrust::hip::par(allocator).on(nullptr), tuple_start, tuple_end);
// Get things ready to return
outputSize = tuple_end - tuple_start;
ALLOC_MANAGED_TRY(first, sizeof(IndexType) * outputSize, nullptr);
ALLOC_MANAGED_TRY(second, sizeof(IndexType) * outputSize, nullptr);
hipMemcpy(*first, first_pair, sizeof(IndexType) * outputSize, hipMemcpyDefault);
hipMemcpy(*second, second_pair, sizeof(IndexType) * outputSize, hipMemcpyDefault);
// Free up temporary stuff
ALLOC_FREE_TRY(exsum_degree, nullptr);
ALLOC_FREE_TRY(first_pair, nullptr);
ALLOC_FREE_TRY(second_pair, nullptr);
ALLOC_FREE_TRY(block_bucket_offsets, nullptr);
return GDF_SUCCESS;
}
gdf_error gdf_get_two_hop_neighbors(gdf_graph* graph, gdf_column* first, gdf_column* second) {
GDF_REQUIRE(graph != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(first != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(second != nullptr, GDF_INVALID_API_CALL);
GDF_TRY(gdf_add_adj_list(graph));
size_t num_verts = graph->adjList->offsets->size - 1;
switch (graph->adjList->offsets->dtype) {
case GDF_INT32: {
int32_t* first_ptr;
int32_t* second_ptr;
int32_t outputSize;
gdf_get_two_hop_neighbors_impl((int32_t) num_verts,
(int32_t*) graph->adjList->offsets->data,
(int32_t*) graph->adjList->indices->data,
&first_ptr,
&second_ptr,
outputSize);
first->data = first_ptr;
first->dtype = GDF_INT32;
first->size = outputSize;
second->data = second_ptr;
second->dtype = GDF_INT32;
second->size = outputSize;
break;
}
case GDF_INT64: {
int64_t* first_ptr;
int64_t* second_ptr;
int64_t outputSize;
gdf_get_two_hop_neighbors_impl((int64_t) num_verts,
(int64_t*) graph->adjList->offsets->data,
(int64_t*) graph->adjList->indices->data,
&first_ptr,
&second_ptr,
outputSize);
first->data = first_ptr;
first->dtype = GDF_INT64;
first->size = outputSize;
second->data = second_ptr;
second->dtype = GDF_INT64;
second->size = outputSize;
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
|
e00f278155ad5fda544745097e3701747760d476.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Functions for computing the two hop neighbor pairs of a graph
*
* @file two_hop_neighbors.cu
* ---------------------------------------------------------------------------**/
#include "two_hop_neighbors.cuh"
#include "utilities/error_utils.h"
#include <rmm_utils.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/execution_policy.h>
template<typename IndexType>
gdf_error gdf_get_two_hop_neighbors_impl(IndexType num_verts,
IndexType* offsets,
IndexType* indices,
IndexType** first,
IndexType** second,
IndexType& outputSize) {
// Get the number of edges from the adjacency representation
IndexType num_edges;
cudaMemcpy(&num_edges, &offsets[num_verts], sizeof(IndexType), cudaMemcpyDefault);
// Allocate memory for temporary stuff
IndexType *exsum_degree = nullptr;
IndexType *first_pair = nullptr;
IndexType *second_pair = nullptr;
IndexType *block_bucket_offsets = nullptr;
ALLOC_MANAGED_TRY(&exsum_degree, sizeof(IndexType) * (num_edges + 1), nullptr);
// Find the degree of the out vertex of each edge
degree_iterator<IndexType> deg_it(offsets);
deref_functor<degree_iterator<IndexType>, IndexType> deref(deg_it);
rmm_temp_allocator allocator(nullptr);
thrust::fill(thrust::cuda::par(allocator).on(nullptr), exsum_degree, exsum_degree + 1, 0);
thrust::transform(thrust::cuda::par(allocator).on(nullptr),
indices,
indices + num_edges,
exsum_degree + 1,
deref);
// Take the inclusive sum of the degrees
thrust::inclusive_scan(thrust::cuda::par(allocator).on(nullptr),
exsum_degree + 1,
exsum_degree + num_edges + 1,
exsum_degree + 1);
// Copy out the last value to get the size of scattered output
IndexType output_size;
cudaMemcpy(&output_size, &exsum_degree[num_edges], sizeof(IndexType), cudaMemcpyDefault);
// Allocate memory for the scattered output
ALLOC_MANAGED_TRY(&second_pair, sizeof(IndexType) * output_size, nullptr);
ALLOC_MANAGED_TRY(&first_pair, sizeof(IndexType) * output_size, nullptr);
// Figure out number of blocks and allocate memory for block bucket offsets
IndexType num_blocks = (output_size + TWO_HOP_BLOCK_SIZE - 1) / TWO_HOP_BLOCK_SIZE;
ALLOC_MANAGED_TRY(&block_bucket_offsets, sizeof(IndexType) * (num_blocks + 1), nullptr);
// Compute the block bucket offsets
dim3 grid, block;
block.x = 512;
grid.x = min((IndexType) MAXBLOCKS, (num_blocks / 512) + 1);
compute_bucket_offsets_kernel<<<grid, block, 0, nullptr>>>(exsum_degree,
block_bucket_offsets,
num_edges,
output_size);
cudaMemcpy(&block_bucket_offsets[num_blocks], &num_edges, sizeof(IndexType), cudaMemcpyDefault);
// Scatter the expanded edge lists into temp space
grid.x = min((IndexType) MAXBLOCKS, num_blocks);
scatter_expand_kernel<<<grid, block, 0, nullptr>>>(exsum_degree,
indices,
offsets,
block_bucket_offsets,
num_verts,
output_size,
num_blocks,
first_pair,
second_pair);
// Remove duplicates and self pairings
auto tuple_start = thrust::make_zip_iterator(thrust::make_tuple(first_pair, second_pair));
auto tuple_end = tuple_start + output_size;
thrust::sort(thrust::cuda::par(allocator).on(nullptr), tuple_start, tuple_end);
tuple_end = thrust::copy_if(thrust::cuda::par(allocator).on(nullptr),
tuple_start,
tuple_end,
tuple_start,
self_loop_flagger<IndexType>());
tuple_end = thrust::unique(thrust::cuda::par(allocator).on(nullptr), tuple_start, tuple_end);
// Get things ready to return
outputSize = tuple_end - tuple_start;
ALLOC_MANAGED_TRY(first, sizeof(IndexType) * outputSize, nullptr);
ALLOC_MANAGED_TRY(second, sizeof(IndexType) * outputSize, nullptr);
cudaMemcpy(*first, first_pair, sizeof(IndexType) * outputSize, cudaMemcpyDefault);
cudaMemcpy(*second, second_pair, sizeof(IndexType) * outputSize, cudaMemcpyDefault);
// Free up temporary stuff
ALLOC_FREE_TRY(exsum_degree, nullptr);
ALLOC_FREE_TRY(first_pair, nullptr);
ALLOC_FREE_TRY(second_pair, nullptr);
ALLOC_FREE_TRY(block_bucket_offsets, nullptr);
return GDF_SUCCESS;
}
gdf_error gdf_get_two_hop_neighbors(gdf_graph* graph, gdf_column* first, gdf_column* second) {
GDF_REQUIRE(graph != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(first != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(second != nullptr, GDF_INVALID_API_CALL);
GDF_TRY(gdf_add_adj_list(graph));
size_t num_verts = graph->adjList->offsets->size - 1;
switch (graph->adjList->offsets->dtype) {
case GDF_INT32: {
int32_t* first_ptr;
int32_t* second_ptr;
int32_t outputSize;
gdf_get_two_hop_neighbors_impl((int32_t) num_verts,
(int32_t*) graph->adjList->offsets->data,
(int32_t*) graph->adjList->indices->data,
&first_ptr,
&second_ptr,
outputSize);
first->data = first_ptr;
first->dtype = GDF_INT32;
first->size = outputSize;
second->data = second_ptr;
second->dtype = GDF_INT32;
second->size = outputSize;
break;
}
case GDF_INT64: {
int64_t* first_ptr;
int64_t* second_ptr;
int64_t outputSize;
gdf_get_two_hop_neighbors_impl((int64_t) num_verts,
(int64_t*) graph->adjList->offsets->data,
(int64_t*) graph->adjList->indices->data,
&first_ptr,
&second_ptr,
outputSize);
first->data = first_ptr;
first->dtype = GDF_INT64;
first->size = outputSize;
second->data = second_ptr;
second->dtype = GDF_INT64;
second->size = outputSize;
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
|
fc946f2106c8b41c67a48ae7ba9084347e2c0a36.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "ConwaysCUDA.h"
#include "pattern_blueprints.h"
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include "helper_cuda.h" // From NVIDIA CUDA samples.
#include <cmath>
#include <iostream>
#include <algorithm>
#include <unordered_map>
using ConwaysCUDA::CELL_AGE_T;
using ConwaysCUDA::CELL_STATUS_T;
namespace {
typedef unsigned int uint;
enum PatternMode: uint8_t {
NORMAL, HOVER, UNHOVER
};
// A utility class for sending Patterns to the GPU (with caching).
class PatternCache {
public:
~PatternCache()
{
for (const auto& entry : cache)
checkCudaErrors(hipFree(entry.second));
}
const char* to_gpu(const Pattern& pattern)
{
// Check if it's cached.
auto entry = cache.find(pattern.pattern);
if (entry != cache.end())
return entry->second;
// Copy only the pattern string to the GPU.
char* d_pattern_str;
size_t pattern_bytes = pattern.rows * pattern.cols * sizeof(char);
checkCudaErrors(hipMalloc(&d_pattern_str, pattern_bytes));
checkCudaErrors(
hipMemcpy(d_pattern_str, pattern.pattern,
pattern_bytes, hipMemcpyHostToDevice)
);
// Save it to the cache.
auto pair = cache.insert({ pattern.pattern, d_pattern_str });
return pair.first->second;
}
private:
// Cache from <pattern string> to <device pattern string>.
std::unordered_map<const char*, char*> cache;
};
// A utility class for mapping OpenGL VBOs for CUDA usage.
class WorldVBOMapper {
public:
void init(GLuint vbos[ConwaysCUDA::_VBO_COUNT])
{
for (int i = 0; i < ConwaysCUDA::_VBO_COUNT; ++i)
checkCudaErrors(hipGraphicsGLRegisterBuffer(
&vbo_resources[i], vbos[i], hipGraphicsRegisterFlagsNone));
}
void exit()
{
for (int i = 0; i < ConwaysCUDA::_VBO_COUNT; ++i)
checkCudaErrors(hipGraphicsUnregisterResource(vbo_resources[i]));
}
void map()
{
if (_is_mapped) return;
checkCudaErrors(
hipGraphicsMapResources(ConwaysCUDA::_VBO_COUNT, vbo_resources, 0)
);
size_t num_bytes;
checkCudaErrors(
hipGraphicsResourceGetMappedPointer(
(void **)&d_cell_status_ptr,
&num_bytes,
vbo_resources[ConwaysCUDA::CELL_STATUS])
);
checkCudaErrors(
hipGraphicsResourceGetMappedPointer(
(void **)&d_cell_age_ptr,
&num_bytes,
vbo_resources[ConwaysCUDA::CELL_AGE])
);
_is_mapped = true;
}
void unmap()
{
checkCudaErrors(
hipGraphicsUnmapResources(ConwaysCUDA::_VBO_COUNT, vbo_resources, 0)
);
_is_mapped = false;
}
//bool is_mapped() const { return _is_mapped; }
CELL_STATUS_T* get_cell_status() { return d_cell_status_ptr; }
CELL_AGE_T* get_cell_age() { return d_cell_age_ptr; }
private:
bool _is_mapped = false;
cudaGraphicsResource* vbo_resources[ConwaysCUDA::_VBO_COUNT];
CELL_STATUS_T* d_cell_status_ptr;
CELL_AGE_T* d_cell_age_ptr;
};
CELL_AGE_T* d_prev_cell_age_data; // (in GPU memory).
// Unfortunately, using __managed__ would mean having to hipDeviceSynchronize
// each time before use (to support older GPUs) ... That is why I'm using
// separate host and device variables.
__device__ int d_world_rows, d_world_cols;
int world_rows, world_cols;
WorldVBOMapper vbo_mapper;
PatternCache pattern_cache;
// Device constants.
// Trying to initialize these two arrays in the kernel resulted
// in illegal memory address problems.
__constant__ int DX[8] = { 1, 1, 1, 0, 0, -1, -1, -1 };
__constant__ int DY[8] = { 0, 1, -1, 1, -1, 0, 1, -1 };
}
// Forward declaration.
void set_pattern(const Blueprint & blueprint, int row, int col, PatternMode type);
__device__
inline bool in_range(int n, int lo, int hi)
{
return n >= lo && n <= hi;
}
// The following two functions define a bijective mapping
// between normal and hovered tile state.
__device__
CELL_STATUS_T cell_to_hovered(CELL_STATUS_T n, bool pattern_bit)
{
if (!in_range(n, -8, 1)) return n;
if (pattern_bit) return n + 30; // [-8, 1] -> [22, 31]
return n + 10; // [-8, 1] -> [2, 11]
}
__device__
CELL_STATUS_T hovered_to_cell(CELL_STATUS_T n)
{
if (in_range(n, 22, 31)) return n - 30;
if (in_range(n, 2, 11)) return n - 10;
return n;
}
__device__
inline bool is_cell_hovered(CELL_STATUS_T n)
{
return !in_range(n, -8, 1);
}
__device__
inline bool is_cell_hovered_bit(CELL_STATUS_T n)
{
return n >= 22;
}
__global__
void copy_mem(const CELL_AGE_T* src, CELL_AGE_T* dst, size_t size)
{
int index = blockDim.x * blockIdx.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = index; i < size; i += stride)
dst[i] = src[i];
}
__device__
inline int get_world_index(int row, int col)
{
// % (modulo) is used to wrap around the grid to give an illusion
// of an infinite grid.
// -1 % 5 -> -1 [c++]
// -1 % 5 -> 4 [python]
// Solution for negative numbers when doing a mod n:
// a + n mod n, because a + k*n mod n == a mod n !
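// Example: with d_world_cols == 5 and col == -1, (5 + (-1)) % 5 == 4, the Python-style wrap-around.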
col = (d_world_cols + col) % d_world_cols;
row = (d_world_rows + row) % d_world_rows;
return row * d_world_cols + col;
}
// First component is the row, the second is the col.
__device__
inline int2 get_grid_cell(int world_idx, int cols)
{
return make_int2(world_idx / cols, world_idx % cols);
}
__device__
int count_neighbours(CELL_AGE_T* old_state, int row, int col)
{
int neighbours = 0;
for (int i = 0; i < 8; ++i) {
int neighbour_idx = get_world_index(row + DY[i], col + DX[i]);
if (old_state[neighbour_idx] > 0)
++neighbours;
}
return neighbours;
}
__global__
void tick_kernel(CELL_AGE_T* old_cell_ages, CELL_STATUS_T* new_cell_status, CELL_AGE_T* new_cell_ages)
{
// 1. Any live cell with fewer than two live neighbors dies, as if by underpopulation.
// 2. Any live cell with two or three live neighbors lives on to the next generation.
// 3. Any live cell with more than three live neighbors dies, as if by overpopulation.
// 4. Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
// A grid-stride loop.
// Each thread in the grid has its own threadIdx, which means
// we can calculate for each thread which elements of an
// array it should process without interfering with any other
// thread.
// index calculation: number of blocks in grid * index of current block + thread index.
// stride tells us the total amount of threads.
int index = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int size = d_world_rows * d_world_cols;
for (int i = index; i < size; i += stride) {
int2 tile = get_grid_cell(i, d_world_cols);
int neighbours = count_neighbours(old_cell_ages, tile.x, tile.y);
bool cell_alive = old_cell_ages[i] > 0;
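// Note: the single condition below (exactly 3 neighbours, or an already-live cell with 2)
// is equivalent to applying all four rules listed above in one step.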
if (neighbours == 3 || (cell_alive && neighbours == 2)) {
// If a pattern is being hovered at this location,
// we have to keep it hovered and figure out if the pattern bit was set.
int cell_status = new_cell_status[i];
if (is_cell_hovered(cell_status)) {
new_cell_status[i] = cell_to_hovered(1, is_cell_hovered_bit(cell_status));
} else {
new_cell_status[i] = 1;
}
new_cell_ages[i] = max(1, new_cell_ages[i] + 1);
}
else {
int cell_status = new_cell_status[i];
if (is_cell_hovered(cell_status)) {
new_cell_status[i] = cell_to_hovered(-neighbours, is_cell_hovered_bit(cell_status));
} else {
new_cell_status[i] = -neighbours;
}
new_cell_ages[i] = min(-1, new_cell_ages[i] - 1);
}
}
}
// Simulates one iteration of the game of life.
void ConwaysCUDA::tick()
{
// 0. map the VBO for use by CUDA.
// 1. copy the current world state as defined by the VBO.
// 2. update the current state in the VBO.
// 3. unmap the VBO.
// Steps 0 and 3 are done by WorldVBOMapper
// CUDA threads execute in a _grid_ of threads. Each block in the grid is
// of size block_size, and num_blocks is how many blocks there are in the grid.
// Total number of simultaneous threads is then block_size * num_blocks.
// When running a kernel function (__global__) (GPU code), we specify the size
// of the thread grid.
const uint block_size = 256;
uint grid_size = world_rows * world_cols;
uint num_blocks = (grid_size + block_size - 1) / block_size;
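// Worked example (illustrative numbers only): a 100x100 world gives grid_size = 10000,
// so num_blocks = (10000 + 255) / 256 = 40, the usual ceiling division for launch sizing.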
// Copy the previous world state using hipMemcpy or copy_mem kernel:
//hipMemcpy(world_state, data_ptr, grid_size * sizeof(GLbyte), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( copy_mem), dim3(num_blocks), dim3(block_size), 0, 0, vbo_mapper.get_cell_age(),
d_prev_cell_age_data, grid_size);
hipLaunchKernelGGL(( tick_kernel), dim3(num_blocks), dim3(block_size), 0, 0, d_prev_cell_age_data,
vbo_mapper.get_cell_status(),
vbo_mapper.get_cell_age());
}
__global__
void toggle_cell_kernel(CELL_AGE_T* cell_age, CELL_STATUS_T* cell_status, int idx)
{
CELL_STATUS_T status = cell_status[idx];
cell_age[idx] = (status > 0 ? 0 : 1);
cell_status[idx] = cell_age[idx];
}
// There are 3 options for pattern building:
// 1. normal pattern
// 2. hover
// 3. remove hover
__global__
void build_pattern_kernel(CELL_AGE_T* cell_age, CELL_STATUS_T* cell_status,
const Pattern pattern, int row, int col, PatternMode type)
{
// Thread index of cell in pattern.
int thread_idx = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int size = pattern.cols * pattern.rows;
for (int cell_idx = thread_idx; cell_idx < size; cell_idx += stride) {
int2 pattern_cell = get_grid_cell(cell_idx, pattern.cols);
char val = pattern.pattern[cell_idx] - '0';
pattern.get_rotated_coordinates(&pattern_cell.x, &pattern_cell.y);
// (0,0) is at the bottom left
int idx = get_world_index(row - pattern_cell.x, col + pattern_cell.y);
switch (type) {
case NORMAL:
cell_age[idx] = val;
cell_status[idx] = val;
break;
case HOVER:
cell_status[idx] = cell_to_hovered(cell_status[idx], val);
break;
case UNHOVER:
cell_status[idx] = hovered_to_cell(cell_status[idx]);
break;
}
}
}
void set_pattern(const Pattern& pattern, int row, int col, PatternMode type)
{
// This function is the biggest CPU bottleneck when hovering patterns!
// It would be more efficient to batch all the patterns and process them
// in a kernel. Another option would be to send all the patterns to
// the GPU only once at program start-up.
// The option I chose was the simplest: caching in PatternCache.
// That fixed the to_gpu() problem. But now the bottleneck is in WorldVBOMapper.
// The solution is to map right at the start of pattern building
// (in set_pattern(Blueprint ...)) or some earlier time ...
// We have to send the Pattern to the GPU. The pattern string
// gets cached and the pattern contents get copied to the GPU.
// However, we have to point pattern.pattern to the device string
// otherwise the GPU would try to access host memory.
const char* d_pattern_str = pattern_cache.to_gpu(pattern);
Pattern pattern_cpy = pattern;
pattern_cpy.pattern = d_pattern_str;
// Make a thread for each cell in the pattern.
const uint block_size = 64;
uint grid_size = pattern.width() * pattern.height();
uint num_blocks = (grid_size + block_size - 1) / block_size;
hipLaunchKernelGGL(( build_pattern_kernel), dim3(num_blocks), dim3(block_size), 0, 0,
vbo_mapper.get_cell_age(), vbo_mapper.get_cell_status(),
pattern_cpy, row, col, type);
}
void set_pattern(const MultiPattern& pattern, int row, int col, PatternMode type)
{
for (int i = 0; i < pattern.blueprints.size(); ++i) {
int row_offset = pattern.row_offsets[i];
int col_offset = pattern.col_offsets[i];
pattern.get_rotated_offset(pattern.blueprints[i], row_offset, col_offset);
set_pattern(*pattern.blueprints[i],
row + row_offset,
col + col_offset, type);
}
}
void set_pattern(const Blueprint & blueprint, int row, int col, PatternMode type)
{
switch (blueprint.type()) {
case Blueprint::BlueprintType::Pattern:
set_pattern(static_cast<const Pattern&>(blueprint), row, col, type);
break;
case Blueprint::BlueprintType::MultiPattern:
set_pattern(static_cast<const MultiPattern&>(blueprint), row, col, type);
break;
}
}
void ConwaysCUDA::set_pattern(const Blueprint & blueprint, int row, int col)
{
set_pattern(blueprint, row, col, NORMAL);
}
void ConwaysCUDA::set_hover_pattern(const Blueprint & blueprint, int row, int col, bool hover)
{
// Hovered state information is written into the cell_status vbo.
// That information is then used in the fragment shader to highlight
// the given pattern. By using cell_to_hovered and hovered_to_cell,
// we make sure no cell_status information is lost. The cell_age vbo
// is left untouched when hovering patterns.
set_pattern(blueprint, row, col, (hover ? HOVER : UNHOVER));
}
void ConwaysCUDA::toggle_cell(int row, int col)
{
// The alternative to using glMapBuffer, etc...
hipLaunchKernelGGL(( toggle_cell_kernel), dim3(1), dim3(1), 0, 0, vbo_mapper.get_cell_age(),
vbo_mapper.get_cell_status(),
row * world_cols + col);
}
bool ConwaysCUDA::init(int rows, int cols, GLuint vbos[_VBO_COUNT])
{
world_rows = rows;
world_cols = cols;
hipMemcpyToSymbol(d_world_rows, &world_rows, sizeof(int));
hipMemcpyToSymbol(d_world_cols, &world_cols, sizeof(int));
checkCudaErrors(hipMalloc(&d_prev_cell_age_data, sizeof(CELL_AGE_T) * rows * cols));
// Necessary for OpenGL interop.
vbo_mapper.init(vbos);
return true;
}
void ConwaysCUDA::exit()
{
vbo_mapper.exit();
checkCudaErrors(hipFree(d_prev_cell_age_data));
}
void ConwaysCUDA::start_interop()
{
vbo_mapper.map();
}
void ConwaysCUDA::stop_interop()
{
vbo_mapper.unmap();
}
|
fc946f2106c8b41c67a48ae7ba9084347e2c0a36.cu
|
#include "ConwaysCUDA.h"
#include "pattern_blueprints.h"
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include "helper_cuda.h" // From NVIDIA CUDA samples.
#include <cmath>
#include <iostream>
#include <algorithm>
#include <unordered_map>
using ConwaysCUDA::CELL_AGE_T;
using ConwaysCUDA::CELL_STATUS_T;
namespace {
typedef unsigned int uint;
enum PatternMode: uint8_t {
NORMAL, HOVER, UNHOVER
};
// A utility class for sending Patterns to the GPU (with caching).
class PatternCache {
public:
~PatternCache()
{
for (const auto& entry : cache)
checkCudaErrors(cudaFree(entry.second));
}
const char* to_gpu(const Pattern& pattern)
{
// Check if it's cached.
auto entry = cache.find(pattern.pattern);
if (entry != cache.end())
return entry->second;
// Copy only the pattern string to the GPU.
char* d_pattern_str;
size_t pattern_bytes = pattern.rows * pattern.cols * sizeof(char);
checkCudaErrors(cudaMalloc(&d_pattern_str, pattern_bytes));
checkCudaErrors(
cudaMemcpy(d_pattern_str, pattern.pattern,
pattern_bytes, cudaMemcpyHostToDevice)
);
// Save it to the cache.
auto pair = cache.insert({ pattern.pattern, d_pattern_str });
return pair.first->second;
}
private:
// Cache from <pattern string> to <device pattern string>.
std::unordered_map<const char*, char*> cache;
};
// A utility class for mapping OpenGL VBOs for CUDA usage.
class WorldVBOMapper {
public:
void init(GLuint vbos[ConwaysCUDA::_VBO_COUNT])
{
for (int i = 0; i < ConwaysCUDA::_VBO_COUNT; ++i)
checkCudaErrors(cudaGraphicsGLRegisterBuffer(
&vbo_resources[i], vbos[i], cudaGraphicsRegisterFlagsNone));
}
void exit()
{
for (int i = 0; i < ConwaysCUDA::_VBO_COUNT; ++i)
checkCudaErrors(cudaGraphicsUnregisterResource(vbo_resources[i]));
}
void map()
{
if (_is_mapped) return;
checkCudaErrors(
cudaGraphicsMapResources(ConwaysCUDA::_VBO_COUNT, vbo_resources, 0)
);
size_t num_bytes;
checkCudaErrors(
cudaGraphicsResourceGetMappedPointer(
(void **)&d_cell_status_ptr,
&num_bytes,
vbo_resources[ConwaysCUDA::CELL_STATUS])
);
checkCudaErrors(
cudaGraphicsResourceGetMappedPointer(
(void **)&d_cell_age_ptr,
&num_bytes,
vbo_resources[ConwaysCUDA::CELL_AGE])
);
_is_mapped = true;
}
void unmap()
{
checkCudaErrors(
cudaGraphicsUnmapResources(ConwaysCUDA::_VBO_COUNT, vbo_resources, 0)
);
_is_mapped = false;
}
//bool is_mapped() const { return _is_mapped; }
CELL_STATUS_T* get_cell_status() { return d_cell_status_ptr; }
CELL_AGE_T* get_cell_age() { return d_cell_age_ptr; }
private:
bool _is_mapped = false;
cudaGraphicsResource* vbo_resources[ConwaysCUDA::_VBO_COUNT];
CELL_STATUS_T* d_cell_status_ptr;
CELL_AGE_T* d_cell_age_ptr;
};
CELL_AGE_T* d_prev_cell_age_data; // (in GPU memory).
// Unfortunately, using __managed__ would mean having to cudaDeviceSynchronize
// each time before use (to support older GPUs) ... That is why I'm using
// separate host and device variables.
__device__ int d_world_rows, d_world_cols;
int world_rows, world_cols;
WorldVBOMapper vbo_mapper;
PatternCache pattern_cache;
// Device constants.
// Trying to initialize these two arrays in the kernel resulted
// in illegal memory address problems.
__constant__ int DX[8] = { 1, 1, 1, 0, 0, -1, -1, -1 };
__constant__ int DY[8] = { 0, 1, -1, 1, -1, 0, 1, -1 };
}
// Forward declaration.
void set_pattern(const Blueprint & blueprint, int row, int col, PatternMode type);
__device__
inline bool in_range(int n, int lo, int hi)
{
return n >= lo && n <= hi;
}
// The following two functions define a bijective mapping
// between normal and hovered tile state.
__device__
CELL_STATUS_T cell_to_hovered(CELL_STATUS_T n, bool pattern_bit)
{
if (!in_range(n, -8, 1)) return n;
if (pattern_bit) return n + 30; // [-8, 1] -> [22, 31]
return n + 10; // [-8, 1] -> [2, 11]
}
__device__
CELL_STATUS_T hovered_to_cell(CELL_STATUS_T n)
{
if (in_range(n, 22, 31)) return n - 30;
if (in_range(n, 2, 11)) return n - 10;
return n;
}
__device__
inline bool is_cell_hovered(CELL_STATUS_T n)
{
return !in_range(n, -8, 1);
}
__device__
inline bool is_cell_hovered_bit(CELL_STATUS_T n)
{
return n >= 22;
}
__global__
void copy_mem(const CELL_AGE_T* src, CELL_AGE_T* dst, size_t size)
{
int index = blockDim.x * blockIdx.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = index; i < size; i += stride)
dst[i] = src[i];
}
__device__
inline int get_world_index(int row, int col)
{
// % (modulo) is used to wrap around the grid to give an illusion
// of an infinite grid.
// -1 % 5 -> -1 [c++]
// -1 % 5 -> 4 [python]
// Solution for negative numbers when doing a mod n:
// a + n mod n, because a + k*n mod n == a mod n !
col = (d_world_cols + col) % d_world_cols;
row = (d_world_rows + row) % d_world_rows;
return row * d_world_cols + col;
}
// First component is the row, the second is the col.
__device__
inline int2 get_grid_cell(int world_idx, int cols)
{
return make_int2(world_idx / cols, world_idx % cols);
}
__device__
int count_neighbours(CELL_AGE_T* old_state, int row, int col)
{
int neighbours = 0;
for (int i = 0; i < 8; ++i) {
int neighbour_idx = get_world_index(row + DY[i], col + DX[i]);
if (old_state[neighbour_idx] > 0)
++neighbours;
}
return neighbours;
}
__global__
void tick_kernel(CELL_AGE_T* old_cell_ages, CELL_STATUS_T* new_cell_status, CELL_AGE_T* new_cell_ages)
{
// 1. Any live cell with fewer than two live neighbors dies, as if by underpopulation.
// 2. Any live cell with two or three live neighbors lives on to the next generation.
// 3. Any live cell with more than three live neighbors dies, as if by overpopulation.
// 4. Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
// A grid-stride loop.
// Each thread in the grid has its own threadIdx, which means
// we can calculate for each thread which elements of an
// array it should process without interfering with any other
// thread.
// index calculation: number of blocks in grid * index of current block + thread index.
// stride tells us the total amount of threads.
int index = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int size = d_world_rows * d_world_cols;
for (int i = index; i < size; i += stride) {
int2 tile = get_grid_cell(i, d_world_cols);
int neighbours = count_neighbours(old_cell_ages, tile.x, tile.y);
bool cell_alive = old_cell_ages[i] > 0;
if (neighbours == 3 || (cell_alive && neighbours == 2)) {
// If a pattern is being hovered at this location,
// we have to keep it hovered and figure out if the pattern bit was set.
int cell_status = new_cell_status[i];
if (is_cell_hovered(cell_status)) {
new_cell_status[i] = cell_to_hovered(1, is_cell_hovered_bit(cell_status));
} else {
new_cell_status[i] = 1;
}
new_cell_ages[i] = max(1, new_cell_ages[i] + 1);
}
else {
int cell_status = new_cell_status[i];
if (is_cell_hovered(cell_status)) {
new_cell_status[i] = cell_to_hovered(-neighbours, is_cell_hovered_bit(cell_status));
} else {
new_cell_status[i] = -neighbours;
}
new_cell_ages[i] = min(-1, new_cell_ages[i] - 1);
}
}
}
// Simulates one iteration of the game of life.
void ConwaysCUDA::tick()
{
// 0. map the VBO for use by CUDA.
// 1. copy the current world state as defined by the VBO.
// 2. update the current state in the VBO.
// 3. unmap the VBO.
// Steps 0 and 3 are done by WorldVBOMapper
// CUDA threads execute in a _grid_ of threads. Each block in the grid is
// of size block_size, and num_blocks is how many blocks there are in the grid.
// Total number of simultaneous threads is then block_size * num_blocks.
// When running a kernel function (__global__) (GPU code), we specify the size
// of the thread grid.
const uint block_size = 256;
uint grid_size = world_rows * world_cols;
uint num_blocks = (grid_size + block_size - 1) / block_size;
// Copy the previous world state using cudaMemcpy or copy_mem kernel:
//cudaMemcpy(world_state, data_ptr, grid_size * sizeof(GLbyte), cudaMemcpyDeviceToDevice);
copy_mem<<<num_blocks, block_size>>>(vbo_mapper.get_cell_age(),
d_prev_cell_age_data, grid_size);
tick_kernel<<<num_blocks, block_size>>>(d_prev_cell_age_data,
vbo_mapper.get_cell_status(),
vbo_mapper.get_cell_age());
}
__global__
void toggle_cell_kernel(CELL_AGE_T* cell_age, CELL_STATUS_T* cell_status, int idx)
{
CELL_STATUS_T status = cell_status[idx];
cell_age[idx] = (status > 0 ? 0 : 1);
cell_status[idx] = cell_age[idx];
}
// There are 3 options for pattern building:
// 1. normal pattern
// 2. hover
// 3. remove hover
__global__
void build_pattern_kernel(CELL_AGE_T* cell_age, CELL_STATUS_T* cell_status,
const Pattern pattern, int row, int col, PatternMode type)
{
// Thread index of cell in pattern.
int thread_idx = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int size = pattern.cols * pattern.rows;
for (int cell_idx = thread_idx; cell_idx < size; cell_idx += stride) {
int2 pattern_cell = get_grid_cell(cell_idx, pattern.cols);
char val = pattern.pattern[cell_idx] - '0';
pattern.get_rotated_coordinates(&pattern_cell.x, &pattern_cell.y);
// (0,0) is at the bottom left
int idx = get_world_index(row - pattern_cell.x, col + pattern_cell.y);
switch (type) {
case NORMAL:
cell_age[idx] = val;
cell_status[idx] = val;
break;
case HOVER:
cell_status[idx] = cell_to_hovered(cell_status[idx], val);
break;
case UNHOVER:
cell_status[idx] = hovered_to_cell(cell_status[idx]);
break;
}
}
}
void set_pattern(const Pattern& pattern, int row, int col, PatternMode type)
{
// This function is the biggest CPU bottleneck when hovering patterns!
// It would be more efficient to batch all the patterns and process them
// in a kernel. Another option would be to send all the patterns to
// the GPU only once at program start-up.
// The option I chose was the simplest: caching in PatternCache.
// That fixed the to_gpu() problem. But now the bottleneck is in WorldVBOMapper.
// The solution is to map right at the start of pattern building
// (in set_pattern(Blueprint ...)) or some earlier time ...
// We have to send the Pattern to the GPU. The pattern string
// gets cached and the pattern contents get copied to the GPU.
// However, we have to point pattern.pattern to the device string
// otherwise the GPU would try to access host memory.
const char* d_pattern_str = pattern_cache.to_gpu(pattern);
Pattern pattern_cpy = pattern;
pattern_cpy.pattern = d_pattern_str;
// Make a thread for each cell in the pattern.
const uint block_size = 64;
uint grid_size = pattern.width() * pattern.height();
uint num_blocks = (grid_size + block_size - 1) / block_size;
build_pattern_kernel<<<num_blocks, block_size>>>(
vbo_mapper.get_cell_age(), vbo_mapper.get_cell_status(),
pattern_cpy, row, col, type);
}
void set_pattern(const MultiPattern& pattern, int row, int col, PatternMode type)
{
for (int i = 0; i < pattern.blueprints.size(); ++i) {
int row_offset = pattern.row_offsets[i];
int col_offset = pattern.col_offsets[i];
pattern.get_rotated_offset(pattern.blueprints[i], row_offset, col_offset);
set_pattern(*pattern.blueprints[i],
row + row_offset,
col + col_offset, type);
}
}
void set_pattern(const Blueprint & blueprint, int row, int col, PatternMode type)
{
switch (blueprint.type()) {
case Blueprint::BlueprintType::Pattern:
set_pattern(static_cast<const Pattern&>(blueprint), row, col, type);
break;
case Blueprint::BlueprintType::MultiPattern:
set_pattern(static_cast<const MultiPattern&>(blueprint), row, col, type);
break;
}
}
void ConwaysCUDA::set_pattern(const Blueprint & blueprint, int row, int col)
{
set_pattern(blueprint, row, col, NORMAL);
}
void ConwaysCUDA::set_hover_pattern(const Blueprint & blueprint, int row, int col, bool hover)
{
// Hovered state information is written into the cell_status vbo.
// That information is then used in the fragment shader to highlight
// the given pattern. By using cell_to_hovered and hovered_to_cell,
// we make sure no cell_status information is lost. The cell_age vbo
// is left untouched when hovering patterns.
set_pattern(blueprint, row, col, (hover ? HOVER : UNHOVER));
}
void ConwaysCUDA::toggle_cell(int row, int col)
{
// The alternative to using glMapBuffer, etc...
toggle_cell_kernel<<<1, 1>>>(vbo_mapper.get_cell_age(),
vbo_mapper.get_cell_status(),
row * world_cols + col);
}
bool ConwaysCUDA::init(int rows, int cols, GLuint vbos[_VBO_COUNT])
{
world_rows = rows;
world_cols = cols;
cudaMemcpyToSymbol(d_world_rows, &world_rows, sizeof(int));
cudaMemcpyToSymbol(d_world_cols, &world_cols, sizeof(int));
checkCudaErrors(cudaMalloc(&d_prev_cell_age_data, sizeof(CELL_AGE_T) * rows * cols));
// Necessary for OpenGL interop.
vbo_mapper.init(vbos);
return true;
}
void ConwaysCUDA::exit()
{
vbo_mapper.exit();
checkCudaErrors(cudaFree(d_prev_cell_age_data));
}
void ConwaysCUDA::start_interop()
{
vbo_mapper.map();
}
void ConwaysCUDA::stop_interop()
{
vbo_mapper.unmap();
}
|
853d846a2a42b7c5d2e99d5afbe97bceef2a0553.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2015 by Contributors
* \file svm_output.cu
* \brief
* \author Jonas Amaro
*/
#include "./svm_output-inl.h"
#include <device_launch_parameters.h>
#include "mshadow/tensor.h"
namespace mshadow {
template<int n_bits, typename DType>
__global__ void L1_SVMKernel(const DType margin,
const DType reg_coef,
Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 2, DType> src) {
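// Annotation added for clarity (not from the original source): for the labelled class k this
// kernel writes -reg_coef * 1[margin > src[y][k]], and for every other class it writes
// reg_coef * 1[margin > -src[y][n_index]], i.e. the subgradient of a one-vs-rest L1 (hinge)
// SVM loss with respect to the scores in src.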
const index_t nmax = dst.size(1);
const unsigned n_size = 1 << n_bits;
const int y = blockIdx.x;
const int n = threadIdx.x;
const index_t k = static_cast<int>(label[y]);
for (index_t n_index = n; n_index < nmax; n_index += n_size) {
if (n_index == k) {
dst[y][k] = -DType(margin > src[y][k]) * reg_coef;
} else {
dst[y][n_index] = DType(margin > -src[y][n_index]) * reg_coef;
}
}
}
template<typename DType>
inline void L1_SVM(const DType & margin,
const DType & reg_coef,
Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 1, DType> & label,
const Tensor<gpu, 2, DType> & src) {
dim3 dimBlock(cuda::kBaseThreadNum);
dim3 dimGrid(dst.size(0));
hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
hipLaunchKernelGGL(( L1_SVMKernel<cuda::kBaseThreadBits, DType>) , dim3(dimGrid), dim3(dimBlock), 0, stream ,
margin, reg_coef, dst, label, src);
}
template<int n_bits, typename DType>
__global__ void L2_SVMKernel(const DType margin,
const DType reg_coef,
Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 2, DType> src) {
const index_t nmax = dst.size(1);
const unsigned n_size = 1 << n_bits;
const int y = blockIdx.x;
const int n = threadIdx.x;
const index_t k = static_cast<int>(label[y]);
for (index_t n_index = n; n_index < nmax; n_index += n_size) {
if (n_index == k) {
dst[y][k] = margin > src[y][k] ? 2 * (margin - src[y][k]) : DType(0.0f);
dst[y][k] *= -reg_coef;
} else {
dst[y][n_index] = margin > -src[y][n_index] ? (-2)*(margin + src[y][n_index]) : DType(0.0f);
dst[y][n_index] *= -reg_coef;
}
}
}
template<typename DType>
inline void L2_SVM(const DType & margin,
const DType & reg_coef,
Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 1, DType> & label,
const Tensor<gpu, 2, DType> & src) {
dim3 dimBlock(cuda::kBaseThreadNum);
dim3 dimGrid(dst.size(0));
hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
hipLaunchKernelGGL(( L2_SVMKernel<cuda::kBaseThreadBits, DType>) , dim3(dimGrid), dim3(dimBlock), 0, stream ,
margin, reg_coef, dst, label, src);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(SVMOutputParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new SVMOutputOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
|
853d846a2a42b7c5d2e99d5afbe97bceef2a0553.cu
|
/*!
* Copyright (c) 2015 by Contributors
* \file svm_output.cu
* \brief
* \author Jonas Amaro
*/
#include "./svm_output-inl.h"
#include <device_launch_parameters.h>
#include "mshadow/tensor.h"
namespace mshadow {
template<int n_bits, typename DType>
__global__ void L1_SVMKernel(const DType margin,
const DType reg_coef,
Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 2, DType> src) {
const index_t nmax = dst.size(1);
const unsigned n_size = 1 << n_bits;
const int y = blockIdx.x;
const int n = threadIdx.x;
const index_t k = static_cast<int>(label[y]);
for (index_t n_index = n; n_index < nmax; n_index += n_size) {
if (n_index == k) {
dst[y][k] = -DType(margin > src[y][k]) * reg_coef;
} else {
dst[y][n_index] = DType(margin > -src[y][n_index]) * reg_coef;
}
}
}
template<typename DType>
inline void L1_SVM(const DType & margin,
const DType & reg_coef,
Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 1, DType> & label,
const Tensor<gpu, 2, DType> & src) {
dim3 dimBlock(cuda::kBaseThreadNum);
dim3 dimGrid(dst.size(0));
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
L1_SVMKernel<cuda::kBaseThreadBits, DType> <<<dimGrid, dimBlock, 0, stream >>>
(margin, reg_coef, dst, label, src);
}
template<int n_bits, typename DType>
__global__ void L2_SVMKernel(const DType margin,
const DType reg_coef,
Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 2, DType> src) {
const index_t nmax = dst.size(1);
const unsigned n_size = 1 << n_bits;
const int y = blockIdx.x;
const int n = threadIdx.x;
const index_t k = static_cast<int>(label[y]);
for (index_t n_index = n; n_index < nmax; n_index += n_size) {
if (n_index == k) {
dst[y][k] = margin > src[y][k] ? 2 * (margin - src[y][k]) : DType(0.0f);
dst[y][k] *= -reg_coef;
} else {
dst[y][n_index] = margin > -src[y][n_index] ? (-2)*(margin + src[y][n_index]) : DType(0.0f);
dst[y][n_index] *= -reg_coef;
}
}
}
template<typename DType>
inline void L2_SVM(const DType & margin,
const DType & reg_coef,
Tensor<gpu, 2, DType> dst,
const Tensor<gpu, 1, DType> & label,
const Tensor<gpu, 2, DType> & src) {
dim3 dimBlock(cuda::kBaseThreadNum);
dim3 dimGrid(dst.size(0));
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
L2_SVMKernel<cuda::kBaseThreadBits, DType> <<<dimGrid, dimBlock, 0, stream >>>
(margin, reg_coef, dst, label, src);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(SVMOutputParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new SVMOutputOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
|
813a43bbebd9f833f1b67e2085250f5d7cb056ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include "matrix_lib.h"
#define THREADS_PER_BLOCK 256
// Kernel function that multiplies matrix A by a scalar, updating matrix A in place
__global__
void mult_scalar(int n , float scalar, float *d_a){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x *blockDim.x;
for(int i = index; i < n; i += stride){
d_a[i] = d_a[i] * scalar;
}
}
// Kernel function that multiplies matrix A by matrix B, storing the result in matrix C
__global__
void mult_matrix(int w_a, int w_b, int h_b, int h_a, float *d_a, float *d_b, float *d_c){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x *blockDim.x;
int w_c = h_a;
int h_c = w_b;
// Compute the resulting matrix
for(int i = index; i < w_c*h_c; i += stride) {
d_c[i] = 0;
for(int j = 0; j < w_a; j++) {
d_c[i] += d_a[(i/w_c)*w_a + j] * d_b[w_b*j + i%w_c];
}
}
}
int scalar_matrix_mult(float scalar_value, struct matrix *matrix){
if(matrix == NULL) return 0;
int blockSize = THREADS_PER_BLOCK;
int numBlocks = (matrix->height*matrix->width + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( mult_scalar), dim3(numBlocks), dim3(blockSize), 0, 0, matrix->height*matrix->width,scalar_value,matrix->d_rows);
return 1;
}
int matrix_matrix_mult(struct matrix *matrixA, struct matrix *matrixB, struct matrix *matrixC){
if(matrixA == NULL || matrixB == NULL|| matrixC == NULL) return 0;
if(matrixA->width != matrixB->height){
printf("No possvel multiplicar matriz %dx%d por outra %dx%d\n", matrixA->height, matrixA->width, matrixB->height, matrixB->width);
return 0;
}
int blockSize = THREADS_PER_BLOCK;
int numBlocks = (matrixC->height*matrixC->width + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( mult_matrix), dim3(numBlocks), dim3(blockSize), 0, 0, matrixA->width, matrixB->width, matrixB->height, matrixA->height ,matrixA->d_rows, matrixB->d_rows, matrixC->d_rows);
return 1;
}
|
813a43bbebd9f833f1b67e2085250f5d7cb056ae.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <math.h>
#include "matrix_lib.h"
#define THREADS_PER_BLOCK 256
// Kernel function that multiplies matrix A by a scalar, updating matrix A in place
__global__
void mult_scalar(int n , float scalar, float *d_a){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x *blockDim.x;
for(int i = index; i < n; i += stride){
d_a[i] = d_a[i] * scalar;
}
}
//Kernel function that multiplies matrix A by matrix B, storing the result in matrix C
__global__
void mult_matrix(int w_a, int w_b, int h_b, int h_a, float *d_a, float *d_b, float *d_c){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x *blockDim.x;
int w_c = h_a;
int h_c = w_b;
// Computing the resulting matrix
for(int i = index; i < w_c*h_c; i += stride) {
d_c[i] = 0;
for(int j = 0; j < w_a; j++) {
d_c[i] += d_a[(i/w_c)*w_a + j] * d_b[w_b*j + i%w_c];
}
}
}
int scalar_matrix_mult(float scalar_value, struct matrix *matrix){
if(matrix == NULL) return 0;
int blockSize = THREADS_PER_BLOCK;
int numBlocks = (matrix->height*matrix->width + blockSize - 1) / blockSize;
mult_scalar<<<numBlocks, blockSize>>>(matrix->height*matrix->width,scalar_value,matrix->d_rows);
return 1;
}
int matrix_matrix_mult(struct matrix *matrixA, struct matrix *matrixB, struct matrix *matrixC){
if(matrixA == NULL || matrixB == NULL|| matrixC == NULL) return 0;
if(matrixA->width != matrixB->height){
printf("Não é possível multiplicar matriz %dx%d por outra %dx%d\n", matrixA->height, matrixA->width, matrixB->height, matrixB->width);
return 0;
}
int blockSize = THREADS_PER_BLOCK;
int numBlocks = (matrixC->height*matrixC->width + blockSize - 1) / blockSize;
mult_matrix<<<numBlocks, blockSize>>>(matrixA->width, matrixB->width, matrixB->height, matrixA->height ,matrixA->d_rows, matrixB->d_rows, matrixC->d_rows);
return 1;
}
|
d094ecf54207c439ec8264d14feab209ec69bab4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<30;
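// Note: with N = 1<<30 each float array below is 4 GiB, so the two device
// buffers need 8 GiB of GPU memory (plus 8 GiB on the host); a smaller N such
// as 1<<20 keeps the example runnable on smaller GPUs.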
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on N elements
hipLaunchKernelGGL(saxpy, dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
}
|
d094ecf54207c439ec8264d14feab209ec69bab4.cu
|
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<30;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on N elements
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
|
ac5fd63f7d6dda56be3f9c98986ce8c11a5747d0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated d Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_d
__global__
void magma_dlarfg_gpu_kernel( int n, double* dx0, double* dx,
double *dtau, double *dxnorm, double* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ double scale;
double xnorm;
double dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_D_ZERO;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
double alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
double beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
double alphar = MAGMA_D_REAL(alpha), alphai = MAGMA_D_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
double beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_D_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_D_MAKE(beta, 0.);
alpha = MAGMA_D_MAKE( MAGMA_D_REAL(alpha) - beta, MAGMA_D_IMAG(alpha));
scale = MAGMA_D_DIV( MAGMA_D_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_D_MUL(dxi, scale);
} else
*dtau = MAGMA_D_ZERO;
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's dlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
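/*
    For reference, the quantities computed by the real-precision branch of the
    kernel above are:
        beta  = -sign(alpha) * sqrt( alpha^2 + ||dx||^2 )
        tau   = (beta - alpha) / beta
        scale = 1 / (alpha - beta)
        dx    = scale * dx      (so the reflector is v = [1, dx])
        dAkk  = beta
*/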
extern "C" magma_int_t
magma_dlarfg_gpu( magma_int_t n, double *dx0, double *dx,
double *dtau, double *dxnorm, double *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_dnrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_dnrm2_cols(n-1, 1, dx0+1, n, dxnorm);
hipLaunchKernelGGL(( magma_dlarfg_gpu_kernel), dim3(blocks), dim3(threads),
0, magma_stream , n, dx0, dx, dtau, dxnorm, dAkk);
return MAGMA_SUCCESS;
}
|
ac5fd63f7d6dda56be3f9c98986ce8c11a5747d0.cu
|
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated d Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_d
__global__
void magma_dlarfg_gpu_kernel( int n, double* dx0, double* dx,
double *dtau, double *dxnorm, double* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ double scale;
double xnorm;
double dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_D_ZERO;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
double alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
double beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
double alphar = MAGMA_D_REAL(alpha), alphai = MAGMA_D_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
double beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_D_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_D_MAKE(beta, 0.);
alpha = MAGMA_D_MAKE( MAGMA_D_REAL(alpha) - beta, MAGMA_D_IMAG(alpha));
scale = MAGMA_D_DIV( MAGMA_D_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_D_MUL(dxi, scale);
} else
*dtau = MAGMA_D_ZERO;
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's dlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" magma_int_t
magma_dlarfg_gpu( magma_int_t n, double *dx0, double *dx,
double *dtau, double *dxnorm, double *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_dnrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_dnrm2_cols(n-1, 1, dx0+1, n, dxnorm);
magma_dlarfg_gpu_kernel<<< blocks, threads,
0, magma_stream >>>(n, dx0, dx, dtau, dxnorm, dAkk);
return MAGMA_SUCCESS;
}
|
0a42eed57df90c2a97875c5badb378f90172b80c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*//*
*/
/** @file cutlass_mlp.cu
 * @author Thomas Müller, NVIDIA
* @brief CUTLASS implementation of an optimized multi-layer perceptron. Supports online training
* and simultaneous inference.
*/
#include <tiny-cuda-nn/networks/cutlass_mlp.h>
#include <tiny-cuda-nn/cutlass_matmul.h>
#include <tiny-cuda-nn/multi_stream.h>
TCNN_NAMESPACE_BEGIN
template <typename T>
CutlassMLP<T>::CutlassMLP(
uint32_t input_width,
uint32_t network_width,
uint32_t output_width,
uint32_t n_hidden_layers,
Activation activation,
Activation output_activation
) :
m_input_width{input_width},
m_network_width{network_width},
m_output_width{output_width},
m_n_hidden_layers{n_hidden_layers},
m_activation{activation},
m_output_activation{output_activation},
m_can_fuse_activation{activation != Activation::Sine}
{
m_padded_output_width = next_multiple(m_output_width, tensorcore_width);
if (n_hidden_layers > 0) {
m_n_hidden_matmuls = n_hidden_layers-1;
} else {
m_n_hidden_matmuls = 0;
}
// Create matrices related to weights
if (n_hidden_layers == 0) {
m_weight_matrices.emplace_back(nullptr, m_padded_output_width, m_input_width);
m_weight_matrices_inference.emplace_back(nullptr, m_padded_output_width, m_input_width);
m_weight_matrices_full_precision.emplace_back(nullptr, m_padded_output_width, m_input_width);
m_gradient_matrices.emplace_back(nullptr, m_padded_output_width, m_input_width);
} else {
m_weight_matrices.emplace_back(nullptr, m_network_width, m_input_width);
m_weight_matrices_inference.emplace_back(nullptr, m_network_width, m_input_width);
m_weight_matrices_full_precision.emplace_back(nullptr, m_network_width, m_input_width);
m_gradient_matrices.emplace_back(nullptr, m_network_width, m_input_width);
for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) {
m_weight_matrices.emplace_back(nullptr, m_network_width, m_network_width);
m_weight_matrices_inference.emplace_back(nullptr, m_network_width, m_network_width);
m_weight_matrices_full_precision.emplace_back(nullptr, m_network_width, m_network_width);
m_gradient_matrices.emplace_back(nullptr, m_network_width, m_network_width);
}
m_weight_matrices.emplace_back(nullptr, m_padded_output_width, m_network_width);
m_weight_matrices_inference.emplace_back(nullptr, m_padded_output_width, m_network_width);
m_weight_matrices_full_precision.emplace_back(nullptr, m_padded_output_width, m_network_width);
m_gradient_matrices.emplace_back(nullptr, m_padded_output_width, m_network_width);
}
// Determine total number of memory entries and set it
m_total_n_params = 0;
for (const auto& m : m_weight_matrices) {
m_total_n_params += m.n_elements();
}
}
template <typename CutlassLayer, typename T>
bool compute_layer(
hipStream_t stream,
bool is_inference,
Activation activation,
const GPUMatrix<T, RM>& weights,
const GPUMatrixDynamic<T>& input,
GPUMatrixDynamic<T>& output,
GPUMatrixDynamic<T>& activation_output
) {
bool can_fuse_activation = true;
if (!is_inference) {
// Never disallow fusing if the caller passes the same output and activation_output buffers... in that case,
// invertibility of the activation function may be ignored.
can_fuse_activation &= activation != Activation::Sine || &output == &activation_output;
}
if (can_fuse_activation) {
fc_multiply<CutlassLayer>(stream, weights, input, output, activation);
} else {
fc_multiply<CutlassLayer>(stream, weights, input, output);
activation_gpu(stream, activation, output, activation_output);
}
return can_fuse_activation;
}
template <typename CutlassLayer, typename T>
bool compute_inference_layer(
hipStream_t stream,
Activation activation,
const GPUMatrix<T, RM>& weights,
const GPUMatrixDynamic<T>& input,
GPUMatrixDynamic<T>& output
) {
return compute_layer<CutlassLayer>(stream, true, activation, weights, input, output, output);
}
template <typename T>
void CutlassMLP<T>::inference_mixed_precision_impl(hipStream_t stream, const GPUMatrixDynamic<T>& input, GPUMatrixDynamic<T>& output, bool use_inference_params) {
// If there are no hidden layers, the network is just a simple matmul.
if (m_n_hidden_layers == 0) {
compute_inference_layer<LastLayer>(stream, m_output_activation, input_weight_matrix(use_inference_params), input, output);
return;
}
uint32_t batch_size = input.n();
GPUMatrix<T> inference_tmp[2] = {
GPUMatrix<T>{m_network_width, batch_size, stream},
GPUMatrix<T>{m_network_width, batch_size, stream},
};
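// The two buffers above are used in ping-pong fashion below: the input layer
// writes inference_tmp[0]; hidden matmul i reads inference_tmp[(tmp_idx+1)%2]
// (the previous layer's output) and writes inference_tmp[tmp_idx%2],
// alternating between the two; the output layer reads whichever buffer was
// written last.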
m_inference_graph.capture_and_execute(stream, false, [&]() {
// Run the actual network
{
uint32_t tmp_idx = 0;
// Input layer
compute_inference_layer<FullLayer>(stream, m_activation, input_weight_matrix(use_inference_params), input, inference_tmp[tmp_idx++ % 2]);
// Hidden layers
for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) {
compute_inference_layer<FullLayer>(stream, m_activation, weight_matrix_at(use_inference_params, i), inference_tmp[(tmp_idx + 1) % 2], inference_tmp[tmp_idx % 2]);
++tmp_idx;
}
// Output
compute_inference_layer<LastLayer>(stream, m_output_activation, output_weight_matrix(use_inference_params), inference_tmp[(tmp_idx + 1) % 2], output);
}
});
}
template <typename T>
std::unique_ptr<Context> CutlassMLP<T>::forward_impl(hipStream_t stream, const GPUMatrixDynamic<T>& input, GPUMatrixDynamic<T>* output, bool use_inference_params, bool prepare_input_gradients) {
// If there are no hidden layers, the network is just a simple matmul. No tmp buffers required
if (m_n_hidden_layers == 0) {
if (output) {
compute_layer<LastLayer>(stream, false, m_output_activation, input_weight_matrix(use_inference_params), input, *output, *output);
}
return std::make_unique<ForwardContext>(); // Nothing to save -- empty context
}
// Make sure our temporary buffers have the correct size for the given batch size
uint32_t batch_size = input.n();
auto forward = allocate_forward_buffers(stream, batch_size);
// Run the actual network
uint32_t tmp_idx = 0;
bool fused = compute_layer<FullLayer>(
stream,
false,
m_activation,
input_weight_matrix(use_inference_params),
input,
forward->hidden.at(tmp_idx),
m_can_fuse_activation ? forward->hidden.at(tmp_idx) : forward->hidden.at(tmp_idx+1)
);
tmp_idx += fused ? 1 : 2;
// layers
for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) {
fused = compute_layer<FullLayer>(
stream,
false,
m_activation,
weight_matrix_at(use_inference_params, i),
forward->hidden.at(tmp_idx-1),
forward->hidden.at(tmp_idx),
m_can_fuse_activation ? forward->hidden.at(tmp_idx) : forward->hidden.at(tmp_idx+1)
);
tmp_idx += fused ? 1 : 2;
}
if (output) {
compute_layer<LastLayer>(stream, false, m_output_activation, output_weight_matrix(use_inference_params), forward->hidden.at(tmp_idx-1), *output, *output);
}
return forward;
}
template <typename T>
void CutlassMLP<T>::backward_impl(
hipStream_t stream,
const Context& ctx,
const GPUMatrixDynamic<T>& input,
const GPUMatrixDynamic<T>& output,
const GPUMatrixDynamic<T>& dL_doutput,
GPUMatrixDynamic<T>* dL_dinput,
bool use_inference_params,
EGradientMode param_gradients_mode
) {
// Make sure our temporary buffers have the correct size for the given batch size
uint32_t batch_size = dL_doutput.n();
std::vector<GPUMatrix<T>> backward_tmp(num_forward_activations());
for (uint32_t i = 0; i < num_forward_activations(); ++i) {
backward_tmp[i] = GPUMatrix<T>{m_network_width, batch_size, stream};
}
// Compute transfer of output activation in-place... it's treated specially for performance reasons
GPUMatrixDynamic<T> backward_output_tmp;
if (m_output_activation != Activation::None) {
backward_output_tmp = {m_padded_output_width, batch_size, stream, dL_doutput.layout()};
activation_backward_output_gpu(stream, dL_doutput.n_elements(), m_output_activation, output.data(), dL_doutput.data(), backward_output_tmp.data());
}
// Backprop
// - weight_gradient.T = activation * output_gradient.T
// - input_gradient = weights.T * output_gradient
// - RELU: pre_activation_gradient = post_activation_gradient if val > 0 else 0
const float param_gradient_beta = param_gradients_mode == EGradientMode::Accumulate ? 1.0f : 0.0f;
std::vector<SyncedMultiStream> multi_streams;
const auto& forward = dynamic_cast<const ForwardContext&>(ctx);
int split_k_factor = batch_size / ::min((uint32_t)(1 << 12), batch_size);
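// Since 1 << 12 == 4096, split_k_factor = batch_size / min(4096, batch_size);
// e.g. a batch of 65536 yields a split-k factor of 16, while any batch size
// up to 4096 yields 1.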
const GPUMatrixDynamic<T>& tmp_dL_doutput = m_output_activation == Activation::None ? dL_doutput : backward_output_tmp;
// If there are no hidden layers, the network is just a simple matmul
if (m_n_hidden_layers == 0) {
if (param_gradients_mode != EGradientMode::Ignore) {
multi_streams.emplace_back(stream, 2);
fc_multiply_split_k<LastLayerK>(multi_streams.back().get(1), tmp_dL_doutput, input.transposed(), input_gradient_matrix(), split_k_factor, param_gradient_beta);
}
if (dL_dinput) {
fc_multiply<FullLayer>(stream, input_weight_matrix(use_inference_params).transposed(), tmp_dL_doutput, *dL_dinput);
}
return;
}
uint32_t tmp_idx = (m_can_fuse_activation ? (m_n_hidden_matmuls+1) : ((m_n_hidden_matmuls+1) * 2)) - 1;
uint32_t backward_tmp_idx = 0;
// Output layer
if (param_gradients_mode != EGradientMode::Ignore) {
multi_streams.emplace_back(stream, 2);
fc_multiply_split_k<LastLayerK>(multi_streams.back().get(1), tmp_dL_doutput, forward.hidden.at(tmp_idx).transposed(), output_gradient_matrix(), split_k_factor, param_gradient_beta);
}
if (!m_can_fuse_activation) {
fc_multiply<FullLayer>(stream, output_weight_matrix(use_inference_params).transposed(), tmp_dL_doutput, backward_tmp.at(backward_tmp_idx));
activation_backward_gpu(stream, m_activation, forward.hidden.at(tmp_idx-1), backward_tmp.at(backward_tmp_idx));
} else {
fc_multiply<FullLayer>(stream, output_weight_matrix(use_inference_params).transposed(), tmp_dL_doutput, forward.hidden.at(tmp_idx), backward_tmp.at(backward_tmp_idx), m_activation, true);
}
tmp_idx -= m_can_fuse_activation ? 1 : 2;
++backward_tmp_idx;
// layers
for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) {
uint32_t matrix_idx = m_n_hidden_matmuls - i - 1;
if (param_gradients_mode != EGradientMode::Ignore) {
multi_streams.emplace_back(stream, 2);
fc_multiply_split_k<FullLayerK>(multi_streams.back().get(1), backward_tmp.at(backward_tmp_idx-1), forward.hidden.at(tmp_idx).transposed(), gradient_matrix_at(matrix_idx), split_k_factor, param_gradient_beta);
}
if (!m_can_fuse_activation) {
fc_multiply<FullLayer>(stream, weight_matrix_at(use_inference_params, matrix_idx).transposed(), backward_tmp.at(backward_tmp_idx-1), backward_tmp.at(backward_tmp_idx));
activation_backward_gpu(stream, m_activation, forward.hidden.at(tmp_idx-1), backward_tmp.at(backward_tmp_idx));
} else {
fc_multiply<FullLayer>(stream, weight_matrix_at(use_inference_params, matrix_idx).transposed(), backward_tmp.at(backward_tmp_idx-1), forward.hidden.at(tmp_idx), backward_tmp.at(backward_tmp_idx), m_activation, true);
}
tmp_idx -= m_can_fuse_activation ? 1 : 2;
++backward_tmp_idx;
}
if (param_gradients_mode != EGradientMode::Ignore) {
multi_streams.emplace_back(stream, 2);
fc_multiply_split_k<FullLayerK>(multi_streams.back().get(1), backward_tmp.at(backward_tmp_idx-1), input.transposed(), input_gradient_matrix(), split_k_factor, param_gradient_beta);
}
// If requested, compute sensitivity of loss w.r.t. inputs
if (dL_dinput) {
// optimization opportunity to only compute sensitivity w.r.t selected SUBSET of inputs. Useful for NFs, where conditional dims stay the same.
fc_multiply<FullLayer>(stream, input_weight_matrix(use_inference_params).transposed(), backward_tmp.at(backward_tmp_idx-1), *dL_dinput);
}
}
template <typename T>
std::unique_ptr<typename CutlassMLP<T>::ForwardContext> CutlassMLP<T>::allocate_forward_buffers(hipStream_t stream, uint32_t batch_size) {
auto forward = std::make_unique<ForwardContext>();
forward->hidden.resize(num_forward_activations());
for (uint32_t i = 0; i < num_forward_activations(); ++i) {
forward->hidden[i] = GPUMatrix<T>{m_network_width, batch_size, stream};
}
return forward;
}
template <typename T>
void CutlassMLP<T>::set_params(T* params, T* inference_params, T* backward_params, T* gradients) {
size_t current_pos = 0;
for (size_t i = 0; i < m_weight_matrices.size(); ++i) {
m_weight_matrices[i].set_data_unsafe(params + current_pos);
m_weight_matrices_inference[i].set_data_unsafe(inference_params + current_pos);
m_gradient_matrices[i].set_data_unsafe(gradients + current_pos);
current_pos += m_weight_matrices[i].n_elements();
}
}
template <typename T>
void CutlassMLP<T>::initialize_params(pcg32& rnd, float* params_full_precision, T* params, T* inference_params, T* backward_params, T* gradients, float scale) {
set_params(params, inference_params, backward_params, gradients);
size_t current_pos = 0;
for (size_t i = 0; i < m_weight_matrices_full_precision.size(); ++i) {
m_weight_matrices_full_precision[i].set_data_unsafe(params_full_precision + current_pos);
current_pos += m_weight_matrices_full_precision[i].n_elements();
if (m_activation == Activation::Sine) {
if (i == 0) {
m_weight_matrices_full_precision[i].initialize_siren_uniform_first(rnd, scale);
} else {
m_weight_matrices_full_precision[i].initialize_siren_uniform(rnd, scale);
}
} else {
m_weight_matrices_full_precision[i].initialize_xavier_uniform(rnd, scale);
}
}
}
// Explicitly instantiate CutlassMLP classes.
template class CutlassMLP<network_precision_t>;
TCNN_NAMESPACE_END
|
0a42eed57df90c2a97875c5badb378f90172b80c.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*//*
*/
/** @file cutlass_mlp.cu
* @author Thomas Müller, NVIDIA
* @brief CUTLASS implementation of an optimized multi-layer perceptron. Supports online training
* and simultaneous inference.
*/
#include <tiny-cuda-nn/networks/cutlass_mlp.h>
#include <tiny-cuda-nn/cutlass_matmul.h>
#include <tiny-cuda-nn/multi_stream.h>
TCNN_NAMESPACE_BEGIN
template <typename T>
CutlassMLP<T>::CutlassMLP(
uint32_t input_width,
uint32_t network_width,
uint32_t output_width,
uint32_t n_hidden_layers,
Activation activation,
Activation output_activation
) :
m_input_width{input_width},
m_network_width{network_width},
m_output_width{output_width},
m_n_hidden_layers{n_hidden_layers},
m_activation{activation},
m_output_activation{output_activation},
m_can_fuse_activation{activation != Activation::Sine}
{
m_padded_output_width = next_multiple(m_output_width, tensorcore_width);
if (n_hidden_layers > 0) {
m_n_hidden_matmuls = n_hidden_layers-1;
} else {
m_n_hidden_matmuls = 0;
}
// Create matrices related to weights
if (n_hidden_layers == 0) {
m_weight_matrices.emplace_back(nullptr, m_padded_output_width, m_input_width);
m_weight_matrices_inference.emplace_back(nullptr, m_padded_output_width, m_input_width);
m_weight_matrices_full_precision.emplace_back(nullptr, m_padded_output_width, m_input_width);
m_gradient_matrices.emplace_back(nullptr, m_padded_output_width, m_input_width);
} else {
m_weight_matrices.emplace_back(nullptr, m_network_width, m_input_width);
m_weight_matrices_inference.emplace_back(nullptr, m_network_width, m_input_width);
m_weight_matrices_full_precision.emplace_back(nullptr, m_network_width, m_input_width);
m_gradient_matrices.emplace_back(nullptr, m_network_width, m_input_width);
for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) {
m_weight_matrices.emplace_back(nullptr, m_network_width, m_network_width);
m_weight_matrices_inference.emplace_back(nullptr, m_network_width, m_network_width);
m_weight_matrices_full_precision.emplace_back(nullptr, m_network_width, m_network_width);
m_gradient_matrices.emplace_back(nullptr, m_network_width, m_network_width);
}
m_weight_matrices.emplace_back(nullptr, m_padded_output_width, m_network_width);
m_weight_matrices_inference.emplace_back(nullptr, m_padded_output_width, m_network_width);
m_weight_matrices_full_precision.emplace_back(nullptr, m_padded_output_width, m_network_width);
m_gradient_matrices.emplace_back(nullptr, m_padded_output_width, m_network_width);
}
// Determine total number of memory entries and set it
m_total_n_params = 0;
for (const auto& m : m_weight_matrices) {
m_total_n_params += m.n_elements();
}
}
template <typename CutlassLayer, typename T>
bool compute_layer(
cudaStream_t stream,
bool is_inference,
Activation activation,
const GPUMatrix<T, RM>& weights,
const GPUMatrixDynamic<T>& input,
GPUMatrixDynamic<T>& output,
GPUMatrixDynamic<T>& activation_output
) {
bool can_fuse_activation = true;
if (!is_inference) {
// Never disallow fusing if the caller passes the same output and activation_output buffers... in that case,
// invertibility of the activation function may be ignored.
can_fuse_activation &= activation != Activation::Sine || &output == &activation_output;
}
if (can_fuse_activation) {
fc_multiply<CutlassLayer>(stream, weights, input, output, activation);
} else {
fc_multiply<CutlassLayer>(stream, weights, input, output);
activation_gpu(stream, activation, output, activation_output);
}
return can_fuse_activation;
}
template <typename CutlassLayer, typename T>
bool compute_inference_layer(
cudaStream_t stream,
Activation activation,
const GPUMatrix<T, RM>& weights,
const GPUMatrixDynamic<T>& input,
GPUMatrixDynamic<T>& output
) {
return compute_layer<CutlassLayer>(stream, true, activation, weights, input, output, output);
}
template <typename T>
void CutlassMLP<T>::inference_mixed_precision_impl(cudaStream_t stream, const GPUMatrixDynamic<T>& input, GPUMatrixDynamic<T>& output, bool use_inference_params) {
// If there are no hidden layers, the network is just a simple matmul.
if (m_n_hidden_layers == 0) {
compute_inference_layer<LastLayer>(stream, m_output_activation, input_weight_matrix(use_inference_params), input, output);
return;
}
uint32_t batch_size = input.n();
GPUMatrix<T> inference_tmp[2] = {
GPUMatrix<T>{m_network_width, batch_size, stream},
GPUMatrix<T>{m_network_width, batch_size, stream},
};
m_inference_graph.capture_and_execute(stream, false, [&]() {
// Run the actual network
{
uint32_t tmp_idx = 0;
// Input layer
compute_inference_layer<FullLayer>(stream, m_activation, input_weight_matrix(use_inference_params), input, inference_tmp[tmp_idx++ % 2]);
// Hidden layers
for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) {
compute_inference_layer<FullLayer>(stream, m_activation, weight_matrix_at(use_inference_params, i), inference_tmp[(tmp_idx + 1) % 2], inference_tmp[tmp_idx % 2]);
++tmp_idx;
}
// Output
compute_inference_layer<LastLayer>(stream, m_output_activation, output_weight_matrix(use_inference_params), inference_tmp[(tmp_idx + 1) % 2], output);
}
});
}
template <typename T>
std::unique_ptr<Context> CutlassMLP<T>::forward_impl(cudaStream_t stream, const GPUMatrixDynamic<T>& input, GPUMatrixDynamic<T>* output, bool use_inference_params, bool prepare_input_gradients) {
// If there are no hidden layers, the network is just a simple matmul. No tmp buffers required
if (m_n_hidden_layers == 0) {
if (output) {
compute_layer<LastLayer>(stream, false, m_output_activation, input_weight_matrix(use_inference_params), input, *output, *output);
}
return std::make_unique<ForwardContext>(); // Nothing to save -- empty context
}
// Make sure our temporary buffers have the correct size for the given batch size
uint32_t batch_size = input.n();
auto forward = allocate_forward_buffers(stream, batch_size);
// Run the actual network
uint32_t tmp_idx = 0;
bool fused = compute_layer<FullLayer>(
stream,
false,
m_activation,
input_weight_matrix(use_inference_params),
input,
forward->hidden.at(tmp_idx),
m_can_fuse_activation ? forward->hidden.at(tmp_idx) : forward->hidden.at(tmp_idx+1)
);
tmp_idx += fused ? 1 : 2;
// layers
for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) {
fused = compute_layer<FullLayer>(
stream,
false,
m_activation,
weight_matrix_at(use_inference_params, i),
forward->hidden.at(tmp_idx-1),
forward->hidden.at(tmp_idx),
m_can_fuse_activation ? forward->hidden.at(tmp_idx) : forward->hidden.at(tmp_idx+1)
);
tmp_idx += fused ? 1 : 2;
}
if (output) {
compute_layer<LastLayer>(stream, false, m_output_activation, output_weight_matrix(use_inference_params), forward->hidden.at(tmp_idx-1), *output, *output);
}
return forward;
}
template <typename T>
void CutlassMLP<T>::backward_impl(
cudaStream_t stream,
const Context& ctx,
const GPUMatrixDynamic<T>& input,
const GPUMatrixDynamic<T>& output,
const GPUMatrixDynamic<T>& dL_doutput,
GPUMatrixDynamic<T>* dL_dinput,
bool use_inference_params,
EGradientMode param_gradients_mode
) {
// Make sure our temporary buffers have the correct size for the given batch size
uint32_t batch_size = dL_doutput.n();
std::vector<GPUMatrix<T>> backward_tmp(num_forward_activations());
for (uint32_t i = 0; i < num_forward_activations(); ++i) {
backward_tmp[i] = GPUMatrix<T>{m_network_width, batch_size, stream};
}
// Compute transfer of output activation in-place... it's treated specially for performance reasons
GPUMatrixDynamic<T> backward_output_tmp;
if (m_output_activation != Activation::None) {
backward_output_tmp = {m_padded_output_width, batch_size, stream, dL_doutput.layout()};
activation_backward_output_gpu(stream, dL_doutput.n_elements(), m_output_activation, output.data(), dL_doutput.data(), backward_output_tmp.data());
}
// Backprop
// - weight_gradient.T = activation * output_gradient.T
// - input_gradient = weights.T * output_gradient
// - RELU: pre_activation_gradient = post_activation_gradient if val > 0 else 0
const float param_gradient_beta = param_gradients_mode == EGradientMode::Accumulate ? 1.0f : 0.0f;
std::vector<SyncedMultiStream> multi_streams;
const auto& forward = dynamic_cast<const ForwardContext&>(ctx);
int split_k_factor = batch_size / std::min((uint32_t)(1 << 12), batch_size);
const GPUMatrixDynamic<T>& tmp_dL_doutput = m_output_activation == Activation::None ? dL_doutput : backward_output_tmp;
// If there are no hidden layers, the network is just a simple matmul
if (m_n_hidden_layers == 0) {
if (param_gradients_mode != EGradientMode::Ignore) {
multi_streams.emplace_back(stream, 2);
fc_multiply_split_k<LastLayerK>(multi_streams.back().get(1), tmp_dL_doutput, input.transposed(), input_gradient_matrix(), split_k_factor, param_gradient_beta);
}
if (dL_dinput) {
fc_multiply<FullLayer>(stream, input_weight_matrix(use_inference_params).transposed(), tmp_dL_doutput, *dL_dinput);
}
return;
}
uint32_t tmp_idx = (m_can_fuse_activation ? (m_n_hidden_matmuls+1) : ((m_n_hidden_matmuls+1) * 2)) - 1;
uint32_t backward_tmp_idx = 0;
// Output layer
if (param_gradients_mode != EGradientMode::Ignore) {
multi_streams.emplace_back(stream, 2);
fc_multiply_split_k<LastLayerK>(multi_streams.back().get(1), tmp_dL_doutput, forward.hidden.at(tmp_idx).transposed(), output_gradient_matrix(), split_k_factor, param_gradient_beta);
}
if (!m_can_fuse_activation) {
fc_multiply<FullLayer>(stream, output_weight_matrix(use_inference_params).transposed(), tmp_dL_doutput, backward_tmp.at(backward_tmp_idx));
activation_backward_gpu(stream, m_activation, forward.hidden.at(tmp_idx-1), backward_tmp.at(backward_tmp_idx));
} else {
fc_multiply<FullLayer>(stream, output_weight_matrix(use_inference_params).transposed(), tmp_dL_doutput, forward.hidden.at(tmp_idx), backward_tmp.at(backward_tmp_idx), m_activation, true);
}
tmp_idx -= m_can_fuse_activation ? 1 : 2;
++backward_tmp_idx;
// layers
for (uint32_t i = 0; i < m_n_hidden_matmuls; ++i) {
uint32_t matrix_idx = m_n_hidden_matmuls - i - 1;
if (param_gradients_mode != EGradientMode::Ignore) {
multi_streams.emplace_back(stream, 2);
fc_multiply_split_k<FullLayerK>(multi_streams.back().get(1), backward_tmp.at(backward_tmp_idx-1), forward.hidden.at(tmp_idx).transposed(), gradient_matrix_at(matrix_idx), split_k_factor, param_gradient_beta);
}
if (!m_can_fuse_activation) {
fc_multiply<FullLayer>(stream, weight_matrix_at(use_inference_params, matrix_idx).transposed(), backward_tmp.at(backward_tmp_idx-1), backward_tmp.at(backward_tmp_idx));
activation_backward_gpu(stream, m_activation, forward.hidden.at(tmp_idx-1), backward_tmp.at(backward_tmp_idx));
} else {
fc_multiply<FullLayer>(stream, weight_matrix_at(use_inference_params, matrix_idx).transposed(), backward_tmp.at(backward_tmp_idx-1), forward.hidden.at(tmp_idx), backward_tmp.at(backward_tmp_idx), m_activation, true);
}
tmp_idx -= m_can_fuse_activation ? 1 : 2;
++backward_tmp_idx;
}
if (param_gradients_mode != EGradientMode::Ignore) {
multi_streams.emplace_back(stream, 2);
fc_multiply_split_k<FullLayerK>(multi_streams.back().get(1), backward_tmp.at(backward_tmp_idx-1), input.transposed(), input_gradient_matrix(), split_k_factor, param_gradient_beta);
}
// If requested, compute sensitivity of loss w.r.t. inputs
if (dL_dinput) {
// optimization opportunity to only compute sensitivity w.r.t selected SUBSET of inputs. Useful for NFs, where conditional dims stay the same.
fc_multiply<FullLayer>(stream, input_weight_matrix(use_inference_params).transposed(), backward_tmp.at(backward_tmp_idx-1), *dL_dinput);
}
}
template <typename T>
std::unique_ptr<typename CutlassMLP<T>::ForwardContext> CutlassMLP<T>::allocate_forward_buffers(cudaStream_t stream, uint32_t batch_size) {
auto forward = std::make_unique<ForwardContext>();
forward->hidden.resize(num_forward_activations());
for (uint32_t i = 0; i < num_forward_activations(); ++i) {
forward->hidden[i] = GPUMatrix<T>{m_network_width, batch_size, stream};
}
return forward;
}
template <typename T>
void CutlassMLP<T>::set_params(T* params, T* inference_params, T* backward_params, T* gradients) {
size_t current_pos = 0;
for (size_t i = 0; i < m_weight_matrices.size(); ++i) {
m_weight_matrices[i].set_data_unsafe(params + current_pos);
m_weight_matrices_inference[i].set_data_unsafe(inference_params + current_pos);
m_gradient_matrices[i].set_data_unsafe(gradients + current_pos);
current_pos += m_weight_matrices[i].n_elements();
}
}
template <typename T>
void CutlassMLP<T>::initialize_params(pcg32& rnd, float* params_full_precision, T* params, T* inference_params, T* backward_params, T* gradients, float scale) {
set_params(params, inference_params, backward_params, gradients);
size_t current_pos = 0;
for (size_t i = 0; i < m_weight_matrices_full_precision.size(); ++i) {
m_weight_matrices_full_precision[i].set_data_unsafe(params_full_precision + current_pos);
current_pos += m_weight_matrices_full_precision[i].n_elements();
if (m_activation == Activation::Sine) {
if (i == 0) {
m_weight_matrices_full_precision[i].initialize_siren_uniform_first(rnd, scale);
} else {
m_weight_matrices_full_precision[i].initialize_siren_uniform(rnd, scale);
}
} else {
m_weight_matrices_full_precision[i].initialize_xavier_uniform(rnd, scale);
}
}
}
// Explicitly instantiate CutlassMLP classes.
template class CutlassMLP<network_precision_t>;
TCNN_NAMESPACE_END
|
6f5f2b4e700bf444fbb5e574220a0c3bdce685fc.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* dynamics.cu
*
* Created on: Aug 5, 2016
* Author: Abuenameh
*/
#include <iostream>
#include <limits>
#include <iterator>
#include <iomanip>
#include <fstream>
using std::cout;
using std::endl;
using std::ostream_iterator;
using std::numeric_limits;
using std::ostream;
using std::ostringstream;
using std::setprecision;
using std::ofstream;
#include <boost/algorithm/string.hpp>
using boost::algorithm::replace_all_copy;
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/complex.h>
#include <thrust/functional.h>
#include <thrust/tabulate.h>
#include <thrust/extrema.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/count.h>
using thrust::device_vector;
using thrust::host_vector;
using thrust::complex;
using thrust::counting_iterator;
using thrust::iterator_adaptor;
using thrust::use_default;
using thrust::iterator_core_access;
using thrust::equal_to;
using thrust::multiplies;
using thrust::divides;
using thrust::minus;
using thrust::tabulate;
using thrust::max_element;
using thrust::plus;
#include <Eigen/Dense>
#include <Eigen/Sparse>
//#include <Eigen/SPQRSupport>
//#include <Eigen/SparseQR>
#include <Eigen/OrderingMethods>
using Eigen::MatrixXcd;
using Eigen::VectorXcd;
using Eigen::ComputeThinU;
using Eigen::ComputeThinV;
using Eigen::CompleteOrthogonalDecomposition;
using Eigen::BDCSVD;
using Eigen::SparseMatrix;
// using Eigen::SPQR;
using Eigen::VectorXi;
// using Eigen::SparseQR;
using Eigen::COLAMDOrdering;
using Eigen::AMDOrdering;
using Eigen::NaturalOrdering;
using Eigen::Lower;
using Eigen::Matrix;
using Eigen::ComputeFullU;
using Eigen::ComputeFullV;
#include "gutzwiller.hpp"
#include "dynamics.hpp"
typedef Matrix<std::complex<double>, nmax + 1, nmax + 1> GramMatrix;
typedef Matrix<std::complex<double>, nmax + 1, 1> SiteVector;
#ifdef CPU
typedef host_vector<complex<double>> state_type;
typedef host_vector<double> double_vector;
typedef host_vector<complex<double>> complex_vector;
typedef host_vector<int> int_vector;
#else
typedef device_vector<complex<double>> state_type;
typedef device_vector<double> double_vector;
typedef device_vector<complex<double>> complex_vector;
typedef device_vector<int> int_vector;
#endif
extern void hamiltonian(state_type& fc, state_type& f, const double_vector& U0,
const double_vector& dU, const double_vector& J, const double_vector& mu,
complex_vector& norm1, complex_vector& norm2, complex_vector& norm3,
state_type& H);
extern void dynamicshamiltonian(state_type& fc, state_type& f,
const double_vector& U0, const double_vector& dU, const double_vector& J,
const double_vector& mu, complex_vector& norm1, complex_vector& norm2,
complex_vector& norm3, const double_vector U0p, const double_vector& Jp,
state_type& H);
template<typename Iterator>
class strided_range {
public:
typedef typename thrust::iterator_difference<Iterator>::type difference_type;
struct stride_functor: public thrust::unary_function<difference_type,
difference_type> {
difference_type stride;
stride_functor(difference_type stride) :
stride(stride) {
}
__host__ __device__
difference_type operator()(const difference_type& i) const {
return stride * i;
}
};
typedef typename thrust::counting_iterator<difference_type> CountingIterator;
typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
typedef typename thrust::permutation_iterator<Iterator, TransformIterator> PermutationIterator;
// type of the strided_range iterator
typedef PermutationIterator iterator;
// construct strided_range for the range [first,last)
strided_range(Iterator first, Iterator last, difference_type stride) :
first(first), last(last), stride(stride) {
}
iterator begin(void) const {
return PermutationIterator(first,
TransformIterator(CountingIterator(0), stride_functor(stride)));
}
iterator end(void) const {
return begin() + ((last - first) + (stride - 1)) / stride;
}
protected:
Iterator first;
Iterator last;
difference_type stride;
};
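// Usage sketch (illustrative): for a device_vector<int> v = {0,1,2,3,4,5,6,7,8,9},
// strided_range<device_vector<int>::iterator> r(v.begin(), v.end(), 2) iterates
// over 0, 2, 4, 6, 8 -- begin() maps index i to first + 2*i and end() rounds
// (last - first) / stride up, giving 5 elements here.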
template<typename Iterator>
class repeat_iterator: public iterator_adaptor<repeat_iterator<Iterator>,
Iterator, use_default, use_default, use_default, use_default> {
public:
typedef iterator_adaptor<repeat_iterator<Iterator>, Iterator, use_default,
use_default, use_default, use_default> super_t;
__host__ __device__
repeat_iterator(const Iterator &x, int n) :
super_t(x), begin(x), n(n) {
}
friend class iterator_core_access;
private:
unsigned int n;
const Iterator begin;
__host__ __device__
typename super_t::reference dereference() const {
return *(begin + (this->base() - begin) / n);
}
};
template<typename T>
__host__ __device__
repeat_iterator<T> make_repeat_iterator(T x, int n) {
repeat_iterator<T> a(x, n);
return a;
}
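// Usage sketch (illustrative): make_repeat_iterator(v.begin(), 3) dereferences
// *(begin + i/3), so it yields v[0], v[0], v[0], v[1], v[1], v[1], ... i.e. each
// element repeated n times; below this is used with n = nmax+1, n = L and
// n = L*(nmax+1) to expand per-site and per-simulation quantities.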
template<typename T>
struct square {
__host__ __device__
T operator()(const T& x) const {
return x * x;
}
};
template<class T>
struct divideby {
divideby(T d) :
d(d) {
}
__host__ __device__
T operator()(int i) {
return i / d;
}
T d;
};
template<typename T>
struct diff {
diff(int n) :
n(n) {
}
__host__ __device__
T operator()(int m) {
if (n == m) {
return 1;
} else {
return 0;
}
}
int n;
};
template<typename T>
struct normop {
__host__ __device__
T operator()(const complex<T>& x) const {
return norm(x);
}
};
template<typename T>
struct conjop {
__host__ __device__
complex<T> operator()(const complex<T>& x) const {
return conj(x);
}
};
template<typename T>
class mathematic {
public:
mathematic(T& v_) :
v(v_) {
}
T& v;
};
template<>
class mathematic<double> {
public:
mathematic(double d_) :
d(d_) {
}
double d;
};
template<>
class mathematic<std::complex<double> > {
public:
mathematic(std::complex<double> c_) :
c(c_) {
}
std::complex<double> c;
};
mathematic<double> mathe(double d) {
return mathematic<double>(d);
}
mathematic<std::complex<double> > mathe(std::complex<double> c) {
return mathematic<std::complex<double> >(c);
}
ostream& operator<<(ostream& out, const mathematic<double> m) {
double d = m.d;
ostringstream oss;
oss << setprecision(numeric_limits<double>::digits10) << d;
out << replace_all_copy(oss.str(), "e", "*^");
return out;
}
ostream& operator<<(ostream& out, const mathematic<std::complex<double> > m) {
std::complex<double> c = m.c;
out << "(" << mathematic<double>(c.real()) << ")+I("
<< mathematic<double>(c.imag()) << ")";
return out;
}
void dynamics::operator()(const ode_state_type& fcon, ode_state_type& dfdt,
const double t) {
vector<complex<double>> fcom(fcon.begin(), fcon.end());
state_type f0(Ndim);
thrust::copy(fcon.begin(), fcon.end(), f0.begin());
state_type fc0(Ndim);
transform(f0.begin(), f0.end(), fc0.begin(), conjop<double>());
state_type f = f0;
int N = f.size() / L / (nmax + 1);
host_vector<double> U0h(N), dUh(N*L), Jh(N*L), muh(N), U0ph(N), Jph(N*L);
for (int i = 0; i < N; i++) {
U0h[i] = U0f(i, t);
copy_n(dUf(i, t).begin(), L, dUh.begin() + i * L);
copy_n(Jf(i, t).begin(), L, Jh.begin() + i * L);
muh[i] = muf(i, t);
U0ph[i] = U0pf(i, t);
copy_n(Jpf(i, t).begin(), L, Jph.begin() + i * L);
}
double_vector U0 = U0h, dU = dUh, J = Jh, mu = muh, U0p = U0ph, Jp = Jph;
// cout << "U = " << U0h[0] << endl;
// cout << "dU = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(dUh[i]) << ",";
// }
// cout << endl;
// cout << "J = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(Jh[i]) << ",";
// }
// cout << endl;
// cout << "Up = " << U0p[0] << endl;
// cout << "Jp = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(Jph[i]) << ",";
// }
// cout << endl;
int_vector okeys(N * L);
int_vector nmaxkeys(N * L * (nmax + 1));
int_vector Lkeys(N * L);
auto nmaxrep = make_repeat_iterator(counting_iterator<int>(0), nmax + 1);
copy(nmaxrep, nmaxrep + N * L * (nmax + 1), nmaxkeys.begin());
complex_vector norms(Ndim);
transform(fc0.begin(), fc0.end(), f.begin(), norms.begin(),
multiplies<complex<double>>());
complex_vector normi(N * L);
reduce_by_key(nmaxkeys.begin(), nmaxkeys.end(), norms.begin(),
okeys.begin(), normi.begin());
auto Lrep = make_repeat_iterator(counting_iterator<int>(0), L);
copy(Lrep, Lrep + N * L, Lkeys.begin());
complex_vector norm0(N);
reduce_by_key(Lkeys.begin(), Lkeys.begin() + N * L, normi.begin(),
okeys.begin(), norm0.begin(), equal_to<int>(),
multiplies<complex<double>>());
host_vector<complex<double>> norm0h = norm0, normih = normi;
host_vector<complex<double>> norm1h(N*L), norm2h(N*L), norm3h(N*L);
for (int i = 0; i < L; i++) {
for (int j = 0; j < N; j++) {
norm1h[j * L + i] = norm0h[j] / normih[j * L + i];
norm2h[j * L + i] = norm1h[j * L + i] / normih[j * L + mod(i + 1)];
norm3h[j * L + i] = norm2h[j * L + i] / normih[j * L + mod(i + 2)];
}
}
complex_vector norm1 = norm1h, norm2 = norm2h, norm3 = norm3h;
// cout << "norm1 = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(norm1h[i]) << ",";
// }
// cout << endl;
// cout << "norm2 = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(norm2h[i]) << ",";
// }
// cout << endl;
// cout << "norm3 = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(norm3h[i]) << ",";
// }
// cout << endl;
state_type fc = fc0;
state_type H(N);
// host_vector<double> U0h = U0;
// host_vector<double> dUh = dU;
// host_vector<double> Jh = J;
// host_vector<double> muh = mu;
// host_vector<complex<double>> norm1h2 = norm1;
// host_vector<complex<double>> norm2h2 = norm2;
// host_vector<complex<double>> norm3h2 = norm3;
dynamicshamiltonian(fc, f, U0, dU, J, mu, norm1, norm2, norm3, U0p, Jp, H);
state_type E = H;
host_vector<complex<double>> Eh = E;
// cout << "{" << mathe(norm0h[0]);
// for (int i = 1; i < norm0h.size(); i++) {
// cout << "," << mathe(norm0h[i]);
// }
// cout << "}" << endl;
complex_vector dH(Ndim);
complex_vector dnorms(Ndim);
complex_vector dnormi(N * L);
complex_vector dnorm0(N);
// complex_vector dnorm1(N * L), dnorm2(N * L), dnorm3(N * L);
// complex_vector covariant(Ndim);
host_vector<complex<double>> covarianth(Ndim);
for (int i = 0; i < L; i++) {
for (int n = 0; n <= nmax; n++) {
f = f0;
fc = fc0;
for (int k = 0; k < N; k++) {
tabulate(fc.begin() + k * L * (nmax + 1) + i * (nmax + 1),
fc.begin() + k * L * (nmax + 1) + (i + 1) * (nmax + 1),
diff<double>(n));
}
transform(fc.begin(), fc.end(), f.begin(), dnorms.begin(),
multiplies<complex<double>>());
reduce_by_key(nmaxkeys.begin(), nmaxkeys.end(), dnorms.begin(),
okeys.begin(), dnormi.begin());
reduce_by_key(Lkeys.begin(), Lkeys.end(), dnormi.begin(),
okeys.begin(), dnorm0.begin(), equal_to<int>(),
multiplies<complex<double>>());
host_vector<complex<double>> dnorm0h = dnorm0, dnormih = dnormi;
host_vector<complex<double>> dnorm1h(N*L), dnorm2h(N*L), dnorm3h(N*L);
for (int k = 0; k < N; k++) {
covarianth[in(k, i, n)] = dnorm0h[k];
for (int j = 0; j < L; j++) {
dnorm1h[k * L + j] = dnorm0h[k] / dnormih[k * L + j];
dnorm2h[k * L + j] = dnorm1h[k * L + j]
/ dnormih[k * L + mod(j + 1)];
dnorm3h[k * L + j] = dnorm2h[k * L + j]
/ dnormih[k * L + mod(j + 2)];
}
}
complex_vector dnorm1 = dnorm1h, dnorm2 = dnorm2h, dnorm3 = dnorm3h;
dynamicshamiltonian(fc, f, U0, dU, J, mu, dnorm1, dnorm2, dnorm3,
U0p, Jp, H);
host_vector<complex<double>> Hh2=H;
// host_vector<double> Hh2=U0;
// cout << "{" << mathe(Hh2[0]);
// for (int i = 1; i < Hh2.size(); i++) {
// cout << "," << mathe(Hh2[i]);
// }
// cout << "}" << endl;
strided_range<state_type::iterator> stride(dH.begin() + in(i, n),
dH.end(), L * (nmax + 1));
copy(H.begin(), H.end(), stride.begin());
}
}
complex_vector covariant = covarianth;
// cout << "{" << mathe(covarianth[0]);
// for (int i = 1; i < covarianth.size(); i++) {
// cout << "," << mathe(covarianth[i]);
// }
// cout << "}" << endl;
host_vector<complex<double>> dHh = dH;
// cout << "dH" << endl;
// cout << "{" << mathe(dHh[0]);
// for (int i = 1; i < dHh.size(); i++) {
// cout << "," << mathe(dHh[i]);
// }
// cout << "}" << endl;
auto norm1rep = make_repeat_iterator(norm1.begin(), nmax + 1);
auto norm0rep = make_repeat_iterator(norm0.begin(), L * (nmax + 1));
auto Erep = make_repeat_iterator(E.begin(), L * (nmax + 1));
state_type Hi1(Ndim);
transform(dH.begin(), dH.end(), norm0rep, Hi1.begin(),
divides<complex<double>>());
// host_vector<complex<double>> Hi1h = Hi1;
// cout << "{" << mathe(Hi1h[0]);
// for (int i = 1; i < Hi1h.size(); i++) {
// cout << "," << mathe(Hi1h[i]);
// }
// cout << "}" << endl;
state_type Hi2(Ndim);
transform(covariant.begin(), covariant.end(), Erep, Hi2.begin(),
multiplies<complex<double>>());
state_type Hi3(Ndim);
complex_vector norm0sq(N * L * (nmax + 1));
transform(norm0rep, norm0rep + N * L * (nmax + 1), norm0sq.begin(),
square<complex<double>>());
transform(Hi2.begin(), Hi2.end(), norm0sq.begin(), Hi3.begin(),
divides<complex<double>>());
state_type Hi(Ndim);
transform(Hi1.begin(), Hi1.end(), Hi3.begin(), Hi.begin(),
minus<complex<double>>());
host_vector<complex<double>> Hih = Hi;
// host_vector<complex<double>> fh = f;
// host_vector<complex<double>> norm0h = norm0;
// host_vector<complex<double>> norm1h = norm1;
// host_vector<complex<double>> normih = normi;
// host_vector<complex<double>> covarianth = covariant;
complex_vector ddnorms(Ndim);
complex_vector ddnormi(N * L);
complex_vector ddnorm0(N);
MatrixXcd Gij = MatrixXcd::Zero(Ndim, Ndim);
// SparseMatrix<std::complex<double>> Gij(Ndim, Ndim);
// Gij.reserve(VectorXi::Constant(Ndim, nmax+1));
// for (int k = 0; k < N; k++) {
for (int i = 0; i < L; i++) {
for (int n = 0; n <= nmax; n++) {
for (int m = 0; m <= nmax; m++) {
fc = fc0;
for (int k = 0; k < N; k++) {
tabulate(fc.begin() + k * L * (nmax + 1) + i * (nmax + 1),
fc.begin() + k * L * (nmax + 1) + (i + 1) * (nmax + 1),
diff<double>(n));
}
f = f0;
for (int k = 0; k < N; k++) {
tabulate(f.begin() + k * L * (nmax + 1) + i * (nmax + 1),
f.begin() + k * L * (nmax + 1) + (i + 1) * (nmax + 1),
diff<double>(m));
}
// host_vector<complex<double>> ddnorms(Ndim), ddnormi(N*L), ddnorm0(N);
transform(fc.begin(), fc.end(), f.begin(), ddnorms.begin(),
multiplies<complex<double>>());
reduce_by_key(nmaxkeys.begin(), nmaxkeys.end(), ddnorms.begin(),
okeys.begin(), ddnormi.begin());
reduce_by_key(Lkeys.begin(), Lkeys.end(), ddnormi.begin(),
okeys.begin(), ddnorm0.begin(), equal_to<int>(),
multiplies<complex<double>>());
host_vector<complex<double>> ddnorm0h = ddnorm0;
for (int k = 0; k < N; k++) {
Gij(in(k, i, n), in(k, i, m)) = std::complex<double>(
ddnorm0h[k] / norm0h[k]
- covarianth[in(k, i, n)]
* conj(covarianth[in(k, i, m)])
/ (norm0h[k] * norm0h[k]));
// Gij.insert(in(k, i, n), in(k, i, m)) = std::complex<double>(1, 0)*(std::complex<double>(ddnorm0[k]/norm0h[k] - covarianth[in(k, i, n)]
// * conj(covarianth[in(k, i, m)])
// / (norm0h[k] * norm0h[k])));
}
}
}
}
// Gij.makeCompressed();
#ifndef __HIPCC__
VectorXcd Hiv(Ndim);
for (int i = 0; i < Ndim; i++) {
Hiv[i] = Hih[i];
}
VectorXcd dfdtv = Gij.completeOrthogonalDecomposition().solve(Hiv);
for (int i = 0; i < Ndim; i++) {
dfdt[i] = -std::complex<double>(0, 1) * dfdtv[i];
}
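// Gij appears to play the role of a Gram/overlap matrix and Hih a gradient
// vector: the complete orthogonal decomposition above solves Gij * x = Hi
// (least-squares/minimum-norm for a possibly rank-deficient Gij) and the
// resulting equations of motion are dfdt = -i * x.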
// cout << "{" << mathe(Hiv[0]);
// for (int i = 1; i < Hiv.size(); i++) {
// cout << "," << mathe(Hiv[i]);
// }
// cout << "}" << endl;
// cout << "{" << mathe(dfdt[0]);
// for (int i = 1; i < dfdt.size(); i++) {
// cout << "," << mathe(dfdt[i]);
// }
// cout << "}" << endl;
//// std::copy(dfdt.begin(), dfdt.end(), ostream_iterator<std::complex<double>>(cout,","));
// cout << endl;
#endif
}
|
6f5f2b4e700bf444fbb5e574220a0c3bdce685fc.cu
|
/*
* dynamics.cu
*
* Created on: Aug 5, 2016
* Author: Abuenameh
*/
#include <iostream>
#include <limits>
#include <iterator>
#include <iomanip>
#include <fstream>
using std::cout;
using std::endl;
using std::ostream_iterator;
using std::numeric_limits;
using std::ostream;
using std::ostringstream;
using std::setprecision;
using std::ofstream;
#include <boost/algorithm/string.hpp>
using boost::algorithm::replace_all_copy;
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/complex.h>
#include <thrust/functional.h>
#include <thrust/tabulate.h>
#include <thrust/extrema.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/count.h>
using thrust::device_vector;
using thrust::host_vector;
using thrust::complex;
using thrust::counting_iterator;
using thrust::iterator_adaptor;
using thrust::use_default;
using thrust::iterator_core_access;
using thrust::equal_to;
using thrust::multiplies;
using thrust::divides;
using thrust::minus;
using thrust::tabulate;
using thrust::max_element;
using thrust::plus;
#include <Eigen/Dense>
#include <Eigen/Sparse>
//#include <Eigen/SPQRSupport>
//#include <Eigen/SparseQR>
#include <Eigen/OrderingMethods>
using Eigen::MatrixXcd;
using Eigen::VectorXcd;
using Eigen::ComputeThinU;
using Eigen::ComputeThinV;
using Eigen::CompleteOrthogonalDecomposition;
using Eigen::BDCSVD;
using Eigen::SparseMatrix;
// using Eigen::SPQR;
using Eigen::VectorXi;
// using Eigen::SparseQR;
using Eigen::COLAMDOrdering;
using Eigen::AMDOrdering;
using Eigen::NaturalOrdering;
using Eigen::Lower;
using Eigen::Matrix;
using Eigen::ComputeFullU;
using Eigen::ComputeFullV;
#include "gutzwiller.hpp"
#include "dynamics.hpp"
typedef Matrix<std::complex<double>, nmax + 1, nmax + 1> GramMatrix;
typedef Matrix<std::complex<double>, nmax + 1, 1> SiteVector;
#ifdef CPU
typedef host_vector<complex<double>> state_type;
typedef host_vector<double> double_vector;
typedef host_vector<complex<double>> complex_vector;
typedef host_vector<int> int_vector;
#else
typedef device_vector<complex<double>> state_type;
typedef device_vector<double> double_vector;
typedef device_vector<complex<double>> complex_vector;
typedef device_vector<int> int_vector;
#endif
extern void hamiltonian(state_type& fc, state_type& f, const double_vector& U0,
const double_vector& dU, const double_vector& J, const double_vector& mu,
complex_vector& norm1, complex_vector& norm2, complex_vector& norm3,
state_type& H);
extern void dynamicshamiltonian(state_type& fc, state_type& f,
const double_vector& U0, const double_vector& dU, const double_vector& J,
const double_vector& mu, complex_vector& norm1, complex_vector& norm2,
complex_vector& norm3, const double_vector U0p, const double_vector& Jp,
state_type& H);
template<typename Iterator>
class strided_range {
public:
typedef typename thrust::iterator_difference<Iterator>::type difference_type;
struct stride_functor: public thrust::unary_function<difference_type,
difference_type> {
difference_type stride;
stride_functor(difference_type stride) :
stride(stride) {
}
__host__ __device__
difference_type operator()(const difference_type& i) const {
return stride * i;
}
};
typedef typename thrust::counting_iterator<difference_type> CountingIterator;
typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
typedef typename thrust::permutation_iterator<Iterator, TransformIterator> PermutationIterator;
// type of the strided_range iterator
typedef PermutationIterator iterator;
// construct strided_range for the range [first,last)
strided_range(Iterator first, Iterator last, difference_type stride) :
first(first), last(last), stride(stride) {
}
iterator begin(void) const {
return PermutationIterator(first,
TransformIterator(CountingIterator(0), stride_functor(stride)));
}
iterator end(void) const {
return begin() + ((last - first) + (stride - 1)) / stride;
}
protected:
Iterator first;
Iterator last;
difference_type stride;
};
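// Iterator adaptor that repeats each element of the underlying range n times,
// e.g. [a, b] with n = 2 is traversed as a, a, b, b.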
template<typename Iterator>
class repeat_iterator: public iterator_adaptor<repeat_iterator<Iterator>,
Iterator, use_default, use_default, use_default, use_default> {
public:
typedef iterator_adaptor<repeat_iterator<Iterator>, Iterator, use_default,
use_default, use_default, use_default> super_t;
__host__ __device__
repeat_iterator(const Iterator &x, int n) :
super_t(x), begin(x), n(n) {
}
friend class iterator_core_access;
private:
unsigned int n;
const Iterator begin;
__host__ __device__
typename super_t::reference dereference() const {
return *(begin + (this->base() - begin) / n);
}
};
template<typename T>
__host__ __device__
repeat_iterator<T> make_repeat_iterator(T x, int n) {
repeat_iterator<T> a(x, n);
return a;
}
template<typename T>
struct square {
__host__ __device__
T operator()(const T& x) const {
return x * x;
}
};
template<class T>
struct divideby {
divideby(T d) :
d(d) {
}
__host__ __device__
T operator()(int i) {
return i / d;
}
T d;
};
template<typename T>
struct diff {
diff(int n) :
n(n) {
}
__host__ __device__
T operator()(int m) {
if (n == m) {
return 1;
} else {
return 0;
}
}
int n;
};
template<typename T>
struct normop {
__host__ __device__
T operator()(const complex<T>& x) const {
return norm(x);
}
};
template<typename T>
struct conjop {
__host__ __device__
complex<T> operator()(const complex<T>& x) const {
return conj(x);
}
};
template<typename T>
class mathematic {
public:
mathematic(T& v_) :
v(v_) {
}
T& v;
};
template<>
class mathematic<double> {
public:
mathematic(double d_) :
d(d_) {
}
double d;
};
template<>
class mathematic<std::complex<double> > {
public:
mathematic(std::complex<double> c_) :
c(c_) {
}
std::complex<double> c;
};
mathematic<double> mathe(double d) {
return mathematic<double>(d);
}
mathematic<std::complex<double> > mathe(std::complex<double> c) {
return mathematic<std::complex<double> >(c);
}
ostream& operator<<(ostream& out, const mathematic<double> m) {
double d = m.d;
ostringstream oss;
oss << setprecision(numeric_limits<double>::digits10) << d;
out << replace_all_copy(oss.str(), "e", "*^");
return out;
}
ostream& operator<<(ostream& out, const mathematic<std::complex<double> > m) {
std::complex<double> c = m.c;
out << "(" << mathematic<double>(c.real()) << ")+I("
<< mathematic<double>(c.imag()) << ")";
return out;
}
void dynamics::operator()(const ode_state_type& fcon, ode_state_type& dfdt,
const double t) {
vector<complex<double>> fcom(fcon.begin(), fcon.end());
state_type f0(Ndim);
thrust::copy(fcon.begin(), fcon.end(), f0.begin());
state_type fc0(Ndim);
transform(f0.begin(), f0.end(), fc0.begin(), conjop<double>());
state_type f = f0;
int N = f.size() / L / (nmax + 1);
host_vector<double> U0h(N), dUh(N*L), Jh(N*L), muh(N), U0ph(N), Jph(N*L);
for (int i = 0; i < N; i++) {
U0h[i] = U0f(i, t);
copy_n(dUf(i, t).begin(), L, dUh.begin() + i * L);
copy_n(Jf(i, t).begin(), L, Jh.begin() + i * L);
muh[i] = muf(i, t);
U0ph[i] = U0pf(i, t);
copy_n(Jpf(i, t).begin(), L, Jph.begin() + i * L);
}
double_vector U0 = U0h, dU = dUh, J = Jh, mu = muh, U0p = U0ph, Jp = Jph;
// cout << "U = " << U0h[0] << endl;
// cout << "dU = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(dUh[i]) << ",";
// }
// cout << endl;
// cout << "J = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(Jh[i]) << ",";
// }
// cout << endl;
// cout << "Up = " << U0p[0] << endl;
// cout << "Jp = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(Jph[i]) << ",";
// }
// cout << endl;
int_vector okeys(N * L);
int_vector nmaxkeys(N * L * (nmax + 1));
int_vector Lkeys(N * L);
auto nmaxrep = make_repeat_iterator(counting_iterator<int>(0), nmax + 1);
copy(nmaxrep, nmaxrep + N * L * (nmax + 1), nmaxkeys.begin());
complex_vector norms(Ndim);
transform(fc0.begin(), fc0.end(), f.begin(), norms.begin(),
multiplies<complex<double>>());
complex_vector normi(N * L);
reduce_by_key(nmaxkeys.begin(), nmaxkeys.end(), norms.begin(),
okeys.begin(), normi.begin());
auto Lrep = make_repeat_iterator(counting_iterator<int>(0), L);
copy(Lrep, Lrep + N * L, Lkeys.begin());
complex_vector norm0(N);
reduce_by_key(Lkeys.begin(), Lkeys.begin() + N * L, normi.begin(),
okeys.begin(), norm0.begin(), equal_to<int>(),
multiplies<complex<double>>());
host_vector<complex<double>> norm0h = norm0, normih = normi;
host_vector<complex<double>> norm1h(N*L), norm2h(N*L), norm3h(N*L);
for (int i = 0; i < L; i++) {
for (int j = 0; j < N; j++) {
norm1h[j * L + i] = norm0h[j] / normih[j * L + i];
norm2h[j * L + i] = norm1h[j * L + i] / normih[j * L + mod(i + 1)];
norm3h[j * L + i] = norm2h[j * L + i] / normih[j * L + mod(i + 2)];
}
}
complex_vector norm1 = norm1h, norm2 = norm2h, norm3 = norm3h;
// cout << "norm1 = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(norm1h[i]) << ",";
// }
// cout << endl;
// cout << "norm2 = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(norm2h[i]) << ",";
// }
// cout << endl;
// cout << "norm3 = ";
// for (int i = 0; i < L; i++) {
// cout << mathe(norm3h[i]) << ",";
// }
// cout << endl;
state_type fc = fc0;
state_type H(N);
// host_vector<double> U0h = U0;
// host_vector<double> dUh = dU;
// host_vector<double> Jh = J;
// host_vector<double> muh = mu;
// host_vector<complex<double>> norm1h2 = norm1;
// host_vector<complex<double>> norm2h2 = norm2;
// host_vector<complex<double>> norm3h2 = norm3;
dynamicshamiltonian(fc, f, U0, dU, J, mu, norm1, norm2, norm3, U0p, Jp, H);
state_type E = H;
host_vector<complex<double>> Eh = E;
// cout << "{" << mathe(norm0h[0]);
// for (int i = 1; i < norm0h.size(); i++) {
// cout << "," << mathe(norm0h[i]);
// }
// cout << "}" << endl;
complex_vector dH(Ndim);
complex_vector dnorms(Ndim);
complex_vector dnormi(N * L);
complex_vector dnorm0(N);
// complex_vector dnorm1(N * L), dnorm2(N * L), dnorm3(N * L);
// complex_vector covariant(Ndim);
host_vector<complex<double>> covarianth(Ndim);
for (int i = 0; i < L; i++) {
for (int n = 0; n <= nmax; n++) {
f = f0;
fc = fc0;
for (int k = 0; k < N; k++) {
tabulate(fc.begin() + k * L * (nmax + 1) + i * (nmax + 1),
fc.begin() + k * L * (nmax + 1) + (i + 1) * (nmax + 1),
diff<double>(n));
}
transform(fc.begin(), fc.end(), f.begin(), dnorms.begin(),
multiplies<complex<double>>());
reduce_by_key(nmaxkeys.begin(), nmaxkeys.end(), dnorms.begin(),
okeys.begin(), dnormi.begin());
reduce_by_key(Lkeys.begin(), Lkeys.end(), dnormi.begin(),
okeys.begin(), dnorm0.begin(), equal_to<int>(),
multiplies<complex<double>>());
host_vector<complex<double>> dnorm0h = dnorm0, dnormih = dnormi;
host_vector<complex<double>> dnorm1h(N*L), dnorm2h(N*L), dnorm3h(N*L);
for (int k = 0; k < N; k++) {
covarianth[in(k, i, n)] = dnorm0h[k];
for (int j = 0; j < L; j++) {
dnorm1h[k * L + j] = dnorm0h[k] / dnormih[k * L + j];
dnorm2h[k * L + j] = dnorm1h[k * L + j]
/ dnormih[k * L + mod(j + 1)];
dnorm3h[k * L + j] = dnorm2h[k * L + j]
/ dnormih[k * L + mod(j + 2)];
}
}
complex_vector dnorm1 = dnorm1h, dnorm2 = dnorm2h, dnorm3 = dnorm3h;
dynamicshamiltonian(fc, f, U0, dU, J, mu, dnorm1, dnorm2, dnorm3,
U0p, Jp, H);
host_vector<complex<double>> Hh2=H;
// host_vector<double> Hh2=U0;
// cout << "{" << mathe(Hh2[0]);
// for (int i = 1; i < Hh2.size(); i++) {
// cout << "," << mathe(Hh2[i]);
// }
// cout << "}" << endl;
strided_range<state_type::iterator> stride(dH.begin() + in(i, n),
dH.end(), L * (nmax + 1));
copy(H.begin(), H.end(), stride.begin());
}
}
complex_vector covariant = covarianth;
// cout << "{" << mathe(covarianth[0]);
// for (int i = 1; i < covarianth.size(); i++) {
// cout << "," << mathe(covarianth[i]);
// }
// cout << "}" << endl;
host_vector<complex<double>> dHh = dH;
// cout << "dH" << endl;
// cout << "{" << mathe(dHh[0]);
// for (int i = 1; i < dHh.size(); i++) {
// cout << "," << mathe(dHh[i]);
// }
// cout << "}" << endl;
auto norm1rep = make_repeat_iterator(norm1.begin(), nmax + 1);
auto norm0rep = make_repeat_iterator(norm0.begin(), L * (nmax + 1));
auto Erep = make_repeat_iterator(E.begin(), L * (nmax + 1));
state_type Hi1(Ndim);
transform(dH.begin(), dH.end(), norm0rep, Hi1.begin(),
divides<complex<double>>());
// host_vector<complex<double>> Hi1h = Hi1;
// cout << "{" << mathe(Hi1h[0]);
// for (int i = 1; i < Hi1h.size(); i++) {
// cout << "," << mathe(Hi1h[i]);
// }
// cout << "}" << endl;
state_type Hi2(Ndim);
transform(covariant.begin(), covariant.end(), Erep, Hi2.begin(),
multiplies<complex<double>>());
state_type Hi3(Ndim);
complex_vector norm0sq(N * L * (nmax + 1));
transform(norm0rep, norm0rep + N * L * (nmax + 1), norm0sq.begin(),
square<complex<double>>());
transform(Hi2.begin(), Hi2.end(), norm0sq.begin(), Hi3.begin(),
divides<complex<double>>());
state_type Hi(Ndim);
transform(Hi1.begin(), Hi1.end(), Hi3.begin(), Hi.begin(),
minus<complex<double>>());
host_vector<complex<double>> Hih = Hi;
// host_vector<complex<double>> fh = f;
// host_vector<complex<double>> norm0h = norm0;
// host_vector<complex<double>> norm1h = norm1;
// host_vector<complex<double>> normih = normi;
// host_vector<complex<double>> covarianth = covariant;
complex_vector ddnorms(Ndim);
complex_vector ddnormi(N * L);
complex_vector ddnorm0(N);
MatrixXcd Gij = MatrixXcd::Zero(Ndim, Ndim);
// SparseMatrix<std::complex<double>> Gij(Ndim, Ndim);
// Gij.reserve(VectorXi::Constant(Ndim, nmax+1));
// for (int k = 0; k < N; k++) {
for (int i = 0; i < L; i++) {
for (int n = 0; n <= nmax; n++) {
for (int m = 0; m <= nmax; m++) {
fc = fc0;
for (int k = 0; k < N; k++) {
tabulate(fc.begin() + k * L * (nmax + 1) + i * (nmax + 1),
fc.begin() + k * L * (nmax + 1) + (i + 1) * (nmax + 1),
diff<double>(n));
}
f = f0;
for (int k = 0; k < N; k++) {
tabulate(f.begin() + k * L * (nmax + 1) + i * (nmax + 1),
f.begin() + k * L * (nmax + 1) + (i + 1) * (nmax + 1),
diff<double>(m));
}
// host_vector<complex<double>> ddnorms(Ndim), ddnormi(N*L), ddnorm0(N);
transform(fc.begin(), fc.end(), f.begin(), ddnorms.begin(),
multiplies<complex<double>>());
reduce_by_key(nmaxkeys.begin(), nmaxkeys.end(), ddnorms.begin(),
okeys.begin(), ddnormi.begin());
reduce_by_key(Lkeys.begin(), Lkeys.end(), ddnormi.begin(),
okeys.begin(), ddnorm0.begin(), equal_to<int>(),
multiplies<complex<double>>());
host_vector<complex<double>> ddnorm0h = ddnorm0;
for (int k = 0; k < N; k++) {
Gij(in(k, i, n), in(k, i, m)) = std::complex<double>(
ddnorm0h[k] / norm0h[k]
- covarianth[in(k, i, n)]
* conj(covarianth[in(k, i, m)])
/ (norm0h[k] * norm0h[k]));
// Gij.insert(in(k, i, n), in(k, i, m)) = std::complex<double>(1, 0)*(std::complex<double>(ddnorm0[k]/norm0h[k] - covarianth[in(k, i, n)]
// * conj(covarianth[in(k, i, m)])
// / (norm0h[k] * norm0h[k])));
}
}
}
}
// Gij.makeCompressed();
#ifndef __CUDACC__
VectorXcd Hiv(Ndim);
for (int i = 0; i < Ndim; i++) {
Hiv[i] = Hih[i];
}
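// Solve the (possibly rank-deficient) linear system Gij * x = Hi with a complete
// orthogonal decomposition (minimum-norm least-squares), then scale by -i below
// to obtain df/dt.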
VectorXcd dfdtv = Gij.completeOrthogonalDecomposition().solve(Hiv);
for (int i = 0; i < Ndim; i++) {
dfdt[i] = -std::complex<double>(0, 1) * dfdtv[i];
}
// cout << "{" << mathe(Hiv[0]);
// for (int i = 1; i < Hiv.size(); i++) {
// cout << "," << mathe(Hiv[i]);
// }
// cout << "}" << endl;
// cout << "{" << mathe(dfdt[0]);
// for (int i = 1; i < dfdt.size(); i++) {
// cout << "," << mathe(dfdt[i]);
// }
// cout << "}" << endl;
//// std::copy(dfdt.begin(), dfdt.end(), ostream_iterator<std::complex<double>>(cout,","));
// cout << endl;
#endif
}
|
aa4c2247624a7ec9375190f116e0ce6c7ab1f9c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void count_sort(int *x, int *y, int size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size){
int count = 0;
for(int j = 0; j < size; j++){
if (x[j] < x[idx])
count++;
else if (x[j] == x[idx] && j < idx)
count++;
}
y[count] = x[idx];
}
}
|
aa4c2247624a7ec9375190f116e0ce6c7ab1f9c9.cu
|
#include "includes.h"
__global__ void count_sort(int *x, int *y, int size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size){
int count = 0;
for(int j = 0; j < size; j++){
if (x[j] < x[idx])
count++;
else if (x[j] == x[idx] && j < idx)
count++;
}
y[count] = x[idx];
}
}
|
4618f4e3d94e1016c9eaf689e884f05f995de9f1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cudaHelper.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/memory.h"
#include "saiga/cuda/reduce.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/core/math/math.h"
#include <fstream>
#include <random>
using Saiga::ArrayView;
using Saiga::CUDA::ThreadInfo;
//#define LECTURE
std::ofstream outstrm;
HD inline uint32_t simpleRand(uint32_t state)
{
/* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */
uint32_t x = state;
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
return x;
}
template <typename T, unsigned int BLOCK_SIZE, unsigned int K>
__global__ static void randomAccessSimple(ArrayView<T> data, ArrayView<T> result)
{
ThreadInfo<BLOCK_SIZE> ti;
if (ti.thread_id >= result.size()) return;
uint32_t r = ti.thread_id * 17;
T sum = 0;
for (int i = 0; i < K; ++i)
{
r = simpleRand(r);
auto index = r % data.size();
// sum += data[index];
sum += Saiga::CUDA::ldg(data.data() + index);
}
// Reduce the cache impact of the output array
sum = Saiga::CUDA::warpReduceSum<T>(sum);
if (ti.lane_id == 0) result[ti.warp_id] = sum;
}
#ifndef LECTURE
template <typename T, unsigned int BLOCK_SIZE, unsigned int K>
__global__ static void randomAccessConstRestricted(ArrayView<T> vdata, const T* __restrict__ data, ArrayView<T> result)
{
ThreadInfo<BLOCK_SIZE> ti;
if (ti.thread_id >= result.size()) return;
uint32_t r = ti.thread_id * 17;
T sum = 0;
for (int i = 0; i < K; ++i)
{
r = simpleRand(r);
auto index = r % vdata.size();
sum += data[index];
}
// Reduce the cache impact of the output array
sum = Saiga::CUDA::warpReduceSum<T>(sum);
if (ti.lane_id == 0) result[ti.warp_id] = sum;
}
template <typename T, unsigned int BLOCK_SIZE, unsigned int K>
__global__ static void randomAccessLdg(ArrayView<T> data, ArrayView<T> result)
{
ThreadInfo<BLOCK_SIZE> ti;
if (ti.thread_id >= result.size()) return;
uint32_t r = ti.thread_id * 17;
T sum = 0;
for (int i = 0; i < K; ++i)
{
r = simpleRand(r);
auto index = r % data.size();
sum += Saiga::CUDA::ldg(data.data() + index);
}
// Reduce the cache impact of the output array
sum = Saiga::CUDA::warpReduceSum<T>(sum);
if (ti.lane_id == 0) result[ti.warp_id] = sum;
}
static texture<int, 1, hipReadModeElementType> dataTexture;
template <typename T, unsigned int BLOCK_SIZE, unsigned int K>
__global__ static void randomAccessTexture(ArrayView<T> data, ArrayView<T> result)
{
ThreadInfo<BLOCK_SIZE> ti;
if (ti.thread_id >= result.size()) return;
uint32_t r = ti.thread_id * 17;
T sum = 0;
for (int i = 0; i < K; ++i)
{
r = simpleRand(r);
auto index = r % data.size();
sum += tex1Dfetch(dataTexture, index);
}
// Reduce the cache impact of the output array
sum = Saiga::CUDA::warpReduceSum<T>(sum);
if (ti.lane_id == 0) result[ti.warp_id] = sum;
}
#endif
template <typename ElementType>
void randomAccessTest2(int numIndices, int numElements)
{
const int K = 16;
outstrm << numIndices * sizeof(int) / 1024 << ",";
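// Rough estimate of the bytes moved per kernel run: one reduced output write per
// warp (hence the division by 32) plus K random sizeof(int) reads per thread.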
size_t readWrites = numElements * sizeof(ElementType) / 32 + numElements * sizeof(int) * K;
Saiga::CUDA::PerformanceTestHelper test("Coalesced processing test. numIndices: " + std::to_string(numIndices) +
" numElements: " + std::to_string(numElements),
readWrites);
thrust::host_vector<ElementType> data(numIndices);
thrust::host_vector<ElementType> result(numElements, 0);
thrust::host_vector<ElementType> ref(numElements);
thrust::device_vector<ElementType> d_data(data);
thrust::device_vector<ElementType> d_result(result);
int its = 50;
const int BLOCK_SIZE = 128;
const int BLOCKS = Saiga::CUDA::getBlockCount(numElements, BLOCK_SIZE);
{
d_result = result;
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(
its, [&]() {hipLaunchKernelGGL(( randomAccessSimple<ElementType, BLOCK_SIZE, K>), dim3(BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data, d_result); });
test.addMeassurement("randomAccessSimple", st.median);
outstrm << test.bandwidth(st.median) << ",";
CUDA_SYNC_CHECK_ERROR();
}
// SAIGA_ASSERT(ref == d_result);
#ifndef LECTURE
{
d_result = result;
// hipFuncSetCacheConfig(randomAccessConstRestricted<ElementType,BLOCK_SIZE,K>,hipFuncCachePreferShared);
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
hipLaunchKernelGGL(( randomAccessConstRestricted<ElementType, BLOCK_SIZE, K>)
, dim3(BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data, d_data.data().get(), d_result);
});
test.addMeassurement("randomAccessConstRestricted", st.median);
outstrm << test.bandwidth(st.median) << ",";
CUDA_SYNC_CHECK_ERROR();
}
// SAIGA_ASSERT(ref == d_result);
{
d_result = result;
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
hipLaunchKernelGGL(( randomAccessLdg<ElementType, BLOCK_SIZE, K>), dim3(BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data, d_result);
});
test.addMeassurement("randomAccessLdg", st.median);
outstrm << test.bandwidth(st.median) << ",";
CUDA_SYNC_CHECK_ERROR();
}
// SAIGA_ASSERT(ref == d_result);
#endif
{
hipBindTexture(0, dataTexture, d_data.data().get(), d_data.size() * sizeof(ElementType));
d_result = result;
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(
its, [&]() {hipLaunchKernelGGL(( randomAccessTexture<ElementType, BLOCK_SIZE, K>), dim3(BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data, d_result); });
test.addMeassurement("randomAccessTexture", st.median);
outstrm << test.bandwidth(st.median);
hipUnbindTexture(dataTexture);
CUDA_SYNC_CHECK_ERROR();
}
outstrm << std::endl;
return;
}
int main(int argc, char* argv[])
{
// hipDeviceSetCacheConfig(hipFuncCachePreferL1);
// hipDeviceSetCacheConfig(hipFuncCachePreferShared);
outstrm.open("out.csv");
outstrm << "size,simple,cr,ldg,texture" << std::endl;
#ifdef LECTURE
int start = 8;
int end = 9;
randomAccessTest2<int>(1 << 12, 1 * 1024 * 1024);
#else
int start = 8;
int end = 24;
for (int i = start; i < end; ++i)
{
randomAccessTest2<int>(1 << i, 1 * 1024 * 1024);
if (i > 0) randomAccessTest2<int>((1 << i) + (1 << (i - 1)), 1 * 1024 * 1024);
}
#endif
CUDA_SYNC_CHECK_ERROR();
return 0;
}
|
4618f4e3d94e1016c9eaf689e884f05f995de9f1.cu
|
/**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cudaHelper.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/memory.h"
#include "saiga/cuda/reduce.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/core/math/math.h"
#include <fstream>
#include <random>
using Saiga::ArrayView;
using Saiga::CUDA::ThreadInfo;
//#define LECTURE
std::ofstream outstrm;
HD inline uint32_t simpleRand(uint32_t state)
{
/* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */
uint32_t x = state;
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
return x;
}
template <typename T, unsigned int BLOCK_SIZE, unsigned int K>
__global__ static void randomAccessSimple(ArrayView<T> data, ArrayView<T> result)
{
ThreadInfo<BLOCK_SIZE> ti;
if (ti.thread_id >= result.size()) return;
uint32_t r = ti.thread_id * 17;
T sum = 0;
for (int i = 0; i < K; ++i)
{
r = simpleRand(r);
auto index = r % data.size();
// sum += data[index];
sum += Saiga::CUDA::ldg(data.data() + index);
}
// Reduce the cache impact of the output array
sum = Saiga::CUDA::warpReduceSum<T>(sum);
if (ti.lane_id == 0) result[ti.warp_id] = sum;
}
#ifndef LECTURE
template <typename T, unsigned int BLOCK_SIZE, unsigned int K>
__global__ static void randomAccessConstRestricted(ArrayView<T> vdata, const T* __restrict__ data, ArrayView<T> result)
{
ThreadInfo<BLOCK_SIZE> ti;
if (ti.thread_id >= result.size()) return;
uint32_t r = ti.thread_id * 17;
T sum = 0;
for (int i = 0; i < K; ++i)
{
r = simpleRand(r);
auto index = r % vdata.size();
sum += data[index];
}
// Reduce the cache impact of the output array
sum = Saiga::CUDA::warpReduceSum<T>(sum);
if (ti.lane_id == 0) result[ti.warp_id] = sum;
}
template <typename T, unsigned int BLOCK_SIZE, unsigned int K>
__global__ static void randomAccessLdg(ArrayView<T> data, ArrayView<T> result)
{
ThreadInfo<BLOCK_SIZE> ti;
if (ti.thread_id >= result.size()) return;
uint32_t r = ti.thread_id * 17;
T sum = 0;
for (int i = 0; i < K; ++i)
{
r = simpleRand(r);
auto index = r % data.size();
sum += Saiga::CUDA::ldg(data.data() + index);
}
// Reduce the cache impact of the output array
sum = Saiga::CUDA::warpReduceSum<T>(sum);
if (ti.lane_id == 0) result[ti.warp_id] = sum;
}
static texture<int, 1, cudaReadModeElementType> dataTexture;
template <typename T, unsigned int BLOCK_SIZE, unsigned int K>
__global__ static void randomAccessTexture(ArrayView<T> data, ArrayView<T> result)
{
ThreadInfo<BLOCK_SIZE> ti;
if (ti.thread_id >= result.size()) return;
uint32_t r = ti.thread_id * 17;
T sum = 0;
for (int i = 0; i < K; ++i)
{
r = simpleRand(r);
auto index = r % data.size();
sum += tex1Dfetch(dataTexture, index);
}
// Reduce the cache impact of the output array
sum = Saiga::CUDA::warpReduceSum<T>(sum);
if (ti.lane_id == 0) result[ti.warp_id] = sum;
}
#endif
template <typename ElementType>
void randomAccessTest2(int numIndices, int numElements)
{
const int K = 16;
outstrm << numIndices * sizeof(int) / 1024 << ",";
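// Rough estimate of the bytes moved per kernel run: one reduced output write per
// warp (hence the division by 32) plus K random sizeof(int) reads per thread.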
size_t readWrites = numElements * sizeof(ElementType) / 32 + numElements * sizeof(int) * K;
Saiga::CUDA::PerformanceTestHelper test("Coalesced processing test. numIndices: " + std::to_string(numIndices) +
" numElements: " + std::to_string(numElements),
readWrites);
thrust::host_vector<ElementType> data(numIndices);
thrust::host_vector<ElementType> result(numElements, 0);
thrust::host_vector<ElementType> ref(numElements);
thrust::device_vector<ElementType> d_data(data);
thrust::device_vector<ElementType> d_result(result);
int its = 50;
const int BLOCK_SIZE = 128;
const int BLOCKS = Saiga::CUDA::getBlockCount(numElements, BLOCK_SIZE);
{
d_result = result;
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(
its, [&]() { randomAccessSimple<ElementType, BLOCK_SIZE, K><<<BLOCKS, BLOCK_SIZE>>>(d_data, d_result); });
test.addMeassurement("randomAccessSimple", st.median);
outstrm << test.bandwidth(st.median) << ",";
CUDA_SYNC_CHECK_ERROR();
}
// SAIGA_ASSERT(ref == d_result);
#ifndef LECTURE
{
d_result = result;
// cudaFuncSetCacheConfig(randomAccessConstRestricted<ElementType,BLOCK_SIZE,K>,cudaFuncCachePreferShared);
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
randomAccessConstRestricted<ElementType, BLOCK_SIZE, K>
<<<BLOCKS, BLOCK_SIZE>>>(d_data, d_data.data().get(), d_result);
});
test.addMeassurement("randomAccessConstRestricted", st.median);
outstrm << test.bandwidth(st.median) << ",";
CUDA_SYNC_CHECK_ERROR();
}
// SAIGA_ASSERT(ref == d_result);
{
d_result = result;
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
randomAccessLdg<ElementType, BLOCK_SIZE, K><<<BLOCKS, BLOCK_SIZE>>>(d_data, d_result);
});
test.addMeassurement("randomAccessLdg", st.median);
outstrm << test.bandwidth(st.median) << ",";
CUDA_SYNC_CHECK_ERROR();
}
// SAIGA_ASSERT(ref == d_result);
#endif
{
cudaBindTexture(0, dataTexture, d_data.data().get(), d_data.size() * sizeof(ElementType));
d_result = result;
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(
its, [&]() { randomAccessTexture<ElementType, BLOCK_SIZE, K><<<BLOCKS, BLOCK_SIZE>>>(d_data, d_result); });
test.addMeassurement("randomAccessTexture", st.median);
outstrm << test.bandwidth(st.median);
cudaUnbindTexture(dataTexture);
CUDA_SYNC_CHECK_ERROR();
}
outstrm << std::endl;
return;
}
int main(int argc, char* argv[])
{
// cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
// cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
outstrm.open("out.csv");
outstrm << "size,simple,cr,ldg,texture" << std::endl;
#ifdef LECTURE
int start = 8;
int end = 9;
randomAccessTest2<int>(1 << 12, 1 * 1024 * 1024);
#else
int start = 8;
int end = 24;
for (int i = start; i < end; ++i)
{
randomAccessTest2<int>(1 << i, 1 * 1024 * 1024);
if (i > 0) randomAccessTest2<int>((1 << i) + (1 << (i - 1)), 1 * 1024 * 1024);
}
#endif
CUDA_SYNC_CHECK_ERROR();
return 0;
}
|
da7d27d38ee05debeb9fea4499effe8bbee10ab9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cross_entropy_error_function_updater_cuda.h"
#include "../cross_entropy_error_function.h"
namespace nnforge
{
namespace cuda
{
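// Software emulation of a double-precision atomicAdd: retry an atomicCAS on the
// 64-bit bit pattern until the read-modify-write succeeds (for devices without a
// native double atomicAdd).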
__forceinline__ __device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
extern __shared__ float arr_sh[];
template <bool multiple_blocks>
__global__ void cross_entropy_update_error_and_gradient_kernel(
float * __restrict gradients,
double * __restrict total_error,
const float * __restrict actual_output_neurons,
const float * __restrict predicted_output_neurons,
int output_entry_id,
int neuron_count,
int updater_entry_count)
{
int neuron_id = blockIdx.y * blockDim.x + threadIdx.x;
int updater_entry_id = blockIdx.x;
int offset = updater_entry_id * neuron_count + neuron_id;
float err = 0.0F;
if (neuron_id < neuron_count)
{
float actual_val = actual_output_neurons[output_entry_id * neuron_count + neuron_id];
float predicted_val = predicted_output_neurons[offset];
float gradient = 0.0F;
if (actual_val > 0.0F)
{
err = -actual_val * __logf(predicted_val);
gradient = __fdividef(actual_val, predicted_val);
}
if (actual_val < 1.0F)
{
err -= (1.0F - actual_val) * __logf(1.0F - predicted_val);
gradient -= __fdividef(1.0F - actual_val, 1.0F - predicted_val);
}
gradients[offset] = gradient;
}
int thread_id = threadIdx.x;
int lane_id = thread_id & 31;
#if __CUDA_ARCH__ < 300
volatile float * arr = arr_sh;
arr[thread_id] = err;
#endif
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#if __CUDA_ARCH__ < 300
if (lane_id < tx)
arr[thread_id] += arr[thread_id + tx];
#else
err += __shfl_down(err, tx);
#endif
}
#if __CUDA_ARCH__ < 300
err = arr[thread_id];
__syncthreads();
#endif
if (blockDim.x > 32)
{
if (lane_id == 0)
arr_sh[thread_id >> 5] = err;
__syncthreads();
}
if (thread_id == 0)
{
for(int i = 1; i < (blockDim.x >> 5); ++i)
err += arr_sh[i];
double err_d = (double)err;
if (multiple_blocks)
{
atomicAdd(total_error + updater_entry_id, err_d);
}
else
{
total_error[updater_entry_id] += err_d;
}
}
}
cross_entropy_error_function_updater_cuda::cross_entropy_error_function_updater_cuda()
{
}
cross_entropy_error_function_updater_cuda::~cross_entropy_error_function_updater_cuda()
{
}
const boost::uuids::uuid& cross_entropy_error_function_updater_cuda::get_uuid() const
{
return cross_entropy_error_function::function_guid;
}
void cross_entropy_error_function_updater_cuda::enqueue_update_error_and_gradient(
hipStream_t stream_id,
cuda_linear_buffer_device_smart_ptr gradient_buffer,
cuda_linear_buffer_device_smart_ptr error_buffer,
const_cuda_linear_buffer_device_smart_ptr actual_output_buffer,
const_cuda_linear_buffer_device_smart_ptr predicted_output_buffer,
unsigned int input_entry_id,
unsigned int neuron_count,
unsigned int updater_entry_count) const
{
int threadblock_size = get_threadblock_size(neuron_count);
int block_count = (neuron_count + threadblock_size - 1) / threadblock_size;
dim3 grid_size(updater_entry_count, block_count, 1);
dim3 block_size(threadblock_size, 1, 1);
int smem_size = threadblock_size * sizeof(float);
if (block_count > 1)
hipLaunchKernelGGL(( cross_entropy_update_error_and_gradient_kernel<true>), dim3(grid_size), dim3(block_size), smem_size, stream_id,
*gradient_buffer,
*error_buffer,
*actual_output_buffer,
*predicted_output_buffer,
input_entry_id,
neuron_count,
updater_entry_count);
else
hipLaunchKernelGGL(( cross_entropy_update_error_and_gradient_kernel<false>), dim3(grid_size), dim3(block_size), smem_size, stream_id,
*gradient_buffer,
*error_buffer,
*actual_output_buffer,
*predicted_output_buffer,
input_entry_id,
neuron_count,
updater_entry_count);
}
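// Pick a thread block size that is a multiple of the warp size (32); outputs of
// 256 or more neurons are split across several blocks of roughly equal size,
// each rounded up to a multiple of 32.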
int cross_entropy_error_function_updater_cuda::get_threadblock_size(int output_neuron_count)
{
int threadblock_size;
if (output_neuron_count < 256)
{
threadblock_size = (output_neuron_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (output_neuron_count + 256 - 1) / 256;
threadblock_size = (output_neuron_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
}
}
|
da7d27d38ee05debeb9fea4499effe8bbee10ab9.cu
|
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cross_entropy_error_function_updater_cuda.h"
#include "../cross_entropy_error_function.h"
namespace nnforge
{
namespace cuda
{
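// Software emulation of a double-precision atomicAdd: retry an atomicCAS on the
// 64-bit bit pattern until the read-modify-write succeeds (for devices without a
// native double atomicAdd).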
__forceinline__ __device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
extern __shared__ float arr_sh[];
template <bool multiple_blocks>
__global__ void cross_entropy_update_error_and_gradient_kernel(
float * __restrict gradients,
double * __restrict total_error,
const float * __restrict actual_output_neurons,
const float * __restrict predicted_output_neurons,
int output_entry_id,
int neuron_count,
int updater_entry_count)
{
int neuron_id = blockIdx.y * blockDim.x + threadIdx.x;
int updater_entry_id = blockIdx.x;
int offset = updater_entry_id * neuron_count + neuron_id;
float err = 0.0F;
if (neuron_id < neuron_count)
{
float actual_val = actual_output_neurons[output_entry_id * neuron_count + neuron_id];
float predicted_val = predicted_output_neurons[offset];
float gradient = 0.0F;
if (actual_val > 0.0F)
{
err = -actual_val * __logf(predicted_val);
gradient = __fdividef(actual_val, predicted_val);
}
if (actual_val < 1.0F)
{
err -= (1.0F - actual_val) * __logf(1.0F - predicted_val);
gradient -= __fdividef(1.0F - actual_val, 1.0F - predicted_val);
}
gradients[offset] = gradient;
}
int thread_id = threadIdx.x;
int lane_id = thread_id & 31;
#if __CUDA_ARCH__ < 300
volatile float * arr = arr_sh;
arr[thread_id] = err;
#endif
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#if __CUDA_ARCH__ < 300
if (lane_id < tx)
arr[thread_id] += arr[thread_id + tx];
#else
err += __shfl_down(err, tx);
#endif
}
#if __CUDA_ARCH__ < 300
err = arr[thread_id];
__syncthreads();
#endif
if (blockDim.x > 32)
{
if (lane_id == 0)
arr_sh[thread_id >> 5] = err;
__syncthreads();
}
if (thread_id == 0)
{
for(int i = 1; i < (blockDim.x >> 5); ++i)
err += arr_sh[i];
double err_d = (double)err;
if (multiple_blocks)
{
atomicAdd(total_error + updater_entry_id, err_d);
}
else
{
total_error[updater_entry_id] += err_d;
}
}
}
cross_entropy_error_function_updater_cuda::cross_entropy_error_function_updater_cuda()
{
}
cross_entropy_error_function_updater_cuda::~cross_entropy_error_function_updater_cuda()
{
}
const boost::uuids::uuid& cross_entropy_error_function_updater_cuda::get_uuid() const
{
return cross_entropy_error_function::function_guid;
}
void cross_entropy_error_function_updater_cuda::enqueue_update_error_and_gradient(
cudaStream_t stream_id,
cuda_linear_buffer_device_smart_ptr gradient_buffer,
cuda_linear_buffer_device_smart_ptr error_buffer,
const_cuda_linear_buffer_device_smart_ptr actual_output_buffer,
const_cuda_linear_buffer_device_smart_ptr predicted_output_buffer,
unsigned int input_entry_id,
unsigned int neuron_count,
unsigned int updater_entry_count) const
{
int threadblock_size = get_threadblock_size(neuron_count);
int block_count = (neuron_count + threadblock_size - 1) / threadblock_size;
dim3 grid_size(updater_entry_count, block_count, 1);
dim3 block_size(threadblock_size, 1, 1);
int smem_size = threadblock_size * sizeof(float);
if (block_count > 1)
cross_entropy_update_error_and_gradient_kernel<true><<<grid_size, block_size, smem_size, stream_id>>>(
*gradient_buffer,
*error_buffer,
*actual_output_buffer,
*predicted_output_buffer,
input_entry_id,
neuron_count,
updater_entry_count);
else
cross_entropy_update_error_and_gradient_kernel<false><<<grid_size, block_size, smem_size, stream_id>>>(
*gradient_buffer,
*error_buffer,
*actual_output_buffer,
*predicted_output_buffer,
input_entry_id,
neuron_count,
updater_entry_count);
}
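// Pick a thread block size that is a multiple of the warp size (32); outputs of
// 256 or more neurons are split across several blocks of roughly equal size,
// each rounded up to a multiple of 32.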
int cross_entropy_error_function_updater_cuda::get_threadblock_size(int output_neuron_count)
{
int threadblock_size;
if (output_neuron_count < 256)
{
threadblock_size = (output_neuron_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (output_neuron_count + 256 - 1) / 256;
threadblock_size = (output_neuron_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
}
}
|
1b4e430a4a34b3097e3044f22c086b19de3e582e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Christian Noboa Mardini <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/legacy/column_utils.hpp>
#include <cudf/legacy/copying.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <cudf/utilities/error.hpp>
#include <utilities/legacy/cuda_utils.hpp>
#include <utilities/legacy/bit_util.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <bitmask/legacy/bit_mask.cuh>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <copying/legacy/slice.hpp>
namespace cudf {
namespace {
using bit_mask_t = bit_mask::bit_mask_t;
/**
* @brief Improve the readability of the source code.
* Parameter for the CUDA kernel.
*/
constexpr std::size_t NO_DYNAMIC_MEMORY = 0;
template <typename ColumnType>
__global__
void slice_data_kernel(ColumnType* output_data,
ColumnType const* input_data,
cudf::size_type const* indices,
cudf::size_type const indices_position) {
cudf::size_type input_offset = indices[indices_position*2]; /**< The start index position of the input data. */
cudf::size_type row_size = indices[indices_position*2 + 1] - input_offset;
// Calculate kernel parameters
cudf::size_type row_index = threadIdx.x + blockIdx.x * blockDim.x;
cudf::size_type row_step = blockDim.x * gridDim.x;
// Perform the copying operation
while (row_index < row_size) {
output_data[row_index] = input_data[input_offset + row_index];
row_index += row_step;
}
}
/** @brief This function copies a slice of a bitmask.
*
* If the slice is from element 10 to element 40, element 10 corresponds to bit 3 of the second byte,
* that bit needs to become bit 0. So we are reading two adjacent blocks and bitshifting them together,
* to then write one block. We also take care that if the last bits of a bit_mask_t block don't
* correspond to this slice, then we need to apply a mask to clear those bits.
*/
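// Example (assuming 32-bit bit_mask_t): for a slice starting at element 10,
// rotate_input is 10, so each output block combines bits 10..31 of input block k
// with bits 0..9 of input block k+1 via __funnelshift_rc.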
__global__
void slice_bitmask_kernel(bit_mask_t* output_bitmask,
cudf::size_type* output_null_count,
bit_mask_t const* input_bitmask,
cudf::size_type const input_size,
cudf::size_type const* indices,
cudf::size_type const indices_size,
cudf::size_type const indices_position) {
// Obtain the indices for copying
cudf::size_type input_index_begin = indices[indices_position * 2];
cudf::size_type input_index_end = indices[indices_position * 2 + 1];
cudf::size_type input_offset = cudf::util::detail::bit_container_index<bit_mask_t, cudf::size_type>(input_index_begin);
cudf::size_type rotate_input = cudf::util::detail::intra_container_index<bit_mask_t, cudf::size_type>(input_index_begin);
bit_mask_t mask_last = (bit_mask_t{1} << ((input_index_end - input_index_begin) % bit_mask::bits_per_element)) - bit_mask_t{1};
cudf::size_type input_block_length = bit_mask::num_elements(input_size);
cudf::size_type partition_block_length = bit_mask::num_elements(input_index_end - input_index_begin);
// Calculate kernel parameters
cudf::size_type row_index = threadIdx.x + blockIdx.x * blockDim.x;
cudf::size_type row_step = blockDim.x * gridDim.x;
// Perform the copying operation
while (row_index < partition_block_length) {
// load data into one or two adjacent bitmask blocks
if (rotate_input == 0){
output_bitmask[row_index] = input_bitmask[input_offset + row_index];
} else {
bit_mask_t lower_value = input_bitmask[input_offset + row_index];
bit_mask_t upper_value = bit_mask_t{0};
if (row_index < (input_block_length - 1)) {
upper_value = input_bitmask[input_offset + row_index + 1];
}
// Perform rotation
output_bitmask[row_index] = __funnelshift_rc(lower_value, upper_value, rotate_input);
}
// Apply mask for the last value in the bitmask
if ((row_index == (partition_block_length - 1)) && mask_last) {
output_bitmask[row_index] &= mask_last;
}
// Perform null bitmask null count
std::uint32_t null_count_value = __popc(output_bitmask[row_index]); // Count the number of bits that are set to 1 in a 32 bit integer.
atomicAdd(output_null_count, null_count_value);
row_index += row_step;
}
}
class Slice {
public:
Slice(gdf_column const & input_column,
cudf::size_type const* indices,
cudf::size_type num_indices,
std::vector<gdf_column*> const & output_columns,
std::vector<hipStream_t> const & streams)
: input_column_(input_column), indices_(indices), num_indices_(num_indices),
output_columns_(output_columns), streams_(streams) { }
public:
template <typename ColumnType>
void operator()() {
cudf::size_type columns_quantity = output_columns_.size();
// Perform operation
for (cudf::size_type index = 0; index < columns_quantity; ++index) {
// Empty output column
if (output_columns_[index]->size == 0) {
continue;
}
// Create a new cuda variable for null count in the bitmask
rmm::device_vector<cudf::size_type> bit_set_counter(1, 0);
// Gather stream
hipStream_t stream = get_stream(index);
// Allocate Column
gdf_column* output_column = output_columns_[index];
auto col_width { cudf::byte_width(*output_column) };
RMM_TRY( RMM_ALLOC(&(output_column->data), col_width * output_column->size, stream) );
if(input_column_.valid != nullptr){
RMM_TRY( RMM_ALLOC(&(output_column->valid), sizeof(cudf::valid_type)*gdf_valid_allocation_size(output_column->size), stream) );
} else {
output_column->valid = nullptr;
}
// Configure grid for data kernel launch
auto data_grid_config = cudf::util::cuda::grid_config_1d(output_column->size, 256);
// Make a copy of the data in the gdf_column
hipLaunchKernelGGL(( slice_data_kernel<ColumnType>)
,
dim3(data_grid_config.num_blocks),
dim3(data_grid_config.num_threads_per_block),
NO_DYNAMIC_MEMORY,
stream
,
static_cast<ColumnType*>(output_column->data),
static_cast<ColumnType const*>(input_column_.data),
indices_,
index
);
if(input_column_.valid != nullptr){
// Configure grid for bit mask kernel launch
auto valid_grid_config = cudf::util::cuda::grid_config_1d(gdf_num_bitmask_elements(output_column->size), 256);
// Make a copy of the bitmask in the gdf_column
hipLaunchKernelGGL(( slice_bitmask_kernel)
,
dim3(valid_grid_config.num_blocks),
dim3(valid_grid_config.num_threads_per_block),
NO_DYNAMIC_MEMORY,
stream
,
reinterpret_cast<bit_mask_t*>(output_column->valid),
bit_set_counter.data().get(),
reinterpret_cast<bit_mask_t const*>(input_column_.valid),
input_column_.size,
indices_,
num_indices_,
index
);
CHECK_CUDA(stream);
// Update the other fields in the output column
cudf::size_type num_nulls;
CUDA_TRY(hipMemcpyAsync(&num_nulls, bit_set_counter.data().get(), sizeof(cudf::size_type),
hipMemcpyDeviceToHost, stream));
output_column->null_count = output_column->size - num_nulls;
} else {
output_column->null_count = 0;
}
if (output_column->dtype == GDF_STRING_CATEGORY){
CUDF_TRY(nvcategory_gather(output_column, static_cast<NVCategory*>(input_column_.dtype_info.category)));
}
}
}
private:
hipStream_t get_stream(cudf::size_type index) {
if (streams_.size() == 0) {
return hipStream_t{nullptr};
}
return streams_[index % streams_.size()];
}
gdf_column const input_column_;
cudf::size_type const* indices_;
cudf::size_type num_indices_;
std::vector<gdf_column*> const output_columns_;
std::vector<hipStream_t> streams_;
};
} // namespace
namespace detail {
std::vector<gdf_column*> slice(gdf_column const & input_column,
cudf::size_type const* indices,
cudf::size_type num_indices,
std::vector<hipStream_t> const & streams) {
std::vector<gdf_column*> output_columns;
if (num_indices == 0 || indices == nullptr) {
return output_columns;
}
if (input_column.size == 0) {
return output_columns;
}
CUDF_EXPECTS(input_column.data != nullptr, "input column data is null");
CUDF_EXPECTS((num_indices % 2) == 0, "indices size must be even");
// Get indexes on host side
std::vector<cudf::size_type> host_indices(num_indices);
CUDA_TRY( hipMemcpy(host_indices.data(), indices, num_indices * sizeof(cudf::size_type), hipMemcpyDeviceToHost) );
// Initialize output_columns
output_columns.resize(num_indices/2);
//TODO: optimize to launch all slices in parallel
for (cudf::size_type i = 0; i < num_indices/2; i++){
output_columns[i] = new gdf_column{};
gdf_column_view_augmented(output_columns[i],
nullptr,
nullptr,
host_indices[2*i + 1] - host_indices[2*i],
input_column.dtype,
0,
{input_column.dtype_info.time_unit, nullptr});
}
// Create slice helper class
Slice slice(input_column, indices, num_indices, output_columns, streams);
// Perform cudf operation
cudf::type_dispatcher(input_column.dtype, slice);
return output_columns;
}
} // namespace detail
std::vector<gdf_column*> slice(gdf_column const & input_column,
cudf::size_type const* indices,
cudf::size_type num_indices) {
return cudf::detail::slice(input_column, indices, num_indices);
}
} // namespace cudf
|
1b4e430a4a34b3097e3044f22c086b19de3e582e.cu
|
/*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Christian Noboa Mardini <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/legacy/column_utils.hpp>
#include <cudf/legacy/copying.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <cudf/utilities/error.hpp>
#include <utilities/legacy/cuda_utils.hpp>
#include <utilities/legacy/bit_util.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <bitmask/legacy/bit_mask.cuh>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <copying/legacy/slice.hpp>
namespace cudf {
namespace {
using bit_mask_t = bit_mask::bit_mask_t;
/**
* @brief Improve the readability of the source code.
* Parameter for the CUDA kernel.
*/
constexpr std::size_t NO_DYNAMIC_MEMORY = 0;
template <typename ColumnType>
__global__
void slice_data_kernel(ColumnType* output_data,
ColumnType const* input_data,
cudf::size_type const* indices,
cudf::size_type const indices_position) {
cudf::size_type input_offset = indices[indices_position*2]; /**< The start index position of the input data. */
cudf::size_type row_size = indices[indices_position*2 + 1] - input_offset;
// Calculate kernel parameters
cudf::size_type row_index = threadIdx.x + blockIdx.x * blockDim.x;
cudf::size_type row_step = blockDim.x * gridDim.x;
// Perform the copying operation
while (row_index < row_size) {
output_data[row_index] = input_data[input_offset + row_index];
row_index += row_step;
}
}
/** @brief This function copies a slice of a bitmask.
*
* If the slice is from element 10 to element 40, element 10 corresponds to bit 3 of the second byte,
* that bit needs to become bit 0. So we are reading two adjacent blocks and bitshifting them together,
* to then write one block. We also take care that if the last bits of a bit_mask_t block don't
* correspond to this slice, then we need to apply a mask to clear those bits.
*/
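// Example (assuming 32-bit bit_mask_t): for a slice starting at element 10,
// rotate_input is 10, so each output block combines bits 10..31 of input block k
// with bits 0..9 of input block k+1 via __funnelshift_rc.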
__global__
void slice_bitmask_kernel(bit_mask_t* output_bitmask,
cudf::size_type* output_null_count,
bit_mask_t const* input_bitmask,
cudf::size_type const input_size,
cudf::size_type const* indices,
cudf::size_type const indices_size,
cudf::size_type const indices_position) {
// Obtain the indices for copying
cudf::size_type input_index_begin = indices[indices_position * 2];
cudf::size_type input_index_end = indices[indices_position * 2 + 1];
cudf::size_type input_offset = cudf::util::detail::bit_container_index<bit_mask_t, cudf::size_type>(input_index_begin);
cudf::size_type rotate_input = cudf::util::detail::intra_container_index<bit_mask_t, cudf::size_type>(input_index_begin);
bit_mask_t mask_last = (bit_mask_t{1} << ((input_index_end - input_index_begin) % bit_mask::bits_per_element)) - bit_mask_t{1};
cudf::size_type input_block_length = bit_mask::num_elements(input_size);
cudf::size_type partition_block_length = bit_mask::num_elements(input_index_end - input_index_begin);
// Calculate kernel parameters
cudf::size_type row_index = threadIdx.x + blockIdx.x * blockDim.x;
cudf::size_type row_step = blockDim.x * gridDim.x;
// Perform the copying operation
while (row_index < partition_block_length) {
// load data into one or two adjacent bitmask blocks
if (rotate_input == 0){
output_bitmask[row_index] = input_bitmask[input_offset + row_index];
} else {
bit_mask_t lower_value = input_bitmask[input_offset + row_index];
bit_mask_t upper_value = bit_mask_t{0};
if (row_index < (input_block_length - 1)) {
upper_value = input_bitmask[input_offset + row_index + 1];
}
// Perform rotation
output_bitmask[row_index] = __funnelshift_rc(lower_value, upper_value, rotate_input);
}
// Apply mask for the last value in the bitmask
if ((row_index == (partition_block_length - 1)) && mask_last) {
output_bitmask[row_index] &= mask_last;
}
// Perform null bitmask null count
std::uint32_t null_count_value = __popc(output_bitmask[row_index]); // Count the number of bits that are set to 1 in a 32 bit integer.
atomicAdd(output_null_count, null_count_value);
row_index += row_step;
}
}
class Slice {
public:
Slice(gdf_column const & input_column,
cudf::size_type const* indices,
cudf::size_type num_indices,
std::vector<gdf_column*> const & output_columns,
std::vector<cudaStream_t> const & streams)
: input_column_(input_column), indices_(indices), num_indices_(num_indices),
output_columns_(output_columns), streams_(streams) { }
public:
template <typename ColumnType>
void operator()() {
cudf::size_type columns_quantity = output_columns_.size();
// Perform operation
for (cudf::size_type index = 0; index < columns_quantity; ++index) {
// Empty output column
if (output_columns_[index]->size == 0) {
continue;
}
// Create a new cuda variable for null count in the bitmask
rmm::device_vector<cudf::size_type> bit_set_counter(1, 0);
// Gather stream
cudaStream_t stream = get_stream(index);
// Allocate Column
gdf_column* output_column = output_columns_[index];
auto col_width { cudf::byte_width(*output_column) };
RMM_TRY( RMM_ALLOC(&(output_column->data), col_width * output_column->size, stream) );
if(input_column_.valid != nullptr){
RMM_TRY( RMM_ALLOC(&(output_column->valid), sizeof(cudf::valid_type)*gdf_valid_allocation_size(output_column->size), stream) );
} else {
output_column->valid = nullptr;
}
// Configure grid for data kernel launch
auto data_grid_config = cudf::util::cuda::grid_config_1d(output_column->size, 256);
// Make a copy of the data in the gdf_column
slice_data_kernel<ColumnType>
<<<
data_grid_config.num_blocks,
data_grid_config.num_threads_per_block,
NO_DYNAMIC_MEMORY,
stream
>>>(
static_cast<ColumnType*>(output_column->data),
static_cast<ColumnType const*>(input_column_.data),
indices_,
index
);
if(input_column_.valid != nullptr){
// Configure grid for bit mask kernel launch
auto valid_grid_config = cudf::util::cuda::grid_config_1d(gdf_num_bitmask_elements(output_column->size), 256);
// Make a copy of the bitmask in the gdf_column
slice_bitmask_kernel
<<<
valid_grid_config.num_blocks,
valid_grid_config.num_threads_per_block,
NO_DYNAMIC_MEMORY,
stream
>>>(
reinterpret_cast<bit_mask_t*>(output_column->valid),
bit_set_counter.data().get(),
reinterpret_cast<bit_mask_t const*>(input_column_.valid),
input_column_.size,
indices_,
num_indices_,
index
);
CHECK_CUDA(stream);
// Update the other fields in the output column
cudf::size_type num_nulls;
CUDA_TRY(cudaMemcpyAsync(&num_nulls, bit_set_counter.data().get(), sizeof(cudf::size_type),
cudaMemcpyDeviceToHost, stream));
output_column->null_count = output_column->size - num_nulls;
} else {
output_column->null_count = 0;
}
if (output_column->dtype == GDF_STRING_CATEGORY){
CUDF_TRY(nvcategory_gather(output_column, static_cast<NVCategory*>(input_column_.dtype_info.category)));
}
}
}
private:
cudaStream_t get_stream(cudf::size_type index) {
if (streams_.size() == 0) {
return cudaStream_t{nullptr};
}
return streams_[index % streams_.size()];
}
gdf_column const input_column_;
cudf::size_type const* indices_;
cudf::size_type num_indices_;
std::vector<gdf_column*> const output_columns_;
std::vector<cudaStream_t> streams_;
};
} // namespace
namespace detail {
std::vector<gdf_column*> slice(gdf_column const & input_column,
cudf::size_type const* indices,
cudf::size_type num_indices,
std::vector<cudaStream_t> const & streams) {
std::vector<gdf_column*> output_columns;
if (num_indices == 0 || indices == nullptr) {
return output_columns;
}
if (input_column.size == 0) {
return output_columns;
}
CUDF_EXPECTS(input_column.data != nullptr, "input column data is null");
CUDF_EXPECTS((num_indices % 2) == 0, "indices size must be even");
// Get indexes on host side
std::vector<cudf::size_type> host_indices(num_indices);
CUDA_TRY( cudaMemcpy(host_indices.data(), indices, num_indices * sizeof(cudf::size_type), cudaMemcpyDeviceToHost) );
// Initialize output_columns
output_columns.resize(num_indices/2);
//TODO: optimize to launch all slices in parallel
for (cudf::size_type i = 0; i < num_indices/2; i++){
output_columns[i] = new gdf_column{};
gdf_column_view_augmented(output_columns[i],
nullptr,
nullptr,
host_indices[2*i + 1] - host_indices[2*i],
input_column.dtype,
0,
{input_column.dtype_info.time_unit, nullptr});
}
// Create slice helper class
Slice slice(input_column, indices, num_indices, output_columns, streams);
// Perform cudf operation
cudf::type_dispatcher(input_column.dtype, slice);
return output_columns;
}
} // namespace detail
std::vector<gdf_column*> slice(gdf_column const & input_column,
cudf::size_type const* indices,
cudf::size_type num_indices) {
return cudf::detail::slice(input_column, indices, num_indices);
}
} // namespace cudf
|
ba36018b0c193be3d42faa93abb1e1d3c7aa0ad1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/loss_layers.hpp"
#include "assert.h"
namespace caffe {
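// Numerically stable log-sum-exp over a strided slice of `data`: the running
// maximum is subtracted before exponentiating to avoid overflow.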
template <typename Dtype>
__device__ Dtype gpu_log_sum_exp(int length, int offset, int increment, const Dtype* data){
int kmin = offset;
int kmax = offset + length*increment;
Dtype total = 0.;
Dtype max = data[kmin];
for(int k = kmin+increment; k < kmax; k+=increment){
if (data[k] > max ) max = data[k];
}
for(int k = kmin; k < kmax; k+=increment){
total += exp(data[k] - max);
}
//rv[0] =
return ::log(total) + max;
}
template <typename Dtype>
__global__ void MILFrameArgMarginal(const int nthreads,
const Dtype* arg_data, Dtype* arg_marginal,
int total_args, int total_bbs, const int arg, const int length){
CUDA_KERNEL_LOOP(index, nthreads) {
//compute my argument index
//each thread is tasked with computing one marginal. those are stored batch x arg x bb
//index = (i*total_args*total_bb + a*total_bb + b)
//int i = index / (total_args * total_bbs);
//int a = (index / total_bbs ) % total_args;
//int b = index % total_bbs;
//index = i*total_bb + b
int i = index / total_bbs;
int b = index % total_bbs;
int value_offset = i*total_bbs*length + b*length;
int marginal_offset = i*total_args*total_bbs + arg*total_bbs + b;
arg_marginal[marginal_offset] = gpu_log_sum_exp(length, value_offset, 1, arg_data);
}
}
template <typename Dtype>
__global__ void MILFrameVerbMarginal(const int nthreads,
const Dtype* bottom_verb, const Dtype* arg_marginal, Dtype* verb_marginal,
const Dtype* verb_start, const Dtype* verb_length,
int total_frames, int total_bbs, int total_args){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_frames*total_bbs + f*total_bbs + b
int i = index / ( total_frames * total_bbs);
int f = ( index / total_bbs ) % total_frames;
int b = index % total_bbs;
int verb_offset = i*total_bbs*total_frames + b*total_frames + f;
int arg_offset = i*total_args*total_bbs + ((int)verb_start[f])*total_bbs + b;
//int verb_marginal_offset = index;//i*total_frames*total_bbs + f*total_bbs + b;
int nargs = (int)verb_length[f];
Dtype total = bottom_verb[verb_offset];
for( int k = 0; k < nargs; k++){
total+= arg_marginal[arg_offset + k*total_bbs];
}
verb_marginal[index] = total;
}
}
template <typename Dtype>
__global__ void MILFrameNorm(const int nthreads,
Dtype* norms, const Dtype* verb_marginal,
int total_frames, int total_bbs){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_bbs + b
int i = index / total_bbs;
int b = index % total_bbs;
int verb_offset = i*total_bbs*total_frames + b;
norms[index] = gpu_log_sum_exp(total_frames, verb_offset, total_bbs, verb_marginal);
}
}
template <typename Dtype>
__global__ void MILFrameRefScore(const int nthreads,
const Dtype * score, Dtype* ref_scores,
int gold_label, int i, int r, int length, int total_bbs, int references, bool clear){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*references*total_bbs + r*total_bbs + b
//int i = index / (references * total_bbs);
//int r = (index / total_bbs) % references;
int b = index;// % total_bbs;
int ref_offset = i*references*total_bbs + r*total_bbs + b;
int value_index = i*total_bbs*length + b*length + gold_label;
ref_scores[ref_offset] = (clear ? 0 : ref_scores[ref_offset]) + score[value_index];
}
}
template <typename Dtype>
__global__ void MILFramePosScores(const int nthreads,
const Dtype * norms, const Dtype* ref_scores, Dtype * pos_scores,
int total_bbs, int references){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*references + r
int i = index / references;
int r = index % references;
Dtype _not1 = log(0.);
int ref_offset = i*references*total_bbs + r*total_bbs;
int norm_offset = i*total_bbs;
for( int b = 0; b < total_bbs; b++){
Dtype ref = ref_scores[ref_offset + b];
Dtype norm = norms[norm_offset + b];
Dtype value = ref-norm;
//_not1 = _not1 + value - _not1*value;
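// Log-space noisy-OR update: log(p + q - p*q), computed by factoring out the larger of the two log-probabilities for numerical stability.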
if( _not1 > value)
_not1 = log( 1 + exp(value - _not1) - exp(value)) + _not1;
else
_not1 = log( 1 + exp(_not1 - value) - exp(_not1)) + value;
}
//pos_scores stores log probabilities:
//_not1 = log P(at least one bb realizes this reference)
pos_scores[index] = _not1;
}
}
template <typename Dtype>
__global__ void MILFrameFillVerbMarginal(const int nthreads,
const Dtype* verb_marginal, const Dtype* norms, Dtype* output_scores, Dtype* output_structure,
int maxlabel, int total_frames, int total_bbs){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_frames + v
int i = index / total_frames;
int v = index % total_frames;
int offset = i*total_frames*total_bbs + v*total_bbs;
int norm_offset = i*total_bbs;
int score_offset = i*total_frames + v;
int structure_offset = i*total_frames*maxlabel + v*maxlabel;
Dtype not1 = 0.;
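// Linear-space noisy-OR over boxes: probability that at least one box is assigned frame v.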
for(int b = 0; b < total_bbs; b++){
Dtype value = exp(verb_marginal[offset + b] - norms[norm_offset + b]);
not1 = not1 + value - not1*value;
}
output_scores[score_offset] = not1;
output_structure[structure_offset] = v;
}
}
template <typename Dtype>
__global__ void MILFrameFillScratch(const int nthreads,
const Dtype * arg_marginal, const Dtype * values, Dtype* scratch,
int arg, int length, int total_args, int total_bbs, int max_value){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*arg_length + v
int i = index / length;
int v = index % length;
int value_offset = i*total_bbs*length + v;
int marginal_offset = i*total_args*total_bbs + arg*total_bbs;
Dtype not1 = 0.;
//we need to look up the value for all bbs
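// Noisy-OR over boxes of P(argument takes value v | box b), using the precomputed argument marginal as the per-box normalizer.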
for(int b = 0; b < total_bbs; b++){
Dtype value = exp(values[value_offset + b*length] - arg_marginal[marginal_offset + b]);
not1 = not1 + value - value*not1;
}
scratch[i*total_args*max_value + arg*max_value + v] = not1;
}
}
template <typename Dtype>
__global__ void MILFrameScratchMaxValue(const int nthreads,
const Dtype * scratch,const Dtype* arg_length, Dtype* max_scratch,
int total_args, int max_value){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_arguments + a
int i = index / total_args;
int a = index % total_args;
int value_offset = i*total_args*max_value + a*max_value;
int length = (int)arg_length[a];
Dtype mv = 0.;
int mi = 0;
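// Argmax over this argument's values; scratch already holds the per-value noisy-OR probabilities.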
for(int v = 0; v < length; v++){
Dtype value = scratch[value_offset + v];
if( value >= mv) {
mv = value;
mi = v;
}
}
max_scratch[index] = mi;
}
}
template <typename Dtype>
__global__ void MILFrameBackward(const int nthreads,
const Dtype* total_scores, const Dtype * ref_scores,const Dtype* norms, const Dtype * verb_marginal, const Dtype * labels, Dtype * diff,
int maxlabel, int total_frames, int total_bbs, int references){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_bbs*total_frames + b*total_frames + f
int i = index / (total_bbs * total_frames);
int b = ( index / total_frames) % total_bbs;
int f = index % total_frames;
Dtype bb_norm = norms[i*total_bbs + b];
Dtype image_score = total_scores[i];
Dtype _verb_marginal = verb_marginal[i*total_bbs*total_frames + f*total_bbs + b];
Dtype g = 0.;
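// Accumulate the gradient of the per-image log-probability w.r.t. the verb score at (box b, frame f) over all references.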
for(int r = 0; r < references; r++){
int gold_verb = labels[i*references*maxlabel + r*maxlabel];
Dtype score = ref_scores[i*total_bbs*references + r*total_bbs + b] - bb_norm;
//gradient weight: -(P(no reference realized) / P(at least one realized)) * p_b / (1 - p_b); see the expansion below
//Dtype top_scalar = exp(log(1-exp(image_score)) - log(1 - exp(score)) + score);
//Dtype bottom_scalar = exp(image_score);
//Dtype scalar = -top_scalar/ bottom_scalar;
Dtype scalar = - exp(log(1-exp(image_score)) - image_score + score - log(1 - exp(score)));
if( scalar != scalar ) scalar = 0;
if( image_score == 0 && score == 0 ) scalar = 0;
//if( scalar > 1 ) scalar = 1;
g += scalar * ( (f == gold_verb ? 1 : 0) - exp(_verb_marginal - bb_norm) );
}
diff[index] = g;
}
}
template <typename Dtype>
__global__ void MILArgBackward(const int nthreads,
const Dtype* total_scores, const Dtype * ref_scores, const Dtype* norms, const Dtype * verb_marginal, const Dtype* arg_marginal, const Dtype * labels, const Dtype * values, Dtype * diff,
int maxlabel, int total_frames, int total_args, int total_bbs, int references, int arg_length, int f, int a, int arg){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_bbs*arg_length + b*arg_length + v
int i = index / (total_bbs * arg_length);
int b = (index / arg_length) % total_bbs;
int v = index % arg_length;
Dtype bb_norm = norms[i*total_bbs + b];
Dtype image_score = total_scores[i];
Dtype _verb_marginal = verb_marginal[i*total_bbs*total_frames + f*total_bbs + b];
Dtype _arg_marginal = arg_marginal[i*total_bbs*total_args + a*total_bbs + b];
Dtype value_score = values[index];
int label_offset = i*references*maxlabel;
Dtype expected = exp( _verb_marginal - _arg_marginal + value_score - bb_norm);
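// expected = model marginal P(frame f, this argument = v | box b): swap the argument's marginal for the specific value's score, then normalize by the box norm.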
Dtype g = 0.;
for(int r = 0; r < references; r++){
int gold_verb = labels[label_offset + r*maxlabel];
int gold_value = labels[label_offset + r*maxlabel + arg + 1];
Dtype score = ref_scores[i*total_bbs*references + r*total_bbs + b] - bb_norm;
//Dtype top_scalar = exp(log(1-exp(image_score)) - log(1 - exp(score)) + score);
//Dtype bottom_scalar = exp(image_score);
//Dtype scalar = -top_scalar/ bottom_scalar;
Dtype scalar = - exp(log(1-exp(image_score)) - image_score + score - log(1 - exp(score)));
if( scalar != scalar ) scalar = 0;
if( image_score == 0 && score == 0 ) scalar = 0;
//if( scalar > 1 ) scalar = 1;
g+= scalar * ( ( f == gold_verb && v == gold_value ? 1 : 0 ) - expected);
}
diff[index] = g;
}
}
template <typename Dtype>
void MILFrameLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int batch_size = bottom[this->label_index]->shape(0);
// LOG(INFO) << "MIL START FORWARD " << batch_size << " " << total_bbs;
//compute arg marginal
int am_jobs = batch_size*total_bbs;
for(int a = 0; a < total_args; a++){
hipLaunchKernelGGL(( MILFrameArgMarginal<Dtype>), dim3(CAFFE_GET_BLOCKS(am_jobs)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, am_jobs,
bottom[a]->gpu_data(), arg_marginal.mutable_gpu_data(),
total_args, total_bbs, a, arg_structure[a]);
CUDA_POST_KERNEL_CHECK;
}
//compute verb marginal
int vm_jobs = batch_size*total_frames*total_bbs;
hipLaunchKernelGGL(( MILFrameVerbMarginal<Dtype>), dim3(CAFFE_GET_BLOCKS(vm_jobs)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
vm_jobs, bottom[verb_index]->gpu_data(), arg_marginal.gpu_data(),verb_marginal.mutable_gpu_data(),
b_verb_start.gpu_data(), b_verb_length.gpu_data(),
total_frames, total_bbs, total_args);
CUDA_POST_KERNEL_CHECK;
//compute the norm
int n_jobs = batch_size*total_bbs;
hipLaunchKernelGGL(( MILFrameNorm<Dtype>), dim3(CAFFE_GET_BLOCKS(n_jobs)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n_jobs, norms.mutable_gpu_data(), verb_marginal.gpu_data(),
total_frames, total_bbs);
CUDA_POST_KERNEL_CHECK;
//compute ref scores... an awkward way to avoid sending a pointer to the args
//honestly, for a low-bb setting this has extremely low parallelism and likely
//just avoids copying the previous layer to the cpu.
int r_jobs = total_bbs;
const Dtype* label = bottom[label_index]->cpu_data();
for(int i = 0; i < batch_size; i++){
for(int r = 0; r < references; r++){
int label_offset = i*maxlabel*references + r*maxlabel;
int gold_verb = label[label_offset];
hipLaunchKernelGGL(( MILFrameRefScore<Dtype>), dim3(CAFFE_GET_BLOCKS(r_jobs)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
r_jobs, bottom[verb_index]->gpu_data(), ref_scores.mutable_gpu_data(),
gold_verb, i, r, total_frames, total_bbs, references, true);
CUDA_POST_KERNEL_CHECK;
int vlength = verb_length[gold_verb];
int arg_offset = verb_start[gold_verb];
for(int a = 0; a < vlength; a++){
int arg_index = arg_offset + a;
int arg_length = arg_structure[arg_index];
int gold_value = label[label_offset + a + 1];
hipLaunchKernelGGL(( MILFrameRefScore<Dtype>), dim3(CAFFE_GET_BLOCKS(r_jobs)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
r_jobs, bottom[arg_index]->gpu_data(), ref_scores.mutable_gpu_data(),
gold_value, i, r, arg_length, total_bbs, references, false);
CUDA_POST_KERNEL_CHECK;
}
}
}
//compute positive scores
int p_jobs = batch_size * this->references;
hipLaunchKernelGGL(( MILFramePosScores<Dtype>), dim3(CAFFE_GET_BLOCKS(p_jobs)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
p_jobs,
norms.gpu_data(), ref_scores.gpu_data(), pos_scores.mutable_gpu_data(),
total_bbs, references);
CUDA_POST_KERNEL_CHECK;
const Dtype* pos_scores = this->pos_scores.cpu_data();
Dtype* total_scores = this->total_scores.mutable_cpu_data();
Dtype total_score = 0;
for(int i = 0; i < batch_size; i++){
Dtype not1 = log(0.);
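// Combine the per-reference log-probabilities with another log-space noisy-OR: the image counts as covered if any of its references is realized.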
for(int r = 0; r < references; r++){
Dtype value = pos_scores[i*references + r]; //pos_scores[i*references + r] = log P(at least one bb realizes reference r)
//LOG(INFO) << i << "," << r << ":" << value;
if(value > 0 ) LOG(INFO) << "POS SCORE PROB GREATER THAN 1:" << value;
if( value != value) LOG(INFO) << "NAN value:" << value;
if( not1 > value)
not1 = log( 1 + exp(value - not1) - exp(value)) + not1;
else
not1 = log( 1 + exp(not1 - value) - exp(not1)) + value;
// not1 = not1 + value - not1*value;
}
//not1 = ::log(not1);
if(not1 != not1) LOG(INFO) << "NOT 1 NAN";
if(not1 > 0){
LOG(INFO) << "NOT1 PROB GREATER THAN 1:" << not1;
not1 = 0;
}
total_score += not1;
total_scores[i] = not1;
}
if(total_score != total_score) LOG(INFO) << "Total score nan" << total_score;
top[0]->mutable_cpu_data()[0] = (total_score)/batch_size;
//LOG(INFO) << "MIL END FORWARD";
if(this->mode == -1) return; //we aren't going to do inference; settle for just the probability
if(this->mode == 0) this->PredictMarginalFrameGPU(bottom, top);
}
template <typename Dtype>
void MILFrameLossLayer<Dtype>::PredictMarginalFrameGPU(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){
//LOG(INFO) << "MIL START PREDICT";
int batch_size = bottom[this->label_index]->shape(0);
//LOG(INFO)<< "verb marginal...";
//get the bb marginal in
int v_jobs = batch_size*total_frames;
hipLaunchKernelGGL(( MILFrameFillVerbMarginal<Dtype>), dim3(CAFFE_GET_BLOCKS(v_jobs)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, v_jobs,
verb_marginal.gpu_data(), norms.gpu_data(), top[2]->mutable_gpu_data(), top[1]->mutable_gpu_data(),
maxlabel, total_frames, total_bbs);
CUDA_POST_KERNEL_CHECK;
//LOG(INFO)<< "done.\n" << "scratch fill...";
//compute the pre value marginal
for(int a = 0; a < total_args; a++){
int s_jobs = batch_size * arg_structure[a];
hipLaunchKernelGGL(( MILFrameFillScratch<Dtype>), dim3(CAFFE_GET_BLOCKS(s_jobs)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, s_jobs,
arg_marginal.gpu_data(), bottom[a]->gpu_data(), scratch.mutable_gpu_data(),
a, arg_structure[a], total_args, total_bbs, max_value);
CUDA_POST_KERNEL_CHECK;
}
//LOG(INFO) << "done.\n" << "max value...";
int m_jobs = batch_size*total_args;
//compute the max over the marginal
hipLaunchKernelGGL(( MILFrameScratchMaxValue<Dtype>), dim3(CAFFE_GET_BLOCKS(m_jobs)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, m_jobs,
scratch.gpu_data(), b_arg_structure.gpu_data(), max_scratch.mutable_gpu_data(),
total_args, max_value);
CUDA_POST_KERNEL_CHECK;
//LOG(INFO) << "done.";
//this could be on gpu, but we need the actual output back anyways.
const Dtype* max_scratch = this->max_scratch.cpu_data();
Dtype* structure_output = top[1]->mutable_cpu_data();
//we need to copy max data to output
for( int i = 0; i < batch_size; i++){
for(int f = 0; f < total_frames; f++){
int total_arg = verb_length[f];
int arg_offset = verb_start[f];
int offset = i*total_frames*maxlabel + f*maxlabel;
int arg_max_offset = i*total_args;
for( int a = 0; a < total_arg; a++){
int arg_index = arg_offset + a;
structure_output[offset + a + 1] = max_scratch[arg_max_offset + arg_index];
}
}
}
//LOG(INFO) << "MIL END PREDICT";
}
template <typename Dtype>
void MILFrameLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
//LOG(INFO) << "BACKWARD START";
int batch_size = bottom[this->label_index]->shape(0);
const Dtype * labels = bottom[label_index]->gpu_data();
int f_jobs = batch_size * total_bbs * total_frames;
hipLaunchKernelGGL(( MILFrameBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(f_jobs)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, f_jobs,
total_scores.gpu_data(), ref_scores.gpu_data(), norms.gpu_data(), verb_marginal.gpu_data(), labels, bottom[verb_index]->mutable_gpu_diff(),
maxlabel, total_frames, total_bbs, references);
CUDA_POST_KERNEL_CHECK;
/*
const Dtype * vdiff = bottom[verb_index]->cpu_diff();
for(int i = 0 ; i < f_jobs; i++){
if(vdiff[i] > 1.0 || vdiff[i] < -1.0)
LOG(INFO) << "VDIFF ERROR: " << vdiff[i];
}
*/
for(int f = 0; f < total_frames; f++){
int arg_offset = verb_start[f];
for( int arg = 0; arg < verb_length[f]; arg++){
int arg_index = arg_offset + arg;
int arg_length = arg_structure[arg_index];
int a_jobs = batch_size * total_bbs * arg_length;
hipLaunchKernelGGL(( MILArgBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(a_jobs)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, a_jobs,
total_scores.gpu_data(), ref_scores.gpu_data(),norms.gpu_data(), verb_marginal.gpu_data(), arg_marginal.gpu_data(), labels, bottom[arg_index]->gpu_data(), bottom[arg_index]->mutable_gpu_diff(),
maxlabel, total_frames, total_args, total_bbs, references, arg_length, f, arg_index, arg);
CUDA_POST_KERNEL_CHECK;
/*
const Dtype * vdiff = bottom[arg_index]->cpu_diff();
for(int i = 0 ; i < a_jobs; i++){
if(vdiff[i] > 1.0 || vdiff[i] < -1.0) { LOG(INFO) << "ADIFF ERROR: " << arg_index << " " << vdiff[i];}
}
*/
}
}
//LOG(INFO) << "BACKWARD END";
}
template void MILFrameLossLayer<float>::PredictMarginalFrameGPU(
const std::vector<Blob<float>*>& bottom,
const std::vector<Blob<float>*>& top);
template void MILFrameLossLayer<double>::PredictMarginalFrameGPU(
const std::vector<Blob<double>*>& bottom,
const std::vector<Blob<double>*>& top);
INSTANTIATE_LAYER_GPU_FUNCS(MILFrameLossLayer);
}
|
ba36018b0c193be3d42faa93abb1e1d3c7aa0ad1.cu
|
#include "caffe/loss_layers.hpp"
#include "assert.h"
namespace caffe {
template <typename Dtype>
__device__ Dtype gpu_log_sum_exp(int length, int offset, int increment, const Dtype* data){
int kmin = offset;
int kmax = offset + length*increment;
Dtype total = 0.;
Dtype max = data[kmin];
for(int k = kmin+increment; k < kmax; k+=increment){
if (data[k] > max ) max = data[k];
}
for(int k = kmin; k < kmax; k+=increment){
total += exp(data[k] - max);
}
//rv[0] =
return std::log(total) + max;
}
template <typename Dtype>
__global__ void MILFrameArgMarginal(const int nthreads,
const Dtype* arg_data, Dtype* arg_marginal,
int total_args, int total_bbs, const int arg, const int length){
CUDA_KERNEL_LOOP(index, nthreads) {
//compute my argument index
//each thread is tasked with computing one marginal. those are stored batch x arg x bb
//index = (i*total_args*total_bb + a*total_bb + b)
//int i = index / (total_args * total_bbs);
//int a = (index / total_bbs ) % total_args;
//int b = index % total_bbs;
//index = i*total_bb + b
int i = index / total_bbs;
int b = index % total_bbs;
int value_offset = i*total_bbs*length + b*length;
int marginal_offset = i*total_args*total_bbs + arg*total_bbs + b;
arg_marginal[marginal_offset] = gpu_log_sum_exp(length, value_offset, 1, arg_data);
}
}
template <typename Dtype>
__global__ void MILFrameVerbMarginal(const int nthreads,
const Dtype* bottom_verb, const Dtype* arg_marginal, Dtype* verb_marginal,
const Dtype* verb_start, const Dtype* verb_length,
int total_frames, int total_bbs, int total_args){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_frames*total_bbs + f*total_bbs + b
int i = index / ( total_frames * total_bbs);
int f = ( index / total_bbs ) % total_frames;
int b = index % total_bbs;
int verb_offset = i*total_bbs*total_frames + b*total_frames + f;
int arg_offset = i*total_args*total_bbs + ((int)verb_start[f])*total_bbs + b;
//int verb_marginal_offset = index;//i*total_frames*total_bbs + f*total_bbs + b;
int nargs = (int)verb_length[f];
Dtype total = bottom_verb[verb_offset];
for( int k = 0; k < nargs; k++){
total+= arg_marginal[arg_offset + k*total_bbs];
}
verb_marginal[index] = total;
}
}
template <typename Dtype>
__global__ void MILFrameNorm(const int nthreads,
Dtype* norms, const Dtype* verb_marginal,
int total_frames, int total_bbs){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_bbs + b
int i = index / total_bbs;
int b = index % total_bbs;
int verb_offset = i*total_bbs*total_frames + b;
norms[index] = gpu_log_sum_exp(total_frames, verb_offset, total_bbs, verb_marginal);
}
}
template <typename Dtype>
__global__ void MILFrameRefScore(const int nthreads,
const Dtype * score, Dtype* ref_scores,
int gold_label, int i, int r, int length, int total_bbs, int references, bool clear){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*references*total_bbs + r*total_bbs + b
//int i = index / (references * total_bbs);
//int r = (index / total_bbs) % references;
int b = index;// % total_bbs;
int ref_offset = i*references*total_bbs + r*total_bbs + b;
int value_index = i*total_bbs*length + b*length + gold_label;
ref_scores[ref_offset] = (clear ? 0 : ref_scores[ref_offset]) + score[value_index];
}
}
template <typename Dtype>
__global__ void MILFramePosScores(const int nthreads,
const Dtype * norms, const Dtype* ref_scores, Dtype * pos_scores,
int total_bbs, int references){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*references + r
int i = index / references;
int r = index % references;
Dtype _not1 = log(0.);
int ref_offset = i*references*total_bbs + r*total_bbs;
int norm_offset = i*total_bbs;
for( int b = 0; b < total_bbs; b++){
Dtype ref = ref_scores[ref_offset + b];
Dtype norm = norms[norm_offset + b];
Dtype value = ref-norm;
//_not1 = _not1 + value - _not1*value;
if( _not1 > value)
_not1 = log( 1 + exp(value - _not1) - exp(value)) + _not1;
else
_not1 = log( 1 + exp(_not1 - value) - exp(_not1)) + value;
}
//pos_scores stores log probabilities:
//_not1 = log P(at least one bb realizes this reference)
pos_scores[index] = _not1;
}
}
template <typename Dtype>
__global__ void MILFrameFillVerbMarginal(const int nthreads,
const Dtype* verb_marginal, const Dtype* norms, Dtype* output_scores, Dtype* output_structure,
int maxlabel, int total_frames, int total_bbs){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_frames + v
int i = index / total_frames;
int v = index % total_frames;
int offset = i*total_frames*total_bbs + v*total_bbs;
int norm_offset = i*total_bbs;
int score_offset = i*total_frames + v;
int structure_offset = i*total_frames*maxlabel + v*maxlabel;
Dtype not1 = 0.;
for(int b = 0; b < total_bbs; b++){
Dtype value = exp(verb_marginal[offset + b] - norms[norm_offset + b]);
not1 = not1 + value - not1*value;
}
output_scores[score_offset] = not1;
output_structure[structure_offset] = v;
}
}
template <typename Dtype>
__global__ void MILFrameFillScratch(const int nthreads,
const Dtype * arg_marginal, const Dtype * values, Dtype* scratch,
int arg, int length, int total_args, int total_bbs, int max_value){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*arg_length + v
int i = index / length;
int v = index % length;
int value_offset = i*total_bbs*length + v;
int marginal_offset = i*total_args*total_bbs + arg*total_bbs;
Dtype not1 = 0.;
//we need to look up the value for all bbs
for(int b = 0; b < total_bbs; b++){
Dtype value = exp(values[value_offset + b*length] - arg_marginal[marginal_offset + b]);
not1 = not1 + value - value*not1;
}
scratch[i*total_args*max_value + arg*max_value + v] = not1;
}
}
template <typename Dtype>
__global__ void MILFrameScratchMaxValue(const int nthreads,
const Dtype * scratch,const Dtype* arg_length, Dtype* max_scratch,
int total_args, int max_value){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_arguments + a
int i = index / total_args;
int a = index % total_args;
int value_offset = i*total_args*max_value + a*max_value;
int length = (int)arg_length[a];
Dtype mv = 0.;
int mi = 0;
for(int v = 0; v < length; v++){
Dtype value = scratch[value_offset + v];
if( value >= mv) {
mv = value;
mi = v;
}
}
max_scratch[index] = mi;
}
}
template <typename Dtype>
__global__ void MILFrameBackward(const int nthreads,
const Dtype* total_scores, const Dtype * ref_scores,const Dtype* norms, const Dtype * verb_marginal, const Dtype * labels, Dtype * diff,
int maxlabel, int total_frames, int total_bbs, int references){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_bbs*total_frames + b*total_frames + f
int i = index / (total_bbs * total_frames);
int b = ( index / total_frames) % total_bbs;
int f = index % total_frames;
Dtype bb_norm = norms[i*total_bbs + b];
Dtype image_score = total_scores[i];
Dtype _verb_marginal = verb_marginal[i*total_bbs*total_frames + f*total_bbs + b];
Dtype g = 0.;
for(int r = 0; r < references; r++){
int gold_verb = labels[i*references*maxlabel + r*maxlabel];
Dtype score = ref_scores[i*total_bbs*references + r*total_bbs + b] - bb_norm;
//gradient weight: -(P(no reference realized) / P(at least one realized)) * p_b / (1 - p_b); see the expansion below
//Dtype top_scalar = exp(log(1-exp(image_score)) - log(1 - exp(score)) + score);
//Dtype bottom_scalar = exp(image_score);
//Dtype scalar = -top_scalar/ bottom_scalar;
Dtype scalar = - exp(log(1-exp(image_score)) - image_score + score - log(1 - exp(score)));
if( scalar != scalar ) scalar = 0;
if( image_score == 0 && score == 0 ) scalar = 0;
//if( scalar > 1 ) scalar = 1;
g += scalar * ( (f == gold_verb ? 1 : 0) - exp(_verb_marginal - bb_norm) );
}
diff[index] = g;
}
}
template <typename Dtype>
__global__ void MILArgBackward(const int nthreads,
const Dtype* total_scores, const Dtype * ref_scores, const Dtype* norms, const Dtype * verb_marginal, const Dtype* arg_marginal, const Dtype * labels, const Dtype * values, Dtype * diff,
int maxlabel, int total_frames, int total_args, int total_bbs, int references, int arg_length, int f, int a, int arg){
CUDA_KERNEL_LOOP(index, nthreads) {
//index = i*total_bbs*arg_length + b*arg_length + v
int i = index / (total_bbs * arg_length);
int b = (index / arg_length) % total_bbs;
int v = index % arg_length;
Dtype bb_norm = norms[i*total_bbs + b];
Dtype image_score = total_scores[i];
Dtype _verb_marginal = verb_marginal[i*total_bbs*total_frames + f*total_bbs + b];
Dtype _arg_marginal = arg_marginal[i*total_bbs*total_args + a*total_bbs + b];
Dtype value_score = values[index];
int label_offset = i*references*maxlabel;
Dtype expected = exp( _verb_marginal - _arg_marginal + value_score - bb_norm);
Dtype g = 0.;
for(int r = 0; r < references; r++){
int gold_verb = labels[label_offset + r*maxlabel];
int gold_value = labels[label_offset + r*maxlabel + arg + 1];
Dtype score = ref_scores[i*total_bbs*references + r*total_bbs + b] - bb_norm;
//Dtype top_scalar = exp(log(1-exp(image_score)) - log(1 - exp(score)) + score);
//Dtype bottom_scalar = exp(image_score);
//Dtype scalar = -top_scalar/ bottom_scalar;
Dtype scalar = - exp(log(1-exp(image_score)) - image_score + score - log(1 - exp(score)));
if( scalar != scalar ) scalar = 0;
if( image_score == 0 && score == 0 ) scalar = 0;
//if( scalar > 1 ) scalar = 1;
g+= scalar * ( ( f == gold_verb && v == gold_value ? 1 : 0 ) - expected);
}
diff[index] = g;
}
}
template <typename Dtype>
void MILFrameLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int batch_size = bottom[this->label_index]->shape(0);
// LOG(INFO) << "MIL START FORWARD " << batch_size << " " << total_bbs;
//compute arg marginal
int am_jobs = batch_size*total_bbs;
for(int a = 0; a < total_args; a++){
MILFrameArgMarginal<Dtype><<<CAFFE_GET_BLOCKS(am_jobs), CAFFE_CUDA_NUM_THREADS>>>(am_jobs,
bottom[a]->gpu_data(), arg_marginal.mutable_gpu_data(),
total_args, total_bbs, a, arg_structure[a]);
CUDA_POST_KERNEL_CHECK;
}
//compute verb marginal
int vm_jobs = batch_size*total_frames*total_bbs;
MILFrameVerbMarginal<Dtype><<<CAFFE_GET_BLOCKS(vm_jobs), CAFFE_CUDA_NUM_THREADS>>>(
vm_jobs, bottom[verb_index]->gpu_data(), arg_marginal.gpu_data(),verb_marginal.mutable_gpu_data(),
b_verb_start.gpu_data(), b_verb_length.gpu_data(),
total_frames, total_bbs, total_args);
CUDA_POST_KERNEL_CHECK;
//compute the norm
int n_jobs = batch_size*total_bbs;
MILFrameNorm<Dtype><<<CAFFE_GET_BLOCKS(n_jobs), CAFFE_CUDA_NUM_THREADS>>>(
n_jobs, norms.mutable_gpu_data(), verb_marginal.gpu_data(),
total_frames, total_bbs);
CUDA_POST_KERNEL_CHECK;
//compute ref scores... an awkward way to avoid sending a pointer to the args
//honestly, for a low-bb setting this has extremely low parallelism and likely
//just avoids copying the previous layer to the cpu.
int r_jobs = total_bbs;
const Dtype* label = bottom[label_index]->cpu_data();
for(int i = 0; i < batch_size; i++){
for(int r = 0; r < references; r++){
int label_offset = i*maxlabel*references + r*maxlabel;
int gold_verb = label[label_offset];
MILFrameRefScore<Dtype><<<CAFFE_GET_BLOCKS(r_jobs), CAFFE_CUDA_NUM_THREADS>>>(
r_jobs, bottom[verb_index]->gpu_data(), ref_scores.mutable_gpu_data(),
gold_verb, i, r, total_frames, total_bbs, references, true);
CUDA_POST_KERNEL_CHECK;
int vlength = verb_length[gold_verb];
int arg_offset = verb_start[gold_verb];
for(int a = 0; a < vlength; a++){
int arg_index = arg_offset + a;
int arg_length = arg_structure[arg_index];
int gold_value = label[label_offset + a + 1];
MILFrameRefScore<Dtype><<<CAFFE_GET_BLOCKS(r_jobs), CAFFE_CUDA_NUM_THREADS>>>(
r_jobs, bottom[arg_index]->gpu_data(), ref_scores.mutable_gpu_data(),
gold_value, i, r, arg_length, total_bbs, references, false);
CUDA_POST_KERNEL_CHECK;
}
}
}
//compute positive scores
int p_jobs = batch_size * this->references;
MILFramePosScores<Dtype><<<CAFFE_GET_BLOCKS(p_jobs), CAFFE_CUDA_NUM_THREADS>>>(
p_jobs,
norms.gpu_data(), ref_scores.gpu_data(), pos_scores.mutable_gpu_data(),
total_bbs, references);
CUDA_POST_KERNEL_CHECK;
const Dtype* pos_scores = this->pos_scores.cpu_data();
Dtype* total_scores = this->total_scores.mutable_cpu_data();
Dtype total_score = 0;
for(int i = 0; i < batch_size; i++){
Dtype not1 = log(0.);
for(int r = 0; r < references; r++){
Dtype value = pos_scores[i*references + r]; //pos_scores[i*references + r] = log P(at least one bb realizes reference r)
//LOG(INFO) << i << "," << r << ":" << value;
if(value > 0 ) LOG(INFO) << "POS SCORE PROB GREATER THAN 1:" << value;
if( value != value) LOG(INFO) << "NAN value:" << value;
if( not1 > value)
not1 = log( 1 + exp(value - not1) - exp(value)) + not1;
else
not1 = log( 1 + exp(not1 - value) - exp(not1)) + value;
// not1 = not1 + value - not1*value;
}
//not1 = std::log(not1);
if(not1 != not1) LOG(INFO) << "NOT 1 NAN";
if(not1 > 0){
LOG(INFO) << "NOT1 PROB GREATER THAN 1:" << not1;
not1 = 0;
}
total_score += not1;
total_scores[i] = not1;
}
if(total_score != total_score) LOG(INFO) << "Total score nan" << total_score;
top[0]->mutable_cpu_data()[0] = (total_score)/batch_size;
//LOG(INFO) << "MIL END FORWARD";
if(this->mode == -1) return; //we aren't going to do inference; settle for just the probability
if(this->mode == 0) this->PredictMarginalFrameGPU(bottom, top);
}
template <typename Dtype>
void MILFrameLossLayer<Dtype>::PredictMarginalFrameGPU(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){
//LOG(INFO) << "MIL START PREDICT";
int batch_size = bottom[this->label_index]->shape(0);
//LOG(INFO)<< "verb marginal...";
//get the bb marginal in
int v_jobs = batch_size*total_frames;
MILFrameFillVerbMarginal<Dtype><<<CAFFE_GET_BLOCKS(v_jobs), CAFFE_CUDA_NUM_THREADS>>>( v_jobs,
verb_marginal.gpu_data(), norms.gpu_data(), top[2]->mutable_gpu_data(), top[1]->mutable_gpu_data(),
maxlabel, total_frames, total_bbs);
CUDA_POST_KERNEL_CHECK;
//LOG(INFO)<< "done.\n" << "scratch fill...";
//compute the pre value marginal
for(int a = 0; a < total_args; a++){
int s_jobs = batch_size * arg_structure[a];
MILFrameFillScratch<Dtype><<<CAFFE_GET_BLOCKS(s_jobs), CAFFE_CUDA_NUM_THREADS>>>(s_jobs,
arg_marginal.gpu_data(), bottom[a]->gpu_data(), scratch.mutable_gpu_data(),
a, arg_structure[a], total_args, total_bbs, max_value);
CUDA_POST_KERNEL_CHECK;
}
//LOG(INFO) << "done.\n" << "max value...";
int m_jobs = batch_size*total_args;
//compute the max over the marginal
MILFrameScratchMaxValue<Dtype><<<CAFFE_GET_BLOCKS(m_jobs), CAFFE_CUDA_NUM_THREADS>>>(m_jobs,
scratch.gpu_data(), b_arg_structure.gpu_data(), max_scratch.mutable_gpu_data(),
total_args, max_value);
CUDA_POST_KERNEL_CHECK;
//LOG(INFO) << "done.";
//this could be on gpu, but we need the actual output back anyways.
const Dtype* max_scratch = this->max_scratch.cpu_data();
Dtype* structure_output = top[1]->mutable_cpu_data();
//we need to copy max data to output
for( int i = 0; i < batch_size; i++){
for(int f = 0; f < total_frames; f++){
int total_arg = verb_length[f];
int arg_offset = verb_start[f];
int offset = i*total_frames*maxlabel + f*maxlabel;
int arg_max_offset = i*total_args;
for( int a = 0; a < total_arg; a++){
int arg_index = arg_offset + a;
structure_output[offset + a + 1] = max_scratch[arg_max_offset + arg_index];
}
}
}
//LOG(INFO) << "MIL END PREDICT";
}
template <typename Dtype>
void MILFrameLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
//LOG(INFO) << "BACKWARD START";
int batch_size = bottom[this->label_index]->shape(0);
const Dtype * labels = bottom[label_index]->gpu_data();
int f_jobs = batch_size * total_bbs * total_frames;
MILFrameBackward<Dtype><<<CAFFE_GET_BLOCKS(f_jobs), CAFFE_CUDA_NUM_THREADS>>>(f_jobs,
total_scores.gpu_data(), ref_scores.gpu_data(), norms.gpu_data(), verb_marginal.gpu_data(), labels, bottom[verb_index]->mutable_gpu_diff(),
maxlabel, total_frames, total_bbs, references);
CUDA_POST_KERNEL_CHECK;
/*
const Dtype * vdiff = bottom[verb_index]->cpu_diff();
for(int i = 0 ; i < f_jobs; i++){
if(vdiff[i] > 1.0 || vdiff[i] < -1.0)
LOG(INFO) << "VDIFF ERROR: " << vdiff[i];
}
*/
for(int f = 0; f < total_frames; f++){
int arg_offset = verb_start[f];
for( int arg = 0; arg < verb_length[f]; arg++){
int arg_index = arg_offset + arg;
int arg_length = arg_structure[arg_index];
int a_jobs = batch_size * total_bbs * arg_length;
MILArgBackward<Dtype><<<CAFFE_GET_BLOCKS(a_jobs), CAFFE_CUDA_NUM_THREADS>>>(a_jobs,
total_scores.gpu_data(), ref_scores.gpu_data(),norms.gpu_data(), verb_marginal.gpu_data(), arg_marginal.gpu_data(), labels, bottom[arg_index]->gpu_data(), bottom[arg_index]->mutable_gpu_diff(),
maxlabel, total_frames, total_args, total_bbs, references, arg_length, f, arg_index, arg);
CUDA_POST_KERNEL_CHECK;
/*
const Dtype * vdiff = bottom[arg_index]->cpu_diff();
for(int i = 0 ; i < a_jobs; i++){
if(vdiff[i] > 1.0 || vdiff[i] < -1.0) { LOG(INFO) << "ADIFF ERROR: " << arg_index << " " << vdiff[i];}
}
*/
}
}
//LOG(INFO) << "BACKWARD END";
}
template void MILFrameLossLayer<float>::PredictMarginalFrameGPU(
const std::vector<Blob<float>*>& bottom,
const std::vector<Blob<float>*>& top);
template void MILFrameLossLayer<double>::PredictMarginalFrameGPU(
const std::vector<Blob<double>*>& bottom,
const std::vector<Blob<double>*>& top);
INSTANTIATE_LAYER_GPU_FUNCS(MILFrameLossLayer);
}
|
83f9971cfc1f04d12132a4ab37829824ed7878a9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "CAIPLib.h"
#pragma comment(lib, "CAIPLib.lib")
using namespace std;
int main(int argc, char** argv)
{
caChar8* pFilename = NULL;
E_EDGE_DETECTOR_TYPE detectorType = EDT_DEFAULT;
caUInt8 threshold = 22;
TImage image;
if (argc != 7)
{
cout << "Incorrect count of arguments\n"
<< "Please, check out a format of input data.\n"
<< "It should be -f filename -t type_of_detector -q threshold_value\n";
return -1;
}
else if (argc == 7)
{
caUInt8 i = 1;
while (i < argc)
{
if (strcmp(argv[i], "-f") == 0)
{
i++;
pFilename = argv[i];
}
else if (strcmp(argv[i], "-t") == 0)
{
i++;
if (strcmp(argv[i], "lumdiff") == 0)
detectorType = EDT_LUM_DIFF;
else if (strcmp(argv[i], "adv_lumdiff") == 0)
detectorType = EDT_ADV_LUM_DIFF;
else if (strcmp(argv[i], "grad") == 0)
detectorType = EDT_GRADIENT;
else
{
cout << "Unknown type of detector\n";
return -1;
}
}
else if (strcmp(argv[i], "-q") == 0)
{
i++;
threshold = atoi(argv[i]);
}
i++;
}
}
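// Pipeline: load the image, convert it to grayscale, run the selected edge detector, and write result.tga.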
if (caCheckError(caLoadImage(pFilename, image)))
return -1;
if (caCheckError(caRgb2Gray(image, image)))
return -1;
if (caCheckError(caEdges(detectorType, image, image, threshold)))
return -1;
if (caCheckError(caSaveImage("result.tga\0", image)))
return -1;
if (caCheckError(caFreeImage(image)))
return -1;
system("pause");
}
|
83f9971cfc1f04d12132a4ab37829824ed7878a9.cu
|
#include <iostream>
#include <cuda_runtime.h>
#include "CAIPLib.h"
#pragma comment(lib, "CAIPLib.lib")
using namespace std;
int main(int argc, char** argv)
{
caChar8* pFilename = NULL;
E_EDGE_DETECTOR_TYPE detectorType = EDT_DEFAULT;
caUInt8 threshold = 22;
TImage image;
if (argc != 7)
{
cout << "Incorrect count of arguments\n"
<< "Please, check out a format of input data.\n"
<< "It should be -f filename -t type_of_detector -q threshold_value\n";
return -1;
}
else if (argc == 7)
{
caUInt8 i = 1;
while (i < argc)
{
if (strcmp(argv[i], "-f") == 0)
{
i++;
pFilename = argv[i];
}
else if (strcmp(argv[i], "-t") == 0)
{
i++;
if (strcmp(argv[i], "lumdiff") == 0)
detectorType = EDT_LUM_DIFF;
else if (strcmp(argv[i], "adv_lumdiff") == 0)
detectorType = EDT_ADV_LUM_DIFF;
else if (strcmp(argv[i], "grad") == 0)
detectorType = EDT_GRADIENT;
else
{
cout << "Unknown type of detector\n";
return -1;
}
}
else if (strcmp(argv[i], "-q") == 0)
{
i++;
threshold = atoi(argv[i]);
}
i++;
}
}
if (caCheckError(caLoadImage(pFilename, image)))
return -1;
if (caCheckError(caRgb2Gray(image, image)))
return -1;
if (caCheckError(caEdges(detectorType, image, image, threshold)))
return -1;
if (caCheckError(caSaveImage("result.tga\0", image)))
return -1;
if (caCheckError(caFreeImage(image)))
return -1;
system("pause");
}
|
f9184cea0d2dbbe91248dd048242f53bb502518d.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
#define COMPLEX
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#if (TORCH_HIP_VERSION >= 7000)
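// Each 32x32 diagonal block is solved by one warp: the right-hand side is kept in
// registers, and after each pivot division the solved entry is broadcast to the
// other lanes with __shfl and eliminated from the rows that still remain.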
__device__
void ztrsv_lower_32kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB[ 2 ];
magmaDoubleComplex rA[ 2 ];
int n;
int k;
int N = sizes[j];
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
#pragma unroll
for (n = 0; n < 2; n++)
rB[n] = dB[n*WARP_SIZE+idn];
// Triangular solve in regs.
#pragma unroll
for (k = 0; k < N; k++)
{
#pragma unroll
for (n = 0; n < 2; n++)
rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
magmaDoubleComplex top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn > k)
rB[n] -= (top*rA[n]);
}
// Drop B to dev mem.
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < N)
dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
__device__
void ztrsv_upper_32kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB[ 2 ];
magmaDoubleComplex rA[ 2 ];
int n;
int N = sizes[j];
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
#pragma unroll
for (n = 0; n < 2; n++)
rB[n] = dB[n*WARP_SIZE+idn];
// Triangular solve in regs.
#pragma unroll
for (int k = N-1; k > -1; k--)
{
#pragma unroll
for (n = 0; n < 2; n++)
rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
magmaDoubleComplex top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < k)
rB[n] -= (top*rA[n]);
}
// Drop B to dev mem.
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < N)
dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
__device__
void ztrsv_lower_32kernel_1(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 1; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 2; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 3; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 4; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 5; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 6; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 7; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 8; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_9(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 9; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_10(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 10; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_11(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 11; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_12(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 12; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_13(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 13; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_14(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 14; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_15(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 15; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_16(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 16; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_17(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 17; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_18(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 18; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_19(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 19; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_20(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 20; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_21(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 21; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_22(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 22; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_23(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 23; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_24(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 24; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_25(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 25; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_26(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 26; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_27(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 27; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_28(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 28; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_29(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 29; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_30(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 30; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_31(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 31; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_32(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__global__
void ztrsv_lower_32kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows )
{
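// Dispatch on the block size so each fixed-size specialization runs a fully unrolled solve loop; other sizes fall back to the general kernel.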
int j = blockIdx.y * gridDim.x + blockIdx.x;
if (j < num_rows) {
int N = sizes[j];
switch( N ) {
case 1:
ztrsv_lower_32kernel_1( dA, dB ); break;
case 2:
ztrsv_lower_32kernel_2( dA, dB ); break;
case 3:
ztrsv_lower_32kernel_3( dA, dB ); break;
case 4:
ztrsv_lower_32kernel_4( dA, dB ); break;
case 5:
ztrsv_lower_32kernel_5( dA, dB ); break;
case 6:
ztrsv_lower_32kernel_6( dA, dB ); break;
case 7:
ztrsv_lower_32kernel_7( dA, dB ); break;
case 8:
ztrsv_lower_32kernel_8( dA, dB ); break;
case 9:
ztrsv_lower_32kernel_9( dA, dB ); break;
case 10:
ztrsv_lower_32kernel_10( dA, dB ); break;
case 11:
ztrsv_lower_32kernel_11( dA, dB ); break;
case 12:
ztrsv_lower_32kernel_12( dA, dB ); break;
case 13:
ztrsv_lower_32kernel_13( dA, dB ); break;
case 14:
ztrsv_lower_32kernel_14( dA, dB ); break;
case 15:
ztrsv_lower_32kernel_15( dA, dB ); break;
case 16:
ztrsv_lower_32kernel_16( dA, dB ); break;
case 17:
ztrsv_lower_32kernel_17( dA, dB ); break;
case 18:
ztrsv_lower_32kernel_18( dA, dB ); break;
case 19:
ztrsv_lower_32kernel_19( dA, dB ); break;
case 20:
ztrsv_lower_32kernel_20( dA, dB ); break;
case 21:
ztrsv_lower_32kernel_21( dA, dB ); break;
case 22:
ztrsv_lower_32kernel_22( dA, dB ); break;
case 23:
ztrsv_lower_32kernel_23( dA, dB ); break;
case 24:
ztrsv_lower_32kernel_24( dA, dB ); break;
case 25:
ztrsv_lower_32kernel_25( dA, dB ); break;
case 26:
ztrsv_lower_32kernel_26( dA, dB ); break;
case 27:
ztrsv_lower_32kernel_27( dA, dB ); break;
case 28:
ztrsv_lower_32kernel_28( dA, dB ); break;
case 29:
ztrsv_lower_32kernel_29( dA, dB ); break;
case 30:
ztrsv_lower_32kernel_30( dA, dB ); break;
case 31:
ztrsv_lower_32kernel_31( dA, dB ); break;
case 32:
ztrsv_lower_32kernel_32( dA, dB ); break;
default:
ztrsv_lower_32kernel_general( dA, dB, sizes ); break;
}
}
}
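// Illustrative sketch only (not part of MAGMA): a minimal version of the
// register-based warp-level forward substitution that every
// ztrsv_lower_32kernel_N above unrolls for a fixed system size N. One warp
// solves one dense WARP_SIZE x WARP_SIZE lower-triangular system; lane i owns
// row i and finishes holding x_i. The helper name toy_warp_trsv_lower32 and
// the use of plain double instead of magmaDoubleComplex are assumptions made
// to keep the sketch self-contained; __shfl is the same warp-broadcast
// intrinsic the kernels above rely on.
__device__ __forceinline__
double toy_warp_trsv_lower32( const double *A, double b )
{
    const int lane = threadIdx.x % WARP_SIZE;   // row of the system owned by this lane
    double x = b;                               // right-hand side entry b_lane
    for (int k = 0; k < WARP_SIZE; k++) {
        double a = A[ k*WARP_SIZE + lane ];     // A(lane, k); tile stored column-major
        if (lane == k)
            x /= a;                             // diagonal lane finalizes x_k
        double xk = __shfl( x, k );             // broadcast x_k across the warp
        if (lane > k)
            x -= xk * a;                        // eliminate x_k from the rows below
    }
    return x;                                   // lane i returns x_i
}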
__device__
void ztrsv_upper_32kernel_1(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 1-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 2-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 3-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 4-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 5-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 6-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 7-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 8-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_9(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 9-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_10(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 10-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_11(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 11-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_12(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 12-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_13(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 13-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_14(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 14-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_15(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 15-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_16(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 16-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_17(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 17-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_18(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 18-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_19(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 19-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_20(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 20-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_21(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 21-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_22(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 22-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_23(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 23-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_24(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 24-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_25(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 25-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_26(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 26-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_27(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 27-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_28(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 28-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_29(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 29-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_30(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 30-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_31(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 31-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_32(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__global__
void ztrsv_upper_32kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows )
{
int j = blockIdx.y * gridDim.x + blockIdx.x;
if (j < num_rows) {
int N = sizes[j];
switch( N ) {
case 1:
ztrsv_upper_32kernel_1( dA, dB ); break;
case 2:
ztrsv_upper_32kernel_2( dA, dB ); break;
case 3:
ztrsv_upper_32kernel_3( dA, dB ); break;
case 4:
ztrsv_upper_32kernel_4( dA, dB ); break;
case 5:
ztrsv_upper_32kernel_5( dA, dB ); break;
case 6:
ztrsv_upper_32kernel_6( dA, dB ); break;
case 7:
ztrsv_upper_32kernel_7( dA, dB ); break;
case 8:
ztrsv_upper_32kernel_8( dA, dB ); break;
case 9:
ztrsv_upper_32kernel_9( dA, dB ); break;
case 10:
ztrsv_upper_32kernel_10( dA, dB ); break;
case 11:
ztrsv_upper_32kernel_11( dA, dB ); break;
case 12:
ztrsv_upper_32kernel_12( dA, dB ); break;
case 13:
ztrsv_upper_32kernel_13( dA, dB ); break;
case 14:
ztrsv_upper_32kernel_14( dA, dB ); break;
case 15:
ztrsv_upper_32kernel_15( dA, dB ); break;
case 16:
ztrsv_upper_32kernel_16( dA, dB ); break;
case 17:
ztrsv_upper_32kernel_17( dA, dB ); break;
case 18:
ztrsv_upper_32kernel_18( dA, dB ); break;
case 19:
ztrsv_upper_32kernel_19( dA, dB ); break;
case 20:
ztrsv_upper_32kernel_20( dA, dB ); break;
case 21:
ztrsv_upper_32kernel_21( dA, dB ); break;
case 22:
ztrsv_upper_32kernel_22( dA, dB ); break;
case 23:
ztrsv_upper_32kernel_23( dA, dB ); break;
case 24:
ztrsv_upper_32kernel_24( dA, dB ); break;
case 25:
ztrsv_upper_32kernel_25( dA, dB ); break;
case 26:
ztrsv_upper_32kernel_26( dA, dB ); break;
case 27:
ztrsv_upper_32kernel_27( dA, dB ); break;
case 28:
ztrsv_upper_32kernel_28( dA, dB ); break;
case 29:
ztrsv_upper_32kernel_29( dA, dB ); break;
case 30:
ztrsv_upper_32kernel_30( dA, dB ); break;
case 31:
ztrsv_upper_32kernel_31( dA, dB ); break;
case 32:
ztrsv_upper_32kernel_32( dA, dB ); break;
default:
ztrsv_upper_32kernel_general( dA, dB, sizes ); break;
}
}
}
// initialize arrays to zero
__global__ void
magma_zgpumemzero_32kernel(
magmaDoubleComplex * d,
int n,
int dim_x,
int dim_y )
{
int i = blockIdx.y * gridDim.x + blockIdx.x;
int idx = threadIdx.x;
if( i >= n ){
return;
}
if( idx >= dim_x ){
return;
}
for( int j=0; j<dim_y; j++)
d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_Z_MAKE( 0.0, 0.0 );
}
__global__ void
magma_zlocations_lower_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_zlocations_trunc_lower_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
// normal case
if( count <= BLOCKSIZE ){ // normal case
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}
else {
        // truncate this row to the blocksize:
        // take only the 32 elements closest to the main diagonal into account
count = BLOCKSIZE;
if (i == 0) {
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE;
}
locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ];
}
}// kernel
__global__ void
magma_zlocations_upper_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_zlocations_trunc_upper_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
// normal case
if( count <= BLOCKSIZE ){ // normal case
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}
else {
        // truncate this row to the blocksize:
        // take only the 32 elements closest to the main diagonal into account
count = BLOCKSIZE;
if (i == 0) {
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE;
}
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_zfilltrisystems_32kernel(
magma_int_t offset,
magma_int_t limit,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs )
{
int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset;
int ii = (blockDim.x * blockIdx.x + threadIdx.x);
if ( ii>=limit ){
return;
}
//if ( i<offset ){
// return;
//}
for( int j=0; j<sizes[ i ]; j++ ){// no need for first
int k = row[ locations[ j+i*WARP_SIZE ] ];
int l = i*WARP_SIZE;
int idx = 0;
while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == col[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ]
= val[ k ];
k++;
l++;
idx++;
} else if( col[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
// printf("increment l\n");
            l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}// kernel
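// Illustrative sketch only (not part of MAGMA): the sorted two-pointer walk
// that magma_zfilltrisystems_32kernel above uses to scatter one CSR row into a
// dense WARP_SIZE-wide tile row. 'pattern' plays the role of locations[],
// (cols, vals) the CSR row of the factor; pattern positions with no matching
// CSR entry are simply left at zero. The helper name toy_scatter_csr_row and
// the use of int/double instead of magma_index_t/magmaDoubleComplex are
// assumptions made to keep the sketch self-contained.
static inline void toy_scatter_csr_row(
    const int *pattern, int plen,           // sorted column pattern of the tile row
    const int *cols, const double *vals,    // CSR column indices and values
    int k, int kend,                        // CSR row range [k, kend)
    double *tile_row )                      // dense output, length plen, pre-zeroed
{
    int l = 0, idx = 0;
    while (k < kend && l < plen) {
        if (pattern[l] == cols[k]) {        // match: copy the value
            tile_row[idx] = vals[k];
            k++; l++; idx++;
        }
        else if (cols[k] < pattern[l]) {    // CSR entry not in the pattern: skip it
            k++;
        }
        else {                              // pattern entry absent from this row: leave zero
            l++; idx++;
        }
    }
}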
__global__ void
magma_zbackinsert_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magmaDoubleComplex *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
int end = sizes[j];
if( j >= n ){
return;
}
if ( i>=end ){
return;
}
val[row[j]+i] = rhs[j*WARP_SIZE+i];
}// kernel
// try to do everything in shared memory and registers!
//one thread block per row of A
__global__ void
magma_zlowertrisystems_32kernel_s(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
magmaDoubleComplex *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex rA;
__shared__ magmaDoubleComplex dA[32*32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j*32 + tid ] = MAGMA_Z_ZERO;
}
/*
    // for debugging: let thread 0 do everything
if (tid == 0) {
// first: generate the triangular systems
for (int j=0; j<size; j++) { // no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done
if (locations[ l ] == Acol[k]) { // match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ j*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
}
else if (Acol[k] < locations[ l ]) { // need to check next element
k++;
}
else { // element does not exist, i.e. l < LC.col[k]
                l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
__syncthreads();
*/
int k = Arow[ locations[ tid+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ tid*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
            l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[k*WARP_SIZE+tid];
if (k%WARP_SIZE == tid)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
__global__ void
magma_zuppertrisystems_32kernel_s(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
magmaDoubleComplex *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex rA;
__shared__ magmaDoubleComplex dA[32*32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j*32 + tid ] = MAGMA_Z_ZERO;
}
/*
    // for debugging: let thread 0 do everything
if (tid == 0) {
// first: generate the triangular systems
for (int j=0; j < size; j++) { // no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done
if (locations[ l ] == Acol[k]) { // match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ j*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
}
else if (Acol[k] < locations[ l ]) { // need to check next element
k++;
}
else { // element does not exist, i.e. l < LC.col[k]
                l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
__syncthreads();
*/
int k = Arow[ locations[ tid+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ tid*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
            l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == size-1) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+tid];
if (k%WARP_SIZE == tid)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
__global__ void
magma_zlowertrisystems_32kernel(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
magmaDoubleComplex *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex rA;
magmaDoubleComplex dA[32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j ] = MAGMA_Z_ZERO;
}
    // for debugging: let thread 0 do everything
//if(tid==0){
{
// first: generate the triangular systems
#pragma unroll
for( int j=0; j<size; j++ ){// no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
if( tid == idx ){
dA[ j ] = Aval[ k ];
}
//__syncthreads();
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
                l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
// not sure whether we need this here....
//__syncthreads();
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[ k ];
if (k%WARP_SIZE == tid)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
__global__ void
magma_zuppertrisystems_32kernel(
magma_int_t n,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex rA[32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
int size = mlim - mstart;
if( tid >= size ){
return;
}
// set rA to 0
for( int j=0; j<32; j++ ){
rA[ j ] = MAGMA_Z_ZERO;
}
// generate the triangular systems
#pragma unroll
for( int j=0; j<size; j++ ){// no need for first
int t = Mcol[ mstart + j ];
int k = Arow[ t ];
int l = mstart;
int idx = 0;
while( k < Arow[ t+1 ] && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
if( tid == idx ){
rA[ j ] = Aval[ k ];
}
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
            l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
// second: solve the triangular systems - in registers
    // we know what the RHS looks like
rB = (tid == size-1) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
if (k%32 == tid)
rB /= rA[k];
magmaDoubleComplex bottom = __shfl(rB, k%32);
if ( tid < k)
rB -= (bottom*rA[k]);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}// kernel
#endif // CUDA >= 7000
/**
Purpose
-------
    This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_z_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_z_matrix*
SPAI preconditioner CSR col-major
@param[out]
    sizes       magma_index_t*
                Number of elements that are replaced.
    @param[out]
    locations   magma_index_t*
                Array indicating the locations.
    @param[out]
    trisystems  magmaDoubleComplex*
                Workspace for the dense triangular systems.
@param[out]
rhs magmaDoubleComplex*
right-hand sides
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zisaigenerator_32_gpu(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_z_matrix L,
magma_z_matrix *M,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (TORCH_HIP_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
// routine 1
int r1bs1 = WARP_SIZE;
int r1bs2 = 1;
int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 );
int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535);
int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 );
dim3 r1block( r1bs1, r1bs2, 1 );
dim3 r1grid( r1dg1, r1dg2, r1dg3 );
int r2bs1 = WARP_SIZE;
int r2bs2 = 1;
int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 );
int r2dg2 = 1;
int r2dg3 = 1;
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
int r3bs1 = WARP_SIZE;
int r3bs2 = 1;
int r3dg1 = magma_ceildiv( 32000, r2bs1 );
int r3dg2 = 1;
int r3dg3 = 1;
dim3 r3block( r3bs1, r3bs2, 1 );
dim3 r3grid( r3dg1, r3dg2, r3dg3 );
int recursive = magma_ceildiv( M->num_rows, 32000 );
if (arch >= 300) {
hipLaunchKernelGGL(( magma_zgpumemzero_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
rhs, L.num_rows, WARP_SIZE, 1);
if (uplotype == MagmaLower) {
hipLaunchKernelGGL(( magma_zlocations_lower_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
locations,
trisystems,
rhs );
}
else {
hipLaunchKernelGGL(( magma_zlocations_upper_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
locations,
trisystems,
rhs );
}
/*
if (uplotype == MagmaLower) {
printf("in here lower\n");
magma_zlowertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.drow,
L.dcol,
L.dval,
M->drow,
M->dcol,
M->dval,
sizes,
locations );
}
else {
printf("in here upper\n");
magma_zuppertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.drow,
L.dcol,
L.dval,
M->drow,
M->dcol,
M->dval );
}
*/
        // chunk it into batches of 32000 systems
for (int z=0; z < recursive; z++) {
int limit = min(32000, L.num_rows-32000*z);
hipLaunchKernelGGL(( magma_zgpumemzero_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
trisystems, limit, WARP_SIZE, WARP_SIZE );
hipLaunchKernelGGL(( magma_zfilltrisystems_32kernel), dim3(r3grid), dim3(r3block), 0, queue->cuda_stream() ,
32000*z,
limit,
L.drow,
L.dcol,
L.dval,
sizes,
locations,
trisystems,
rhs );
// routine 2
if (uplotype == MagmaLower) {
hipLaunchKernelGGL(( ztrsv_lower_32kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
trisystems,
rhs+32000*32*z,
sizes+32000*z,
limit );
}
else {
hipLaunchKernelGGL(( ztrsv_upper_32kernel_switch), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
trisystems,
rhs+32000*32*z,
sizes+32000*z,
limit );
}
}
// routine 3
hipLaunchKernelGGL(( magma_zbackinsert_32kernel), dim3(r1grid), dim3(r1block), 0, queue->cuda_stream() ,
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
rhs );
}
else {
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA > 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
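// Illustrative sketch only (not part of MAGMA): allocates the scratch buffers
// with the sizes implied by the index arithmetic of the kernels above
// (WARP_SIZE entries of locations/rhs per row, one WARP_SIZE x WARP_SIZE tile
// per system, trisystems reused across chunks of at most 32000 systems). The
// helper name toy_isai32_workspace and the exact sizes are assumptions read
// off the kernels; magma_index_malloc and magma_zmalloc are the regular MAGMA
// device-memory allocators.
extern "C" magma_int_t
toy_isai32_workspace(
    magma_int_t num_rows,
    magma_index_t **sizes,
    magma_index_t **locations,
    magmaDoubleComplex **trisystems,
    magmaDoubleComplex **rhs )
{
    magma_int_t chunk = ( num_rows < 32000 ) ? num_rows : 32000;
    magma_int_t info = magma_index_malloc( sizes, num_rows );
    if (info == 0) info = magma_index_malloc( locations, num_rows * WARP_SIZE );
    if (info == 0) info = magma_zmalloc( trisystems, chunk * WARP_SIZE * WARP_SIZE );
    if (info == 0) info = magma_zmalloc( rhs, num_rows * WARP_SIZE );
    return info;
}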
|
f9184cea0d2dbbe91248dd048242f53bb502518d.cu
|
/*
-- MAGMA (version 1.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
#define COMPLEX
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <cuda.h> // for CUDA_VERSION
#if (CUDA_VERSION >= 7000)
__device__
void ztrsv_lower_32kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB[ 2 ];
magmaDoubleComplex rA[ 2 ];
int n;
int k;
int N = sizes[j];
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
#pragma unroll
for (n = 0; n < 2; n++)
rB[n] = dB[n*WARP_SIZE+idn];
// Triangular solve in regs.
#pragma unroll
for (k = 0; k < N; k++)
{
#pragma unroll
for (n = 0; n < 2; n++)
rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
magmaDoubleComplex top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn > k)
rB[n] -= (top*rA[n]);
}
// Drop B to dev mem.
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < N)
dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
__device__
void ztrsv_upper_32kernel_general(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes)
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB[ 2 ];
magmaDoubleComplex rA[ 2 ];
int n;
int N = sizes[j];
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
#pragma unroll
for (n = 0; n < 2; n++)
rB[n] = dB[n*WARP_SIZE+idn];
// Triangular solve in regs.
#pragma unroll
for (int k = N-1; k > -1; k--)
{
#pragma unroll
for (n = 0; n < 2; n++)
rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB[k/WARP_SIZE] /= rA[k/WARP_SIZE];
magmaDoubleComplex top = __shfl(rB[k/WARP_SIZE], k%WARP_SIZE);
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < k)
rB[n] -= (top*rA[n]);
}
// Drop B to dev mem.
#pragma unroll
for (n = 0; n < 2; n++)
if (n*WARP_SIZE+idn < N)
dB[n*WARP_SIZE+idn] = rB[n];
#endif
}
__device__
void ztrsv_lower_32kernel_1(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 1; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 2; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 3; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 4; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 5; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 6; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 7; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 8; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_9(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 9; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_10(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 10; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_11(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 11; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_12(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 12; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_13(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 13; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_14(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 14; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_15(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 15; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_16(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 16; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_17(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 17; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_18(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 18; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_19(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 19; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_20(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 20; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_21(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 21; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_22(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 22; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_23(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 23; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_24(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 24; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_25(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 25; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_26(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 26; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_27(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 27; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_28(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 28; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_29(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 29; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_30(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 30; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_31(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 31; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_lower_32kernel_32(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( idn > k)
rB -= (top*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__global__
void ztrsv_lower_32kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows )
{
int j = blockIdx.y * gridDim.x + blockIdx.x;
if (j < num_rows) {
int N = sizes[j];
switch( N ) {
case 1:
ztrsv_lower_32kernel_1( dA, dB ); break;
case 2:
ztrsv_lower_32kernel_2( dA, dB ); break;
case 3:
ztrsv_lower_32kernel_3( dA, dB ); break;
case 4:
ztrsv_lower_32kernel_4( dA, dB ); break;
case 5:
ztrsv_lower_32kernel_5( dA, dB ); break;
case 6:
ztrsv_lower_32kernel_6( dA, dB ); break;
case 7:
ztrsv_lower_32kernel_7( dA, dB ); break;
case 8:
ztrsv_lower_32kernel_8( dA, dB ); break;
case 9:
ztrsv_lower_32kernel_9( dA, dB ); break;
case 10:
ztrsv_lower_32kernel_10( dA, dB ); break;
case 11:
ztrsv_lower_32kernel_11( dA, dB ); break;
case 12:
ztrsv_lower_32kernel_12( dA, dB ); break;
case 13:
ztrsv_lower_32kernel_13( dA, dB ); break;
case 14:
ztrsv_lower_32kernel_14( dA, dB ); break;
case 15:
ztrsv_lower_32kernel_15( dA, dB ); break;
case 16:
ztrsv_lower_32kernel_16( dA, dB ); break;
case 17:
ztrsv_lower_32kernel_17( dA, dB ); break;
case 18:
ztrsv_lower_32kernel_18( dA, dB ); break;
case 19:
ztrsv_lower_32kernel_19( dA, dB ); break;
case 20:
ztrsv_lower_32kernel_20( dA, dB ); break;
case 21:
ztrsv_lower_32kernel_21( dA, dB ); break;
case 22:
ztrsv_lower_32kernel_22( dA, dB ); break;
case 23:
ztrsv_lower_32kernel_23( dA, dB ); break;
case 24:
ztrsv_lower_32kernel_24( dA, dB ); break;
case 25:
ztrsv_lower_32kernel_25( dA, dB ); break;
case 26:
ztrsv_lower_32kernel_26( dA, dB ); break;
case 27:
ztrsv_lower_32kernel_27( dA, dB ); break;
case 28:
ztrsv_lower_32kernel_28( dA, dB ); break;
case 29:
ztrsv_lower_32kernel_29( dA, dB ); break;
case 30:
ztrsv_lower_32kernel_30( dA, dB ); break;
case 31:
ztrsv_lower_32kernel_31( dA, dB ); break;
case 32:
ztrsv_lower_32kernel_32( dA, dB ); break;
default:
ztrsv_lower_32kernel_general( dA, dB, sizes ); break;
}
}
}
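// Note on the layout the fixed-size kernels above assume: block j of the
// launch owns one dense WARP_SIZE x WARP_SIZE triangular system stored at
// dA + j*WARP_SIZE*WARP_SIZE (column k living in dA[k*WARP_SIZE + 0..31]),
// with its right-hand side at dB + j*WARP_SIZE. Thread idn of the warp keeps
// row idn of the system in registers, and __shfl broadcasts the entry solved
// in lane k to the remaining lanes of the warp.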
__device__
void ztrsv_upper_32kernel_1(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 1-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_2(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 2-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_3(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 3-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_4(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 4-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_5(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 5-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_6(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 6-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_7(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 7-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_8(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 8-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_9(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 9-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_10(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 10-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_11(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 11-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_12(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 12-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_13(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 13-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_14(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 14-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_15(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 15-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_16(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 16-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_17(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 17-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_18(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 18-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_19(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 19-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_20(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 20-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_21(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 21-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_22(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 22-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_23(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 23-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_24(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 24-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_25(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 25-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_26(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 26-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_27(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 27-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_28(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 28-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_29(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 29-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_30(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 30-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_31(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 31-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__device__
void ztrsv_upper_32kernel_32(magmaDoubleComplex *dA, magmaDoubleComplex *dB )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int j = blockIdx.y * gridDim.x + blockIdx.x;
int idn = threadIdx.x;
magmaDoubleComplex rB;
magmaDoubleComplex rA;
dA += (j)*WARP_SIZE*WARP_SIZE;
dB += (j)*WARP_SIZE;
// Read B to regs.
rB = dB[idn];
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+idn];
if (k%WARP_SIZE == idn)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( idn < k)
rB -= (bottom*rA);
}
// Drop B to dev mem.
dB[idn] = rB;
#endif
}
__global__
void ztrsv_upper_32kernel_switch(magmaDoubleComplex *dA, magmaDoubleComplex *dB, int *sizes, int num_rows )
{
int j = blockIdx.y * gridDim.x + blockIdx.x;
if (j < num_rows) {
int N = sizes[j];
switch( N ) {
case 1:
ztrsv_upper_32kernel_1( dA, dB ); break;
case 2:
ztrsv_upper_32kernel_2( dA, dB ); break;
case 3:
ztrsv_upper_32kernel_3( dA, dB ); break;
case 4:
ztrsv_upper_32kernel_4( dA, dB ); break;
case 5:
ztrsv_upper_32kernel_5( dA, dB ); break;
case 6:
ztrsv_upper_32kernel_6( dA, dB ); break;
case 7:
ztrsv_upper_32kernel_7( dA, dB ); break;
case 8:
ztrsv_upper_32kernel_8( dA, dB ); break;
case 9:
ztrsv_upper_32kernel_9( dA, dB ); break;
case 10:
ztrsv_upper_32kernel_10( dA, dB ); break;
case 11:
ztrsv_upper_32kernel_11( dA, dB ); break;
case 12:
ztrsv_upper_32kernel_12( dA, dB ); break;
case 13:
ztrsv_upper_32kernel_13( dA, dB ); break;
case 14:
ztrsv_upper_32kernel_14( dA, dB ); break;
case 15:
ztrsv_upper_32kernel_15( dA, dB ); break;
case 16:
ztrsv_upper_32kernel_16( dA, dB ); break;
case 17:
ztrsv_upper_32kernel_17( dA, dB ); break;
case 18:
ztrsv_upper_32kernel_18( dA, dB ); break;
case 19:
ztrsv_upper_32kernel_19( dA, dB ); break;
case 20:
ztrsv_upper_32kernel_20( dA, dB ); break;
case 21:
ztrsv_upper_32kernel_21( dA, dB ); break;
case 22:
ztrsv_upper_32kernel_22( dA, dB ); break;
case 23:
ztrsv_upper_32kernel_23( dA, dB ); break;
case 24:
ztrsv_upper_32kernel_24( dA, dB ); break;
case 25:
ztrsv_upper_32kernel_25( dA, dB ); break;
case 26:
ztrsv_upper_32kernel_26( dA, dB ); break;
case 27:
ztrsv_upper_32kernel_27( dA, dB ); break;
case 28:
ztrsv_upper_32kernel_28( dA, dB ); break;
case 29:
ztrsv_upper_32kernel_29( dA, dB ); break;
case 30:
ztrsv_upper_32kernel_30( dA, dB ); break;
case 31:
ztrsv_upper_32kernel_31( dA, dB ); break;
case 32:
ztrsv_upper_32kernel_32( dA, dB ); break;
default:
ztrsv_upper_32kernel_general( dA, dB, sizes ); break;
}
}
}
// initialize arrays with zero
__global__ void
magma_zgpumemzero_32kernel(
magmaDoubleComplex * d,
int n,
int dim_x,
int dim_y )
{
int i = blockIdx.y * gridDim.x + blockIdx.x;
int idx = threadIdx.x;
if( i >= n ){
return;
}
if( idx >= dim_x ){
return;
}
for( int j=0; j<dim_y; j++)
d[ i*dim_x*dim_y + j*dim_y + idx ] = MAGMA_Z_MAKE( 0.0, 0.0 );
}
__global__ void
magma_zlocations_lower_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_zlocations_trunc_lower_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
// normal case
if( count <= BLOCKSIZE ){ // normal case
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}
else {
// truncate in this row to the blocksize,
// take only the 32 elements close to the main diagonal into account
count = BLOCKSIZE;
if (i == 0) {
sizes[j] = count;
rhs[ j*WARP_SIZE ] = MAGMA_Z_ONE;
}
locations[ j*WARP_SIZE + i ] = col[ row[j+1]-BLOCKSIZE+i ];
}
}// kernel
__global__ void
magma_zlocations_upper_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_zlocations_trunc_upper_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
if( j >= n ){
return;
}
int start = row[j];
int end = row[j+1];
int count = end-start;
// normal case
if( count <= BLOCKSIZE ){ // normal case
if( i == 0 ){
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE;
}
if ( i<count ){
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}
else {
// truncate in this row to the blocksize,
// take only the 32 elements close to the main diagonal into account
count = BLOCKSIZE;
if (i == 0) {
sizes[j] = count;
rhs[ j*WARP_SIZE+count-1 ] = MAGMA_Z_ONE;
}
locations[ j*WARP_SIZE + i ] = col[ row[j]+i ];
}
}// kernel
__global__ void
magma_zfilltrisystems_32kernel(
magma_int_t offset,
magma_int_t limit,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs )
{
int i = (blockDim.x * blockIdx.x + threadIdx.x)+offset;
int ii = (blockDim.x * blockIdx.x + threadIdx.x);
if ( ii>=limit ){
return;
}
//if ( i<offset ){
// return;
//}
for( int j=0; j<sizes[ i ]; j++ ){// no need for first
int k = row[ locations[ j+i*WARP_SIZE ] ];
int l = i*WARP_SIZE;
int idx = 0;
while( k < row[ locations[ j+i*WARP_SIZE ]+1 ] && l < (i+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == col[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
trisystems[ ii*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx ]
= val[ k ];
k++;
l++;
idx++;
} else if( col[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
// printf("increment l\n");
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}// kernel
__global__ void
magma_zbackinsert_32kernel(
magma_int_t n,
magma_index_t *row,
magma_index_t *col,
magmaDoubleComplex *val,
magma_index_t *sizes,
magmaDoubleComplex *rhs )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
int end = sizes[j];
if( j >= n ){
return;
}
if ( i>=end ){
return;
}
val[row[j]+i] = rhs[j*WARP_SIZE+i];
}// kernel
// try to do everything in shared memory and registers!
//one thread block per row of A
__global__ void
magma_zlowertrisystems_32kernel_s(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
magmaDoubleComplex *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex rA;
__shared__ magmaDoubleComplex dA[32*32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j*32 + tid ] = MAGMA_Z_ZERO;
}
/*
// for debugging: let thread 0 do everything
if (tid == 0) {
// first: generate the triangular systems
for (int j=0; j<size; j++) { // no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done
if (locations[ l ] == Acol[k]) { // match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ j*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
}
else if (Acol[k] < locations[ l ]) { // need to check next element
k++;
}
else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
__syncthreads();
*/
int k = Arow[ locations[ tid+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ tid*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[k*WARP_SIZE+tid];
if (k%WARP_SIZE == tid)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
__global__ void
magma_zuppertrisystems_32kernel_s(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
magmaDoubleComplex *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex rA;
__shared__ magmaDoubleComplex dA[32*32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j*32 + tid ] = MAGMA_Z_ZERO;
}
/*
// for debugging: let thread 0 do everything
if (tid == 0) {
// first: generate the triangular systems
for (int j=0; j < size; j++) { // no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while (k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE) { // stop once this column is done
if (locations[ l ] == Acol[k]) { // match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ j*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
}
else if (Acol[k] < locations[ l ]) { // need to check next element
k++;
}
else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
__syncthreads();
*/
int k = Arow[ locations[ tid+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ tid+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
dA[ tid*32 + idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == size-1) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
rA = dA[k*WARP_SIZE+tid];
if (k%WARP_SIZE == tid)
rB /= rA;
magmaDoubleComplex bottom = __shfl(rB, k%WARP_SIZE);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
__global__ void
magma_zlowertrisystems_32kernel(
magma_int_t n,
magma_index_t *Arow,
magma_index_t *Acol,
magmaDoubleComplex *Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval,
magma_index_t *sizes,
magma_index_t *locations )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex rA;
magmaDoubleComplex dA[32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int size = sizes[ row ];
if( tid >= size ){
return;
}
// set dA to 0
for( int j=0; j<32; j++ ){
dA[ j ] = MAGMA_Z_ZERO;
}
// for debugging: let thread 0 do everything
//if(tid==0){
{
// first: generate the triangular systems
#pragma unroll
for( int j=0; j<size; j++ ){// no need for first
int k = Arow[ locations[ j+row*WARP_SIZE ] ];
int l = row*WARP_SIZE;
int idx = 0;
while( k < Arow[ locations[ j+row*WARP_SIZE ]+1 ] && l < (row+1)*WARP_SIZE ){ // stop once this column is done
if( locations[ l ] == Acol[k] ){ //match
if( tid == idx ){
dA[ j ] = Aval[ k ];
}
//__syncthreads();
// int loc = i*WARP_SIZE*WARP_SIZE + j*WARP_SIZE + idx;
k++;
l++;
idx++;
} else if( Acol[k] < locations[ l ] ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
}
// not sure whether we need this here....
//__syncthreads();
// second: solve the triangular systems - in registers
// Read B to regs.
rB = (tid == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < 32; k++)
{
rA = dA[ k ];
if (k%WARP_SIZE == tid)
rB /= rA;
magmaDoubleComplex top = __shfl(rB, k%WARP_SIZE);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ Mrow[row] + tid ] = rB;
#endif
}// kernel
__global__ void
magma_zuppertrisystems_32kernel(
magma_int_t n,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int row = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.x;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex rA[32];
// only if within this chunk
if ( row>=n ){
return;
}
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
int size = mlim - mstart;
if( tid >= size ){
return;
}
// set rA to 0
for( int j=0; j<32; j++ ){
rA[ j ] = MAGMA_Z_ZERO;
}
// generate the triangular systems
#pragma unroll
for( int j=0; j<size; j++ ){// no need for first
int t = Mcol[ mstart + j ];
int k = Arow[ t ];
int l = mstart;
int idx = 0;
while( k < Arow[ t+1 ] && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
if( tid == idx ){
rA[ j ] = Aval[ k ];
}
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
}
// second: solve the triangular systems - in registers
// we know what the right-hand side looks like
rB = (tid == size-1) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 32-1; k >-1; k--)
{
if (k%32 == tid)
rB /= rA[k];
magmaDoubleComplex bottom = __shfl(rB, k%32);
if ( tid < k)
rB -= (bottom*rA[k]);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}// kernel
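// The four *trisystems_32kernel variants above trade storage for generality:
// the _s versions stage the dense 32x32 system in shared memory, the non-_s
// lower version keeps row tid of the system in a per-thread dA[32] array, and
// the non-_s upper version reads the sparsity pattern directly from M
// (Mrow/Mcol) and keeps its row in the register array rA[32].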
#endif // CUDA >= 7000
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_z_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_z_matrix*
SPAI preconditioner CSR col-major
@param[out]
sizes magma_int_t*
Number of Elements that are replaced.
@param[out]
locations magma_int_t*
Array indicating the locations.
@param[out]
trisystems magmaDoubleComplex*
trisystems
@param[out]
rhs magmaDoubleComplex*
right-hand sides
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
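//
// Minimal call sketch (illustrative only, not part of the original source).
// The workspace arrays sizes, locations, trisystems and rhs are assumed to be
// device buffers already allocated as the kernels above expect (WARP_SIZE
// entries per row for locations/rhs, WARP_SIZE*WARP_SIZE per system for
// trisystems):
//
//     magma_z_matrix L;    // sparse triangular factor, already on the GPU
//     magma_z_matrix M;    // ISAI pattern/values, CSR, on the GPU
//     magma_int_t info = magma_zisaigenerator_32_gpu(
//         MagmaLower, MagmaNoTrans, MagmaNonUnit,
//         L, &M, sizes, locations, trisystems, rhs, queue );
//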
extern "C" magma_int_t
magma_zisaigenerator_32_gpu(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_z_matrix L,
magma_z_matrix *M,
magma_index_t *sizes,
magma_index_t *locations,
magmaDoubleComplex *trisystems,
magmaDoubleComplex *rhs,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (CUDA_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// routine 1
int r1bs1 = WARP_SIZE;
int r1bs2 = 1;
int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 );
int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535);
int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 );
dim3 r1block( r1bs1, r1bs2, 1 );
dim3 r1grid( r1dg1, r1dg2, r1dg3 );
int r2bs1 = WARP_SIZE;
int r2bs2 = 1;
int r2dg1 = magma_ceildiv( L.num_rows, r2bs1 );
int r2dg2 = 1;
int r2dg3 = 1;
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
int r3bs1 = WARP_SIZE;
int r3bs2 = 1;
int r3dg1 = magma_ceildiv( 32000, r2bs1 );
int r3dg2 = 1;
int r3dg3 = 1;
dim3 r3block( r3bs1, r3bs2, 1 );
dim3 r3grid( r3dg1, r3dg2, r3dg3 );
int recursive = magma_ceildiv( M->num_rows, 32000 );
if (arch >= 300) {
magma_zgpumemzero_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
rhs, L.num_rows, WARP_SIZE, 1);
if (uplotype == MagmaLower) {
magma_zlocations_lower_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
locations,
trisystems,
rhs );
}
else {
magma_zlocations_upper_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
locations,
trisystems,
rhs );
}
/*
if (uplotype == MagmaLower) {
printf("in here lower\n");
magma_zlowertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.drow,
L.dcol,
L.dval,
M->drow,
M->dcol,
M->dval,
sizes,
locations );
}
else {
printf("in here upper\n");
magma_zuppertrisystems_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.drow,
L.dcol,
L.dval,
M->drow,
M->dcol,
M->dval );
}
*/
// chunk it recursively into batches of 32000 rows
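// (illustrative example: for num_rows = 70000, recursive = 3 and the batches
// cover rows [0,32000), [32000,64000) and [64000,70000))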
for (int z=0; z < recursive; z++) {
int limit = min(32000, L.num_rows-32000*z);
magma_zgpumemzero_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
trisystems, limit, WARP_SIZE, WARP_SIZE );
magma_zfilltrisystems_32kernel<<< r3grid, r3block, 0, queue->cuda_stream() >>>(
32000*z,
limit,
L.drow,
L.dcol,
L.dval,
sizes,
locations,
trisystems,
rhs );
// routine 2
if (uplotype == MagmaLower) {
ztrsv_lower_32kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
trisystems,
rhs+32000*32*z,
sizes+32000*z,
limit );
}
else {
ztrsv_upper_32kernel_switch<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
trisystems,
rhs+32000*32*z,
sizes+32000*z,
limit );
}
}
// routine 3
magma_zbackinsert_32kernel<<< r1grid, r1block, 0, queue->cuda_stream() >>>(
M->num_rows,
M->drow,
M->dcol,
M->dval,
sizes,
rhs );
}
else {
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA >= 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
|
61ff8d23d69bb3f452c773c4f59c0e14bfb3ea5c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <kernels/gpu/prelu.h>
#include <core/tensor_builder.h>
#include "backend/name.h"
#include "global/operator_factory.h"
#include "global/fp16_operator_factory.h"
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <device_launch_parameters.h>
#include "kernels/gpu/gpu_kernel.h"
namespace ts {
namespace gpu {
template<typename T>
__global__ static void prelu_kernel(const T* input_data, T* output_data,const T*slope, int dim_num, int last_dims, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size)
{
int slope_index = index % (dim_num * last_dims) / last_dims;
T val = input_data[index];
T max_temp = val > T(0) ? val : T(0);
T min_temp = val < T(0) ? val : T(0);
output_data[index] = max_temp + slope[slope_index] * min_temp;
}
}
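// Indexing sketch (illustrative, not from the original source): for an NCHW
// input with dim == 1 we get dim_num == C and last_dims == H*W, so
// slope_index = index % (C*H*W) / (H*W) selects the per-channel slope;
// e.g. with C = 3, H = W = 2 the flat index 7 (n=0, c=1, h=1, w=1) maps to
// slope_index 1.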
#ifdef TS_USE_CUDA_FP16
template<>
__global__ void prelu_kernel<half>(const half* input_data, half* output_data, const half*slope, int dim_num, int last_dims, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
half zero(0.f);
if (index < size)
{
int slope_index = index % (dim_num * last_dims) / last_dims;
half val = input_data[index];
half max_temp = val > zero ? val : zero;
half min_temp = val < zero ? val : zero;
output_data[index] = max_temp + slope[slope_index] * min_temp;
}
}
#endif
template <typename T>
void cpu_prelu_compute_run(const Tensor &x, const Tensor &slope, int dim, Tensor &out) {
auto output_shape = out.sizes();
const T *input_data = x.data<T>();
T *output_data = out.data<T>();
const T *slope_data = slope.data<T>();
int count = out.count();
int dim_num = output_shape[dim];
int last_dims = 1;
for (int i = dim + 1; i < output_shape.size(); i++) {
last_dims *= output_shape[i];
}
dim3 blockSize(CUDA_THREAD_NUM);
dim3 gridSize(CUDA_BLOCK(count, blockSize.x));
RUN_KERNEL(prelu_kernel<T>, gridSize, blockSize,
input_data, output_data, slope_data, dim_num, last_dims, count);
}
void PReLU::prelu(const Tensor &x, const Tensor &slope, int dim, Tensor &out) {
// Notice: all of the tensors' memory devices are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { cpu_prelu_compute_run<TYPE>(x, slope, dim, out); break; }
//DECLARE_COMPUTE_RUN(INT8, int8_t);
//DECLARE_COMPUTE_RUN(UINT8, uint8_t);
//DECLARE_COMPUTE_RUN(INT16, int16_t);
//DECLARE_COMPUTE_RUN(UINT16, uint16_t);
//DECLARE_COMPUTE_RUN(INT32, int32_t);
//DECLARE_COMPUTE_RUN(UINT32, uint32_t);
//DECLARE_COMPUTE_RUN(INT64, int64_t);
//DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(PReLU, GPU, name::layer::prelu())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(PReLU, GPU, name::layer::prelu())
#endif
|
61ff8d23d69bb3f452c773c4f59c0e14bfb3ea5c.cu
|
#include <kernels/gpu/prelu.h>
#include <core/tensor_builder.h>
#include "backend/name.h"
#include "global/operator_factory.h"
#include "global/fp16_operator_factory.h"
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <device_launch_parameters.h>
#include "kernels/gpu/gpu_kernel.h"
namespace ts {
namespace gpu {
template<typename T>
__global__ static void prelu_kernel(const T* input_data, T* output_data,const T*slope, int dim_num, int last_dims, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size)
{
int slope_index = index % (dim_num * last_dims) / last_dims;
T val = input_data[index];
T max_temp = val > T(0) ? val : T(0);
T min_temp = val < T(0) ? val : T(0);
output_data[index] = max_temp + slope[slope_index] * min_temp;
}
}
#ifdef TS_USE_CUDA_FP16
template<>
__global__ void prelu_kernel<half>(const half* input_data, half* output_data, const half*slope, int dim_num, int last_dims, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
half zero(0.f);
if (index < size)
{
int slope_index = index % (dim_num * last_dims) / last_dims;
half val = input_data[index];
half max_temp = val > zero ? val : zero;
half min_temp = val < zero ? val : zero;
output_data[index] = max_temp + slope[slope_index] * min_temp;
}
}
#endif
template <typename T>
void cpu_prelu_compute_run(const Tensor &x, const Tensor &slope, int dim, Tensor &out) {
auto output_shape = out.sizes();
const T *input_data = x.data<T>();
T *output_data = out.data<T>();
const T *slope_data = slope.data<T>();
int count = out.count();
int dim_num = output_shape[dim];
int last_dims = 1;
for (int i = dim + 1; i < output_shape.size(); i++) {
last_dims *= output_shape[i];
}
dim3 blockSize(CUDA_THREAD_NUM);
dim3 gridSize(CUDA_BLOCK(count, blockSize.x));
RUN_KERNEL(prelu_kernel<T>, gridSize, blockSize,
input_data, output_data, slope_data, dim_num, last_dims, count);
}
void PReLU::prelu(const Tensor &x, const Tensor &slope, int dim, Tensor &out) {
// Notice: all of the tensors' memory devices are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { cpu_prelu_compute_run<TYPE>(x, slope, dim, out); break; }
//DECLARE_COMPUTE_RUN(INT8, int8_t);
//DECLARE_COMPUTE_RUN(UINT8, uint8_t);
//DECLARE_COMPUTE_RUN(INT16, int16_t);
//DECLARE_COMPUTE_RUN(UINT16, uint16_t);
//DECLARE_COMPUTE_RUN(INT32, int32_t);
//DECLARE_COMPUTE_RUN(UINT32, uint32_t);
//DECLARE_COMPUTE_RUN(INT64, int64_t);
//DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(PReLU, GPU, name::layer::prelu())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(PReLU, GPU, name::layer::prelu())
#endif
|
663ee470a7b2392a45184cacd1d0728f5004abc3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <helper_cuda.h>
__device__ __host__ __inline__ size_t flatten_2d_index(const size_t x, const size_t y, const size_t w) {
return (y * w) + x;
}
__global__
void resize_kernel(const hipTextureObject_t tex, float4 * __restrict__ output, const size_t outw, const size_t outh) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
size_t idy = blockIdx.y * blockDim.y + threadIdx.y;
// outw/outh are the output dimensions in pixels; discard the threads of the
// partial tiles at the right/bottom edge of the grid
if (idx >= outw || idy >= outh) return;
// interpolation coordinates (the texture uses normalized coordinates, so map
// the pixel index into [0, 1])
float int_x = float(idx) / float(outw - 1);
float int_y = float(idy) / float(outh - 1);
size_t out_idx = flatten_2d_index(idx, idy, outw);
//float4 tmp;
//tmp.x = tmp.y = tmp.z = tmp.w = int_y;
output[out_idx] = tex2D<float4>(tex, int_x, int_y);
}
extern "C" {
void cuda_resize(const float4 *image, float4 *new_image, const size_t sizew, const size_t sizeh, const size_t neww, const size_t newh)
{
//
//hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>();
//hipArray* cuArray;
//hipMallocArray(&cuArray, &channelDesc, sizew, sizeh);
//hipMemcpyToArray(cuArray, 0, 0, image, sizew*sizeh*sizeof(float4), hipMemcpyHostToDevice);
//
//
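// Design note: instead of staging the image through a hipArray (the
// commented-out path above), the texture object below is bound directly to a
// pitched device allocation via a pitch2D resource descriptor, which avoids
// the extra array copy while still giving hardware bilinear filtering.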
size_t pitch;
float4 *d_image = 0;
hipMallocPitch<float4>(&d_image, &pitch, sizew*sizeof(float4), sizeh);
hipMemcpy2D(d_image, pitch, image, sizew*sizeof(float4),
sizew*sizeof(float4), sizeh, hipMemcpyHostToDevice);
//
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
//resDesc.resType = hipResourceTypeArray;
//resDesc.res.array.array = cuArray;
resDesc.resType = hipResourceTypePitch2D;
resDesc.res.pitch2D.desc = hipCreateChannelDesc<float4>();
resDesc.res.pitch2D.devPtr = d_image;
resDesc.res.pitch2D.height = sizeh;
resDesc.res.pitch2D.width = sizew;
resDesc.res.pitch2D.pitchInBytes = pitch;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.addressMode[1] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModeLinear;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = 1;
// create texture object
hipTextureObject_t tex = 0;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
// make c and interp grid dev pointers
float4 *d_new_image;
hipMalloc((void **)&d_new_image, neww * newh * sizeof(float4));
dim3 blocksize(16, 16);
// round the grid up so the last partial tile in each dimension is covered
dim3 gridsize((neww + blocksize.x - 1) / blocksize.x, (newh + blocksize.y - 1) / blocksize.y);
// the kernel takes the output dimensions in pixels and derives the normalized
// texture coordinates itself
hipLaunchKernelGGL(( resize_kernel), dim3(gridsize), dim3(blocksize), 0, 0, tex, d_new_image, neww, newh);
// copy c back to host
hipMemcpy(new_image, d_new_image, neww * newh * sizeof(float4), hipMemcpyDeviceToHost);
// clean up
hipFree(d_new_image);
hipDestroyTextureObject(tex);
//hipFreeArray(cuArray);
hipFree(d_image);
}
}
|
663ee470a7b2392a45184cacd1d0728f5004abc3.cu
|
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <helper_cuda.h>
__device__ __host__ __inline__ size_t flatten_2d_index(const size_t x, const size_t y, const size_t w) {
return (y * w) + x;
}
__global__
void resize_kernel(const cudaTextureObject_t tex, float4 * __restrict__ output, const size_t outw, const size_t outh) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
size_t idy = blockIdx.y * blockDim.y + threadIdx.y;
// outw/outh are the output dimensions in pixels; discard the threads of the
// partial tiles at the right/bottom edge of the grid
if (idx >= outw || idy >= outh) return;
// interpolation coordinates (the texture uses normalized coordinates, so map
// the pixel index into [0, 1])
float int_x = float(idx) / float(outw - 1);
float int_y = float(idy) / float(outh - 1);
size_t out_idx = flatten_2d_index(idx, idy, outw);
//float4 tmp;
//tmp.x = tmp.y = tmp.z = tmp.w = int_y;
output[out_idx] = tex2D<float4>(tex, int_x, int_y);
}
extern "C" {
void cuda_resize(const float4 *image, float4 *new_image, const size_t sizew, const size_t sizeh, const size_t neww, const size_t newh)
{
//
//cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>();
//cudaArray* cuArray;
//cudaMallocArray(&cuArray, &channelDesc, sizew, sizeh);
//cudaMemcpyToArray(cuArray, 0, 0, image, sizew*sizeh*sizeof(float4), cudaMemcpyHostToDevice);
//
//
size_t pitch;
float4 *d_image = 0;
cudaMallocPitch<float4>(&d_image, &pitch, sizew*sizeof(float4), sizeh);
cudaMemcpy2D(d_image, pitch, image, sizew*sizeof(float4),
sizew*sizeof(float4), sizeh, cudaMemcpyHostToDevice);
//
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
//resDesc.resType = cudaResourceTypeArray;
//resDesc.res.array.array = cuArray;
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.desc = cudaCreateChannelDesc<float4>();
resDesc.res.pitch2D.devPtr = d_image;
resDesc.res.pitch2D.height = sizeh;
resDesc.res.pitch2D.width = sizew;
resDesc.res.pitch2D.pitchInBytes = pitch;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 1;
// create texture object
cudaTextureObject_t tex = 0;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
// make c and interp grid dev pointers
float4 *d_new_image;
cudaMalloc((void **)&d_new_image, neww * newh * sizeof(float4));
dim3 blocksize(16, 16);
// round the grid up so the last partial tile in each dimension is covered
dim3 gridsize((neww + blocksize.x - 1) / blocksize.x, (newh + blocksize.y - 1) / blocksize.y);
// the kernel takes the output dimensions in pixels and derives the normalized
// texture coordinates itself
resize_kernel<<<gridsize, blocksize>>>(tex, d_new_image, neww, newh);
// copy c back to host
cudaMemcpy(new_image, d_new_image, neww * newh * sizeof(float4), cudaMemcpyDeviceToHost);
// clean up
cudaFree(d_new_image);
cudaDestroyTextureObject(tex);
//cudaFreeArray(cuArray);
cudaFree(d_image);
}
}
|
54b3180aacd27887097079f880cc0855e5609f9e.hip
|
// !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
template void linearRow<short, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
|
54b3180aacd27887097079f880cc0855e5609f9e.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
template void linearRow<short, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
c8668e3931bc5f26fe6dffed1261aed2395be75c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#include <rocm_smi/rocm_smi.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __var_4__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_0__ <= (M-3)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_1__ <= (N-3)){
float __temp_0__;
__temp_0__ = (7 * input[__iter_0__+(M-0)*(__iter_1__+(-2))]);
float __temp_1__;
__temp_1__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(-1))]);
float __temp_2__;
__temp_2__ = (__temp_0__ + __temp_1__);
float __temp_3__;
__temp_3__ = (9 * input[__iter_0__+(-2)+(M-0)*(__iter_1__)]);
float __temp_4__;
__temp_4__ = (__temp_2__ + __temp_3__);
float __temp_5__;
__temp_5__ = (12 * input[__iter_0__+(-1)+(M-0)*(__iter_1__)]);
float __temp_6__;
__temp_6__ = (__temp_4__ + __temp_5__);
float __temp_7__;
__temp_7__ = (15 * input[__iter_0__+(M-0)*(__iter_1__)]);
float __temp_8__;
__temp_8__ = (__temp_6__ + __temp_7__);
float __temp_9__;
__temp_9__ = (12 * input[__iter_0__+(1)+(M-0)*(__iter_1__)]);
float __temp_10__;
__temp_10__ = (__temp_8__ + __temp_9__);
float __temp_11__;
__temp_11__ = (9 * input[__iter_0__+(2)+(M-0)*(__iter_1__)]);
float __temp_12__;
__temp_12__ = (__temp_10__ + __temp_11__);
float __temp_13__;
__temp_13__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(1))]);
float __temp_14__;
__temp_14__ = (__temp_12__ + __temp_13__);
float __temp_15__;
__temp_15__ = (7 * input[__iter_0__+(M-0)*(__iter_1__+(2))]);
float __temp_16__;
__temp_16__ = (__temp_14__ + __temp_15__);
float __temp_17__;
__temp_17__ = (__temp_16__ / 118);
__var_4__[__iter_0__+(M-0)*(__iter_1__)] = __temp_17__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi (float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
hipMalloc(&__var_2__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-3) - 2 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-3) - 2 ) + 1;
int __max_occupancy_blocksize___kernel___forma_kernel__0__;
int _max_occupancy_gridsize___kernel___forma_kernel__0__;
hipOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0);
int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2));
__max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
// Power measurement via ROCm SMI (the HIP-side counterpart of the NVML calls in the CUDA original).
uint64_t power1, power2;
rsmi_status_t result;
uint32_t device = 0;
result = rsmi_init(0);
assert(RSMI_STATUS_SUCCESS == result);
// NVML's power-management-mode query has no direct ROCm SMI counterpart and is omitted.
result = rsmi_dev_power_ave_get(device, 0, &power1);
assert(RSMI_STATUS_SUCCESS == result);
hipDeviceSynchronize();
for (int x=0; x<1000; x++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __var_2__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, N, M, __var_1__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_1__, N, M, __var_2__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, N, M, __var_1__);
}
hipDeviceSynchronize();
result = rsmi_dev_power_ave_get(device, 0, &power2);
assert(RSMI_STATUS_SUCCESS == result);
power2 -= power1;
printf("%llu\n", (unsigned long long)power2);
rsmi_shut_down();
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
}
/*Host Free End*/
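// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the generated file): jacobi() above accepts
// host pointers and performs the device transfers itself, so a caller only
// needs two N*M float buffers. The 1024x1024 size is made up for illustration.
// ---------------------------------------------------------------------------
#ifdef JACOBI_USAGE_EXAMPLE
#include <vector>
int main() {
    const int N = 1024, M = 1024;                    // rows and columns
    std::vector<float> in(N * M, 1.0f), out(N * M, 0.0f);
    jacobi(in.data(), N, M, out.data());             // 1000 iterations of 4 smoothing sweeps each
    return 0;
}
#endif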
|
c8668e3931bc5f26fe6dffed1261aed2395be75c.cu
|
#include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#include <nvml.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __var_4__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_0__ <= (M-3)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_1__ <= (N-3)){
float __temp_0__;
__temp_0__ = (7 * input[__iter_0__+(M-0)*(__iter_1__+(-2))]);
float __temp_1__;
__temp_1__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(-1))]);
float __temp_2__;
__temp_2__ = (__temp_0__ + __temp_1__);
float __temp_3__;
__temp_3__ = (9 * input[__iter_0__+(-2)+(M-0)*(__iter_1__)]);
float __temp_4__;
__temp_4__ = (__temp_2__ + __temp_3__);
float __temp_5__;
__temp_5__ = (12 * input[__iter_0__+(-1)+(M-0)*(__iter_1__)]);
float __temp_6__;
__temp_6__ = (__temp_4__ + __temp_5__);
float __temp_7__;
__temp_7__ = (15 * input[__iter_0__+(M-0)*(__iter_1__)]);
float __temp_8__;
__temp_8__ = (__temp_6__ + __temp_7__);
float __temp_9__;
__temp_9__ = (12 * input[__iter_0__+(1)+(M-0)*(__iter_1__)]);
float __temp_10__;
__temp_10__ = (__temp_8__ + __temp_9__);
float __temp_11__;
__temp_11__ = (9 * input[__iter_0__+(2)+(M-0)*(__iter_1__)]);
float __temp_12__;
__temp_12__ = (__temp_10__ + __temp_11__);
float __temp_13__;
__temp_13__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(1))]);
float __temp_14__;
__temp_14__ = (__temp_12__ + __temp_13__);
float __temp_15__;
__temp_15__ = (7 * input[__iter_0__+(M-0)*(__iter_1__+(2))]);
float __temp_16__;
__temp_16__ = (__temp_14__ + __temp_15__);
float __temp_17__;
__temp_17__ = (__temp_16__ / 118);
__var_4__[__iter_0__+(M-0)*(__iter_1__)] = __temp_17__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi (float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
cudaMalloc(&__var_2__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-3) - 2 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-3) - 2 ) + 1;
int __max_occupancy_blocksize___kernel___forma_kernel__0__;
int _max_occupancy_gridsize___kernel___forma_kernel__0__;
cudaOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0);
int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2));
__max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
unsigned int power1, power2;
nvmlReturn_t result;
nvmlDevice_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(NVML_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(NVML_SUCCESS == result);
cudaDeviceSynchronize();
for (int x=0; x<1000; x++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __var_2__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, N, M, __var_1__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_1__, N, M, __var_2__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, N, M, __var_1__);
}
cudaDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(NVML_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
}
/*Host Free End*/
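// ---------------------------------------------------------------------------
// Hedged verification sketch (not part of the generated file): a plain CPU
// version of a single stencil sweep using the same weights (7,5,9,12,15,12,
// 9,5,7 over 118) and the same interior range [2, M-3] x [2, N-3] as
// __kernel___forma_kernel__0__ above. The helper name jacobi_step_cpu is an
// invention of this sketch, intended only for checking GPU results.
// ---------------------------------------------------------------------------
#ifdef JACOBI_CPU_REFERENCE
static void jacobi_step_cpu(const float* in, int N, int M, float* out) {
    for (int y = 2; y <= N - 3; ++y) {
        for (int x = 2; x <= M - 3; ++x) {
            float acc = 7.0f  * in[x + M * (y - 2)]
                      + 5.0f  * in[x + M * (y - 1)]
                      + 9.0f  * in[(x - 2) + M * y]
                      + 12.0f * in[(x - 1) + M * y]
                      + 15.0f * in[x + M * y]
                      + 12.0f * in[(x + 1) + M * y]
                      + 9.0f  * in[(x + 2) + M * y]
                      + 5.0f  * in[x + M * (y + 1)]
                      + 7.0f  * in[x + M * (y + 2)];
            out[x + M * y] = acc / 118.0f;
        }
    }
}
#endif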
|
b29c21496aeec8c6256d423b216336d4a1c07acb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include <hip/hip_runtime.h>
#include "reduce_hip.cuh"
int main(int argc, char** argv) {
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
unsigned int n = atoi(argv[1]);
unsigned int thread_per_block = atoi(argv[2]);
int* A = new int[n];
int sum;
for (unsigned int i = 0; i < n; i++)
{
A[i] = 1;
}
hipEventRecord(startEvent, 0);
sum = reduce(A,n,thread_per_block);
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, startEvent, stopEvent);
std::printf("%d \n", sum);
std::printf("%f \n", elapsedTime);
// Cleanup
delete[] A;
hipEventDestroy(startEvent);
hipEventDestroy(stopEvent);
return 0;
}
|
b29c21496aeec8c6256d423b216336d4a1c07acb.cu
|
#include<iostream>
#include <cuda.h>
#include "reduce.cuh"
int main(int argc, char** argv) {
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
unsigned int n = atoi(argv[1]);
unsigned int thread_per_block = atoi(argv[2]);
int* A = new int[n];
int sum;
for (unsigned int i = 0; i < n; i++)
{
A[i] = 1;
}
cudaEventRecord(startEvent, 0);
sum = reduce(A,n,thread_per_block);
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent);
std::printf("%d \n", sum);
std::printf("%f \n", elapsedTime);
// Cleanup
delete[] A;
cudaEventDestroy(startEvent);
cudaEventDestroy(stopEvent);
return 0;
}
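// ---------------------------------------------------------------------------
// Hedged sketch (not part of the original benchmark): reduce.cuh is not shown
// here, so this is only a guess at one possible implementation matching the
// call site reduce(A, n, thread_per_block) with a host-resident input array.
// It assumes thread_per_block is a power of two; guarded so it never collides
// with the real definition.
// ---------------------------------------------------------------------------
#ifdef REDUCE_SKETCH
__global__ void reduce_partial(const int* in, int* out, unsigned int n) {
    extern __shared__ int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? in[i] : 0;                 // one element per thread, 0-padded
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {   // shared-memory tree reduction
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = sdata[0];         // one partial sum per block
}
int reduce(int* A, unsigned int n, unsigned int threads_per_block) {
    unsigned int blocks = (n + threads_per_block - 1) / threads_per_block;
    int *d_in, *d_partial;
    cudaMalloc(&d_in, n * sizeof(int));
    cudaMalloc(&d_partial, blocks * sizeof(int));
    cudaMemcpy(d_in, A, n * sizeof(int), cudaMemcpyHostToDevice);
    reduce_partial<<<blocks, threads_per_block, threads_per_block * sizeof(int)>>>(d_in, d_partial, n);
    int* partial = new int[blocks];
    cudaMemcpy(partial, d_partial, blocks * sizeof(int), cudaMemcpyDeviceToHost);
    int sum = 0;                                      // finish the reduction on the host
    for (unsigned int b = 0; b < blocks; b++) sum += partial[b];
    delete[] partial;
    cudaFree(d_in);
    cudaFree(d_partial);
    return sum;
}
#endif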
|
52750ef748c16daa26ecdd01d41af9ac0944c41c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
#if (__CUDACC_VER_MAJOR__ >= 9)
#define SHFL0(v) __shfl_sync(~0, v, 0)
#define SHFL(v, t) __shfl_sync(~0, v, t)
#define SHFL_XOR(v, m) __shfl_xor_sync(~0, v, m)
#define SYNCWARP() __syncwarp()
#define BALLOT(v) __ballot_sync(~0, v)
#else
#define SHFL0(v) __shfl(v, 0)
#define SHFL(v, t) __shfl(v, t)
#define SHFL_XOR(v, m) __shfl_xor(v, m)
#define SYNCWARP()
#define BALLOT(v) __ballot(v)
#endif
#define WARP_REDUCE_SUM_2(sum) sum += SHFL_XOR(sum, 1)
#define WARP_REDUCE_SUM_4(sum) WARP_REDUCE_SUM_2(sum); sum += SHFL_XOR(sum, 2)
#define WARP_REDUCE_SUM_8(sum) WARP_REDUCE_SUM_4(sum); sum += SHFL_XOR(sum, 4)
#define WARP_REDUCE_SUM_16(sum) WARP_REDUCE_SUM_8(sum); sum += SHFL_XOR(sum, 8)
#define WARP_REDUCE_SUM_32(sum) WARP_REDUCE_SUM_16(sum); sum += SHFL_XOR(sum, 16)
#define WARP_REDUCE_POS_2(pos, tmp, t) tmp = SHFL(pos, t & 0x1e); pos += (t & 1) ? tmp : 0;
#define WARP_REDUCE_POS_4(pos, tmp, t) WARP_REDUCE_POS_2(pos, tmp, t); tmp = SHFL(pos, (t & 0x1c) | 1); pos += (t & 2) ? tmp : 0;
#define WARP_REDUCE_POS_8(pos, tmp, t) WARP_REDUCE_POS_4(pos, tmp, t); tmp = SHFL(pos, (t & 0x18) | 3); pos += (t & 4) ? tmp : 0;
#define WARP_REDUCE_POS_16(pos, tmp, t) WARP_REDUCE_POS_8(pos, tmp, t); tmp = SHFL(pos, (t & 0x10) | 7); pos += (t & 8) ? tmp : 0;
#define WARP_REDUCE_POS_32(pos, tmp, t) WARP_REDUCE_POS_16(pos, tmp, t); tmp = SHFL(pos, 0xf); pos += (t & 16) ? tmp : 0;
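// Note on the macros above: WARP_REDUCE_SUM_n leaves every lane of a group of n
// consecutive lanes holding the group's total (butterfly reduction via XOR shuffles),
// while WARP_REDUCE_POS_n leaves lane t holding the inclusive prefix sum of its group
// of n lanes (shuffle-based scan). For example, if every lane starts with pos = 1,
// WARP_REDUCE_POS_32 leaves lane t holding t + 1.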
#define MAX_SHORT_DICT_ENTRIES (10*1024)
#define INIT_HASH_BITS 12
/**
* @brief Compares two strings
*/
template<class T, const T lesser, const T greater, const T equal>
inline __device__ T nvstr_compare(const char *as, uint32_t alen, const char *bs, uint32_t blen)
{
uint32_t len = min(alen, blen);
uint32_t i = 0;
if (len >= 4)
{
uint32_t align_a = 3 & reinterpret_cast<uintptr_t>(as);
uint32_t align_b = 3 & reinterpret_cast<uintptr_t>(bs);
const uint32_t *as32 = reinterpret_cast<const uint32_t *>(as - align_a);
const uint32_t *bs32 = reinterpret_cast<const uint32_t *>(bs - align_b);
uint32_t ofsa = align_a * 8;
uint32_t ofsb = align_b * 8;
do {
uint32_t a = *as32++;
uint32_t b = *bs32++;
if (ofsa)
a = __funnelshift_r(a, *as32, ofsa);
if (ofsb)
b = __funnelshift_r(b, *bs32, ofsb);
if (a != b)
{
return (lesser == greater || __byte_perm(a, 0, 0x0123) < __byte_perm(b, 0, 0x0123)) ? lesser : greater;
}
i += 4;
} while (i + 4 <= len);
}
while (i < len)
{
uint8_t a = as[i];
uint8_t b = bs[i];
if (a != b)
{
return (a < b) ? lesser : greater;
}
++i;
}
return (alen == blen) ? equal : (alen < blen) ? lesser : greater;
}
static inline bool __device__ nvstr_is_lesser(const char *as, uint32_t alen, const char *bs, uint32_t blen)
{
return nvstr_compare<bool, true, false, false>(as, alen, bs, blen);
}
static inline bool __device__ nvstr_is_equal(const char *as, uint32_t alen, const char *bs, uint32_t blen)
{
return nvstr_compare<bool, false, false, true>(as, alen, bs, blen);
}
struct dictinit_state_s
{
uint32_t nnz;
uint32_t total_dupes;
DictionaryChunk chunk;
volatile uint32_t scratch_red[32];
uint16_t dict[MAX_SHORT_DICT_ENTRIES];
union {
uint16_t u16[1 << (INIT_HASH_BITS)];
uint32_t u32[1 << (INIT_HASH_BITS - 1)];
} map;
};
/**
* @brief Return a 12-bit hash from a byte sequence
*/
static inline __device__ uint32_t nvstr_init_hash(const uint8_t *ptr, uint32_t len)
{
if (len != 0)
{
return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << INIT_HASH_BITS) - 1);
}
else
{
return 0;
}
}
/**
* @brief Fill dictionary with the indices of non-null rows
*
* @param[in,out] s dictionary builder state
* @param[in] t thread id
*
**/
static __device__ void LoadNonNullIndices(volatile dictinit_state_s *s, int t)
{
if (t == 0)
{
s->nnz = 0;
}
for (uint32_t i = 0; i < s->chunk.num_rows; i += 512)
{
const uint32_t *valid_map = s->chunk.valid_map_base;
uint32_t is_valid, nz_map, nz_pos;
if (t < 16)
{
if (!valid_map)
{
s->scratch_red[t] = 0xffffffffu;
}
else
{
uint32_t row = s->chunk.start_row + i + t * 32;
uint32_t v = (row < s->chunk.start_row + s->chunk.num_rows) ? valid_map[row >> 5] : 0;
if (row & 0x1f)
{
uint32_t v1 = (row + 32 < s->chunk.start_row + s->chunk.num_rows) ? valid_map[(row >> 5) + 1] : 0;
v = __funnelshift_r(v, v1, row & 0x1f);
}
s->scratch_red[t] = v;
}
}
__syncthreads();
is_valid = (i + t < s->chunk.num_rows) ? (s->scratch_red[t >> 5] >> (t & 0x1f)) & 1 : 0;
nz_map = BALLOT(is_valid);
nz_pos = s->nnz + __popc(nz_map & (0x7fffffffu >> (0x1fu - ((uint32_t)t & 0x1f))));
if (!(t & 0x1f))
{
s->scratch_red[16 + (t >> 5)] = __popc(nz_map);
}
__syncthreads();
if (t < 32)
{
uint32_t nnz = s->scratch_red[16 + (t & 0xf)];
uint32_t nnz_pos = nnz, tmp;
WARP_REDUCE_POS_16(nnz_pos, tmp, t);
if (t == 0xf)
{
s->nnz += nnz_pos;
}
if (t <= 0xf)
{
s->scratch_red[t] = nnz_pos - nnz;
}
}
__syncthreads();
if (is_valid)
{
s->dict[nz_pos + s->scratch_red[t >> 5]] = i + t;
}
__syncthreads();
}
}
/**
* @brief Gather all non-NULL string rows and compute total character data size
*
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*
**/
// blockDim {512,1,1}
extern "C" __global__ void __launch_bounds__(512, 3)
gpuInitDictionaryIndices(DictionaryChunk *chunks, uint32_t num_columns)
{
__shared__ __align__(16) dictinit_state_s state_g;
dictinit_state_s * const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t group_id = blockIdx.y;
const nvstrdesc_s *ck_data;
uint32_t *dict_data;
uint32_t nnz, start_row, dict_char_count;
int t = threadIdx.x;
if (t < sizeof(DictionaryChunk) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[group_id * num_columns + col_id])[t];
}
for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += 512)
{
if (i + t < sizeof(s->map) / sizeof(uint32_t))
s->map.u32[i + t] = 0;
}
__syncthreads();
// First, take care of NULLs, and count how many strings we have (TODO: bypass this step when there are no nulls)
LoadNonNullIndices(s, t);
// Sum the lengths of all the strings
if (t == 0)
{
s->chunk.string_char_count = 0;
s->total_dupes = 0;
}
nnz = s->nnz;
dict_data = s->chunk.dict_data;
start_row = s->chunk.start_row;
ck_data = reinterpret_cast<const nvstrdesc_s *>(s->chunk.column_data_base) + start_row;
for (uint32_t i = 0; i < nnz; i += 512)
{
uint32_t ck_row = 0, len = 0, hash;
const uint8_t *ptr = 0;
if (i + t < nnz)
{
ck_row = s->dict[i + t];
ptr = reinterpret_cast<const uint8_t *>(ck_data[ck_row].ptr);
len = ck_data[ck_row].count;
hash = nvstr_init_hash(ptr, len);
}
WARP_REDUCE_SUM_16(len);
s->scratch_red[t >> 4] = len;
__syncthreads();
if (t < 32)
{
len = s->scratch_red[t];
WARP_REDUCE_SUM_32(len);
if (t == 0)
s->chunk.string_char_count += len;
}
if (i + t < nnz)
{
atomicAdd(&s->map.u32[hash >> 1], 1 << ((hash & 1) ? 16 : 0));
dict_data[i + t] = start_row + ck_row;
}
__syncthreads();
}
// Reorder the 16-bit local indices according to the hash value of the strings
#if (INIT_HASH_BITS != 12)
#error "Hardcoded for INIT_HASH_BITS=12"
#endif
{
// Cumulative sum of hash map counts
uint32_t count01 = s->map.u32[t * 4 + 0];
uint32_t count23 = s->map.u32[t * 4 + 1];
uint32_t count45 = s->map.u32[t * 4 + 2];
uint32_t count67 = s->map.u32[t * 4 + 3];
uint32_t sum01 = count01 + (count01 << 16);
uint32_t sum23 = count23 + (count23 << 16);
uint32_t sum45 = count45 + (count45 << 16);
uint32_t sum67 = count67 + (count67 << 16);
uint32_t sum_w, tmp;
sum23 += (sum01 >> 16) * 0x10001;
sum45 += (sum23 >> 16) * 0x10001;
sum67 += (sum45 >> 16) * 0x10001;
sum_w = sum67 >> 16;
WARP_REDUCE_POS_16(sum_w, tmp, t);
if ((t & 0xf) == 0xf)
{
s->scratch_red[t >> 4] = sum_w;
}
__syncthreads();
if (t < 32)
{
uint32_t sum_b = s->scratch_red[t];
WARP_REDUCE_POS_32(sum_b, tmp, t);
s->scratch_red[t] = sum_b;
}
__syncthreads();
tmp = (t >= 16) ? s->scratch_red[(t >> 4) - 1] : 0;
sum_w = (sum_w - (sum67 >> 16) + tmp) * 0x10001;
s->map.u32[t * 4 + 0] = sum_w + sum01 - count01;
s->map.u32[t * 4 + 1] = sum_w + sum23 - count23;
s->map.u32[t * 4 + 2] = sum_w + sum45 - count45;
s->map.u32[t * 4 + 3] = sum_w + sum67 - count67;
__syncthreads();
}
// Put the indices back in hash order
for (uint32_t i = 0; i < nnz; i += 512)
{
uint32_t ck_row = 0, pos = 0, hash = 0, pos_old, pos_new, sh, colliding_row;
bool collision;
if (i + t < nnz)
{
const uint8_t *ptr;
uint32_t len;
ck_row = dict_data[i + t] - start_row;
ptr = reinterpret_cast<const uint8_t *>(ck_data[ck_row].ptr);
len = (uint32_t)ck_data[ck_row].count;
hash = nvstr_init_hash(ptr, len);
sh = (hash & 1) ? 16 : 0;
pos_old = s->map.u16[hash];
}
// The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic behavior for the
// first row in the hash map that will be used for early duplicate detection
// The lack of 16-bit atomicMin makes this a bit messy...
__syncthreads();
if (i + t < nnz)
{
pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff;
s->dict[pos] = ck_row;
}
__syncthreads();
collision = false;
if (i + t < nnz)
{
pos_new = s->map.u16[hash];
collision = (pos != pos_old && pos_new > pos_old + 1);
if (collision)
{
colliding_row = s->dict[pos_old];
}
}
__syncthreads();
// evens
if (collision && !(pos_old & 1))
{
uint32_t *dict32 = reinterpret_cast<uint32_t *>(&s->dict[pos_old]);
atomicMin(dict32, (dict32[0] & 0xffff0000) | ck_row);
}
__syncthreads();
// odds
if (collision && (pos_old & 1))
{
uint32_t *dict32 = reinterpret_cast<uint32_t *>(&s->dict[pos_old-1]);
atomicMin(dict32, (dict32[0] & 0x0000ffff) | (ck_row << 16));
}
__syncthreads();
// Resolve collision
if (collision && ck_row == s->dict[pos_old])
{
s->dict[pos] = colliding_row;
}
}
__syncthreads();
// Now that the strings are ordered by hash, compare every string with the first entry in the hash map,
// the position of the first string can be inferred from the hash map counts
dict_char_count = 0;
for (uint32_t i = 0; i < nnz; i += 512)
{
uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0, dupe_mask, dupes_before;
if (i + t < nnz)
{
const char *str1, *str2;
uint32_t len1, len2, hash;
ck_row = s->dict[i + t];
str1 = ck_data[ck_row].ptr;
len1 = (uint32_t)ck_data[ck_row].count;
hash = nvstr_init_hash(reinterpret_cast<const uint8_t *>(str1), len1);
ck_row_ref = s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0];
if (ck_row_ref != ck_row)
{
str2 = ck_data[ck_row_ref].ptr;
len2 = (uint32_t)ck_data[ck_row_ref].count;
is_dupe = nvstr_is_equal(str1, len1, str2, len2);
dict_char_count += (is_dupe) ? 0 : len1;
}
}
dupe_mask = BALLOT(is_dupe);
dupes_before = s->total_dupes + __popc(dupe_mask & ((2 << (t & 0x1f)) - 1));
if (!(t & 0x1f))
{
s->scratch_red[t >> 5] = __popc(dupe_mask);
}
__syncthreads();
if (t < 32)
{
uint32_t warp_dupes = (t < 16) ? s->scratch_red[t] : 0;
uint32_t warp_pos = warp_dupes, tmp;
WARP_REDUCE_POS_16(warp_pos, tmp, t);
if (t == 0xf)
{
s->total_dupes += warp_pos;
}
if (t < 16)
{
s->scratch_red[t] = warp_pos - warp_dupes;
}
}
__syncthreads();
if (i + t < nnz)
{
if (!is_dupe)
{
dupes_before += s->scratch_red[t >> 5];
dict_data[i + t - dupes_before] = ck_row + start_row;
}
else
{
s->chunk.dict_index[ck_row + start_row] = (ck_row_ref + start_row) | (1u << 31);
}
}
}
WARP_REDUCE_SUM_32(dict_char_count);
if (!(t & 0x1f))
{
s->scratch_red[t >> 5] = dict_char_count;
}
__syncthreads();
if (t < 32)
{
dict_char_count = (t < 16) ? s->scratch_red[t] : 0;
WARP_REDUCE_SUM_16(dict_char_count);
}
if (!t)
{
chunks[group_id * num_columns + col_id].num_strings = nnz;
chunks[group_id * num_columns + col_id].string_char_count = s->chunk.string_char_count;
chunks[group_id * num_columns + col_id].num_dict_strings = nnz - s->total_dupes;
chunks[group_id * num_columns + col_id].dict_char_count = dict_char_count;
}
}
/**
* @brief In-place concatenate dictionary data for all chunks in each stripe
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*
**/
// blockDim {1024,1,1}
extern "C" __global__ void __launch_bounds__(1024)
gpuCompactChunkDictionaries(StripeDictionary *stripes, DictionaryChunk *chunks, uint32_t num_columns)
{
__shared__ __align__(16) StripeDictionary stripe_g;
__shared__ __align__(16) DictionaryChunk chunk_g;
__shared__ const uint32_t* volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t chunk_len;
int t = threadIdx.x;
const uint32_t *src;
uint32_t *dst;
if (t < sizeof(StripeDictionary) / sizeof(uint32_t))
{
((volatile uint32_t *)&stripe_g)[t] = ((const uint32_t *)&stripes[stripe_id * num_columns + col_id])[t];
}
__syncthreads();
if (t < sizeof(DictionaryChunk) / sizeof(uint32_t))
{
((volatile uint32_t *)&chunk_g)[t] = ((const uint32_t *)&chunks[stripe_g.start_chunk * num_columns + col_id])[t];
}
__syncthreads();
dst = stripe_g.dict_data + chunk_g.num_dict_strings;
for (uint32_t g = 1; g < stripe_g.num_chunks; g++)
{
if (!t)
{
src = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].dict_data;
chunk_len = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].num_dict_strings;
ck_curptr_g = src;
ck_curlen_g = chunk_len;
}
__syncthreads();
src = ck_curptr_g;
chunk_len = ck_curlen_g;
if (src != dst)
{
for (uint32_t i = 0; i < chunk_len; i += 1024)
{
uint32_t idx = (i + t < chunk_len) ? src[i + t] : 0;
__syncthreads();
if (i + t < chunk_len)
dst[i + t] = idx;
}
}
dst += chunk_len;
__syncthreads();
}
}
struct build_state_s
{
uint32_t total_dupes;
StripeDictionary stripe;
volatile uint32_t scratch_red[32];
};
/**
* @brief Eliminate duplicates in-place and generate column dictionary index
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] num_columns Number of string columns
*
**/
// NOTE: Prone to poor utilization on small datasets due to 1 block per dictionary
// blockDim {1024,1,1}
extern "C" __global__ void __launch_bounds__(1024)
gpuBuildStripeDictionaries(StripeDictionary *stripes, uint32_t num_columns)
{
__shared__ __align__(16) build_state_s state_g;
volatile build_state_s * const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t num_strings;
uint32_t *dict_data, *dict_index;
uint32_t dict_char_count;
const nvstrdesc_s *str_data;
int t = threadIdx.x;
if (t < sizeof(StripeDictionary) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->stripe)[t] = ((const uint32_t *)&stripes[stripe_id * num_columns + col_id])[t];
}
if (t == 31 * 32)
{
s->total_dupes = 0;
}
__syncthreads();
num_strings = s->stripe.num_strings;
dict_data = s->stripe.dict_data;
if (!dict_data)
return;
dict_index = s->stripe.dict_index;
str_data = reinterpret_cast<const nvstrdesc_s *>(s->stripe.column_data_base);
dict_char_count = 0;
for (uint32_t i = 0; i < num_strings; i += 1024)
{
uint32_t cur = (i + t < num_strings) ? dict_data[i + t] : 0;
uint32_t dupe_mask, dupes_before, cur_len = 0;
const char *cur_ptr;
bool is_dupe = false;
if (i + t < num_strings)
{
cur_ptr = str_data[cur].ptr;
cur_len = str_data[cur].count;
}
if (i + t != 0 && i + t < num_strings)
{
uint32_t prev = dict_data[i + t - 1];
is_dupe = nvstr_is_equal(cur_ptr, cur_len, str_data[prev].ptr, str_data[prev].count);
}
dict_char_count += (is_dupe) ? 0 : cur_len;
dupe_mask = BALLOT(is_dupe);
dupes_before = s->total_dupes + __popc(dupe_mask & ((2 << (t & 0x1f)) - 1));
if (!(t & 0x1f))
{
s->scratch_red[t >> 5] = __popc(dupe_mask);
}
__syncthreads();
if (t < 32)
{
uint32_t warp_dupes = s->scratch_red[t];
uint32_t warp_pos = warp_dupes, tmp;
WARP_REDUCE_POS_32(warp_pos, tmp, t);
if (t == 0x1f)
{
s->total_dupes += warp_pos;
}
s->scratch_red[t] = warp_pos - warp_dupes;
}
__syncthreads();
if (i + t < num_strings)
{
dupes_before += s->scratch_red[t >> 5];
dict_index[cur] = i + t - dupes_before;
if (!is_dupe && dupes_before != 0)
{
dict_data[i + t - dupes_before] = cur;
}
}
__syncthreads();
}
WARP_REDUCE_SUM_32(dict_char_count);
if (!(t & 0x1f))
{
s->scratch_red[t >> 5] = dict_char_count;
}
__syncthreads();
if (t < 32)
{
dict_char_count = s->scratch_red[t];
WARP_REDUCE_SUM_32(dict_char_count);
}
if (t == 0)
{
stripes[stripe_id * num_columns + col_id].num_strings = num_strings - s->total_dupes;
stripes[stripe_id * num_columns + col_id].dict_char_count = dict_char_count;
}
}
/**
* @brief Launches kernel for initializing dictionary chunks
*
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
* @param[in] num_rowgroups Number of row groups
* @param[in] stream CUDA stream to use, default 0
*
* @return hipSuccess if successful, a CUDA error code otherwise
**/
hipError_t InitDictionaryIndices(DictionaryChunk *chunks, uint32_t num_columns, uint32_t num_rowgroups, hipStream_t stream)
{
dim3 dim_block(512, 1); // 512 threads per chunk
dim3 dim_grid(num_columns, num_rowgroups);
hipLaunchKernelGGL(( gpuInitDictionaryIndices) , dim3(dim_grid), dim3(dim_block), 0, stream , chunks, num_columns);
return hipSuccess;
}
/**
* @brief Launches kernel for building stripe dictionaries
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] stripes_host StripeDictionary host array [stripe][column]
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
* @param[in] num_columns Number of columns
* @param[in] stream CUDA stream to use, default 0
*
* @return hipSuccess if successful, a CUDA error code otherwise
**/
hipError_t BuildStripeDictionaries(StripeDictionary *stripes, StripeDictionary *stripes_host, DictionaryChunk *chunks,
uint32_t num_stripes, uint32_t num_rowgroups, uint32_t num_columns, hipStream_t stream)
{
dim3 dim_block(1024, 1); // 1024 threads per chunk
dim3 dim_grid_build(num_columns, num_stripes);
hipLaunchKernelGGL(( gpuCompactChunkDictionaries) , dim3(dim_grid_build), dim3(dim_block), 0, stream , stripes, chunks, num_columns);
for (uint32_t i = 0; i < num_stripes * num_columns; i++)
{
if (stripes_host[i].dict_data != nullptr)
{
thrust::device_ptr<uint32_t> p = thrust::device_pointer_cast(stripes_host[i].dict_data);
const nvstrdesc_s *str_data = reinterpret_cast<const nvstrdesc_s *>(stripes_host[i].column_data_base);
// NOTE: Requires the --expt-extended-lambda nvcc flag
thrust::sort(rmm::exec_policy(stream)->on(stream), p, p + stripes_host[i].num_strings,
[str_data] __device__(const uint32_t &lhs, const uint32_t &rhs) {
return nvstr_is_lesser(str_data[lhs].ptr, (uint32_t)str_data[lhs].count, str_data[rhs].ptr, (uint32_t)str_data[rhs].count);
});
}
}
hipLaunchKernelGGL(( gpuBuildStripeDictionaries) , dim3(dim_grid_build), dim3(dim_block), 0, stream , stripes, num_columns);
return hipSuccess;
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
52750ef748c16daa26ecdd01d41af9ac0944c41c.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
#if (__CUDACC_VER_MAJOR__ >= 9)
#define SHFL0(v) __shfl_sync(~0, v, 0)
#define SHFL(v, t) __shfl_sync(~0, v, t)
#define SHFL_XOR(v, m) __shfl_xor_sync(~0, v, m)
#define SYNCWARP() __syncwarp()
#define BALLOT(v) __ballot_sync(~0, v)
#else
#define SHFL0(v) __shfl(v, 0)
#define SHFL(v, t) __shfl(v, t)
#define SHFL_XOR(v, m) __shfl_xor(v, m)
#define SYNCWARP()
#define BALLOT(v) __ballot(v)
#endif
#define WARP_REDUCE_SUM_2(sum) sum += SHFL_XOR(sum, 1)
#define WARP_REDUCE_SUM_4(sum) WARP_REDUCE_SUM_2(sum); sum += SHFL_XOR(sum, 2)
#define WARP_REDUCE_SUM_8(sum) WARP_REDUCE_SUM_4(sum); sum += SHFL_XOR(sum, 4)
#define WARP_REDUCE_SUM_16(sum) WARP_REDUCE_SUM_8(sum); sum += SHFL_XOR(sum, 8)
#define WARP_REDUCE_SUM_32(sum) WARP_REDUCE_SUM_16(sum); sum += SHFL_XOR(sum, 16)
#define WARP_REDUCE_POS_2(pos, tmp, t) tmp = SHFL(pos, t & 0x1e); pos += (t & 1) ? tmp : 0;
#define WARP_REDUCE_POS_4(pos, tmp, t) WARP_REDUCE_POS_2(pos, tmp, t); tmp = SHFL(pos, (t & 0x1c) | 1); pos += (t & 2) ? tmp : 0;
#define WARP_REDUCE_POS_8(pos, tmp, t) WARP_REDUCE_POS_4(pos, tmp, t); tmp = SHFL(pos, (t & 0x18) | 3); pos += (t & 4) ? tmp : 0;
#define WARP_REDUCE_POS_16(pos, tmp, t) WARP_REDUCE_POS_8(pos, tmp, t); tmp = SHFL(pos, (t & 0x10) | 7); pos += (t & 8) ? tmp : 0;
#define WARP_REDUCE_POS_32(pos, tmp, t) WARP_REDUCE_POS_16(pos, tmp, t); tmp = SHFL(pos, 0xf); pos += (t & 16) ? tmp : 0;
#define MAX_SHORT_DICT_ENTRIES (10*1024)
#define INIT_HASH_BITS 12
/**
* @brief Compares two strings
*/
template<class T, const T lesser, const T greater, const T equal>
inline __device__ T nvstr_compare(const char *as, uint32_t alen, const char *bs, uint32_t blen)
{
uint32_t len = min(alen, blen);
uint32_t i = 0;
if (len >= 4)
{
uint32_t align_a = 3 & reinterpret_cast<uintptr_t>(as);
uint32_t align_b = 3 & reinterpret_cast<uintptr_t>(bs);
const uint32_t *as32 = reinterpret_cast<const uint32_t *>(as - align_a);
const uint32_t *bs32 = reinterpret_cast<const uint32_t *>(bs - align_b);
uint32_t ofsa = align_a * 8;
uint32_t ofsb = align_b * 8;
do {
uint32_t a = *as32++;
uint32_t b = *bs32++;
if (ofsa)
a = __funnelshift_r(a, *as32, ofsa);
if (ofsb)
b = __funnelshift_r(b, *bs32, ofsb);
if (a != b)
{
return (lesser == greater || __byte_perm(a, 0, 0x0123) < __byte_perm(b, 0, 0x0123)) ? lesser : greater;
}
i += 4;
} while (i + 4 <= len);
}
while (i < len)
{
uint8_t a = as[i];
uint8_t b = bs[i];
if (a != b)
{
return (a < b) ? lesser : greater;
}
++i;
}
return (alen == blen) ? equal : (alen < blen) ? lesser : greater;
}
static inline bool __device__ nvstr_is_lesser(const char *as, uint32_t alen, const char *bs, uint32_t blen)
{
return nvstr_compare<bool, true, false, false>(as, alen, bs, blen);
}
static inline bool __device__ nvstr_is_equal(const char *as, uint32_t alen, const char *bs, uint32_t blen)
{
return nvstr_compare<bool, false, false, true>(as, alen, bs, blen);
}
struct dictinit_state_s
{
uint32_t nnz;
uint32_t total_dupes;
DictionaryChunk chunk;
volatile uint32_t scratch_red[32];
uint16_t dict[MAX_SHORT_DICT_ENTRIES];
union {
uint16_t u16[1 << (INIT_HASH_BITS)];
uint32_t u32[1 << (INIT_HASH_BITS - 1)];
} map;
};
/**
* @brief Return a 12-bit hash from a byte sequence
*/
static inline __device__ uint32_t nvstr_init_hash(const uint8_t *ptr, uint32_t len)
{
if (len != 0)
{
return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << INIT_HASH_BITS) - 1);
}
else
{
return 0;
}
}
/**
* @brief Fill dictionary with the indices of non-null rows
*
* @param[in,out] s dictionary builder state
* @param[in] t thread id
*
**/
static __device__ void LoadNonNullIndices(volatile dictinit_state_s *s, int t)
{
if (t == 0)
{
s->nnz = 0;
}
for (uint32_t i = 0; i < s->chunk.num_rows; i += 512)
{
const uint32_t *valid_map = s->chunk.valid_map_base;
uint32_t is_valid, nz_map, nz_pos;
if (t < 16)
{
if (!valid_map)
{
s->scratch_red[t] = 0xffffffffu;
}
else
{
uint32_t row = s->chunk.start_row + i + t * 32;
uint32_t v = (row < s->chunk.start_row + s->chunk.num_rows) ? valid_map[row >> 5] : 0;
if (row & 0x1f)
{
uint32_t v1 = (row + 32 < s->chunk.start_row + s->chunk.num_rows) ? valid_map[(row >> 5) + 1] : 0;
v = __funnelshift_r(v, v1, row & 0x1f);
}
s->scratch_red[t] = v;
}
}
__syncthreads();
is_valid = (i + t < s->chunk.num_rows) ? (s->scratch_red[t >> 5] >> (t & 0x1f)) & 1 : 0;
nz_map = BALLOT(is_valid);
nz_pos = s->nnz + __popc(nz_map & (0x7fffffffu >> (0x1fu - ((uint32_t)t & 0x1f))));
if (!(t & 0x1f))
{
s->scratch_red[16 + (t >> 5)] = __popc(nz_map);
}
__syncthreads();
if (t < 32)
{
uint32_t nnz = s->scratch_red[16 + (t & 0xf)];
uint32_t nnz_pos = nnz, tmp;
WARP_REDUCE_POS_16(nnz_pos, tmp, t);
if (t == 0xf)
{
s->nnz += nnz_pos;
}
if (t <= 0xf)
{
s->scratch_red[t] = nnz_pos - nnz;
}
}
__syncthreads();
if (is_valid)
{
s->dict[nz_pos + s->scratch_red[t >> 5]] = i + t;
}
__syncthreads();
}
}
/**
* @brief Gather all non-NULL string rows and compute total character data size
*
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*
**/
// blockDim {512,1,1}
extern "C" __global__ void __launch_bounds__(512, 3)
gpuInitDictionaryIndices(DictionaryChunk *chunks, uint32_t num_columns)
{
__shared__ __align__(16) dictinit_state_s state_g;
dictinit_state_s * const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t group_id = blockIdx.y;
const nvstrdesc_s *ck_data;
uint32_t *dict_data;
uint32_t nnz, start_row, dict_char_count;
int t = threadIdx.x;
if (t < sizeof(DictionaryChunk) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[group_id * num_columns + col_id])[t];
}
for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += 512)
{
if (i + t < sizeof(s->map) / sizeof(uint32_t))
s->map.u32[i + t] = 0;
}
__syncthreads();
// First, take care of NULLs, and count how many strings we have (TODO: bypass this step when there are no nulls)
LoadNonNullIndices(s, t);
// Sum the lengths of all the strings
if (t == 0)
{
s->chunk.string_char_count = 0;
s->total_dupes = 0;
}
nnz = s->nnz;
dict_data = s->chunk.dict_data;
start_row = s->chunk.start_row;
ck_data = reinterpret_cast<const nvstrdesc_s *>(s->chunk.column_data_base) + start_row;
for (uint32_t i = 0; i < nnz; i += 512)
{
uint32_t ck_row = 0, len = 0, hash;
const uint8_t *ptr = 0;
if (i + t < nnz)
{
ck_row = s->dict[i + t];
ptr = reinterpret_cast<const uint8_t *>(ck_data[ck_row].ptr);
len = ck_data[ck_row].count;
hash = nvstr_init_hash(ptr, len);
}
WARP_REDUCE_SUM_16(len);
s->scratch_red[t >> 4] = len;
__syncthreads();
if (t < 32)
{
len = s->scratch_red[t];
WARP_REDUCE_SUM_32(len);
if (t == 0)
s->chunk.string_char_count += len;
}
if (i + t < nnz)
{
atomicAdd(&s->map.u32[hash >> 1], 1 << ((hash & 1) ? 16 : 0));
dict_data[i + t] = start_row + ck_row;
}
__syncthreads();
}
// Reorder the 16-bit local indices according to the hash value of the strings
#if (INIT_HASH_BITS != 12)
#error "Hardcoded for INIT_HASH_BITS=12"
#endif
{
// Cumulative sum of hash map counts
uint32_t count01 = s->map.u32[t * 4 + 0];
uint32_t count23 = s->map.u32[t * 4 + 1];
uint32_t count45 = s->map.u32[t * 4 + 2];
uint32_t count67 = s->map.u32[t * 4 + 3];
uint32_t sum01 = count01 + (count01 << 16);
uint32_t sum23 = count23 + (count23 << 16);
uint32_t sum45 = count45 + (count45 << 16);
uint32_t sum67 = count67 + (count67 << 16);
uint32_t sum_w, tmp;
sum23 += (sum01 >> 16) * 0x10001;
sum45 += (sum23 >> 16) * 0x10001;
sum67 += (sum45 >> 16) * 0x10001;
sum_w = sum67 >> 16;
WARP_REDUCE_POS_16(sum_w, tmp, t);
if ((t & 0xf) == 0xf)
{
s->scratch_red[t >> 4] = sum_w;
}
__syncthreads();
if (t < 32)
{
uint32_t sum_b = s->scratch_red[t];
WARP_REDUCE_POS_32(sum_b, tmp, t);
s->scratch_red[t] = sum_b;
}
__syncthreads();
tmp = (t >= 16) ? s->scratch_red[(t >> 4) - 1] : 0;
sum_w = (sum_w - (sum67 >> 16) + tmp) * 0x10001;
s->map.u32[t * 4 + 0] = sum_w + sum01 - count01;
s->map.u32[t * 4 + 1] = sum_w + sum23 - count23;
s->map.u32[t * 4 + 2] = sum_w + sum45 - count45;
s->map.u32[t * 4 + 3] = sum_w + sum67 - count67;
__syncthreads();
}
// Put the indices back in hash order
for (uint32_t i = 0; i < nnz; i += 512)
{
uint32_t ck_row = 0, pos = 0, hash = 0, pos_old, pos_new, sh, colliding_row;
bool collision;
if (i + t < nnz)
{
const uint8_t *ptr;
uint32_t len;
ck_row = dict_data[i + t] - start_row;
ptr = reinterpret_cast<const uint8_t *>(ck_data[ck_row].ptr);
len = (uint32_t)ck_data[ck_row].count;
hash = nvstr_init_hash(ptr, len);
sh = (hash & 1) ? 16 : 0;
pos_old = s->map.u16[hash];
}
// The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic behavior for the
// first row in the hash map that will be used for early duplicate detection
// The lack of 16-bit atomicMin makes this a bit messy...
__syncthreads();
if (i + t < nnz)
{
pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff;
s->dict[pos] = ck_row;
}
__syncthreads();
collision = false;
if (i + t < nnz)
{
pos_new = s->map.u16[hash];
collision = (pos != pos_old && pos_new > pos_old + 1);
if (collision)
{
colliding_row = s->dict[pos_old];
}
}
__syncthreads();
// evens
if (collision && !(pos_old & 1))
{
uint32_t *dict32 = reinterpret_cast<uint32_t *>(&s->dict[pos_old]);
atomicMin(dict32, (dict32[0] & 0xffff0000) | ck_row);
}
__syncthreads();
// odds
if (collision && (pos_old & 1))
{
uint32_t *dict32 = reinterpret_cast<uint32_t *>(&s->dict[pos_old-1]);
atomicMin(dict32, (dict32[0] & 0x0000ffff) | (ck_row << 16));
}
__syncthreads();
// Resolve collision
if (collision && ck_row == s->dict[pos_old])
{
s->dict[pos] = colliding_row;
}
}
__syncthreads();
// Now that the strings are ordered by hash, compare every string with the first entry in the hash map,
// the position of the first string can be inferred from the hash map counts
dict_char_count = 0;
for (uint32_t i = 0; i < nnz; i += 512)
{
uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0, dupe_mask, dupes_before;
if (i + t < nnz)
{
const char *str1, *str2;
uint32_t len1, len2, hash;
ck_row = s->dict[i + t];
str1 = ck_data[ck_row].ptr;
len1 = (uint32_t)ck_data[ck_row].count;
hash = nvstr_init_hash(reinterpret_cast<const uint8_t *>(str1), len1);
ck_row_ref = s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0];
if (ck_row_ref != ck_row)
{
str2 = ck_data[ck_row_ref].ptr;
len2 = (uint32_t)ck_data[ck_row_ref].count;
is_dupe = nvstr_is_equal(str1, len1, str2, len2);
dict_char_count += (is_dupe) ? 0 : len1;
}
}
dupe_mask = BALLOT(is_dupe);
dupes_before = s->total_dupes + __popc(dupe_mask & ((2 << (t & 0x1f)) - 1));
if (!(t & 0x1f))
{
s->scratch_red[t >> 5] = __popc(dupe_mask);
}
__syncthreads();
if (t < 32)
{
uint32_t warp_dupes = (t < 16) ? s->scratch_red[t] : 0;
uint32_t warp_pos = warp_dupes, tmp;
WARP_REDUCE_POS_16(warp_pos, tmp, t);
if (t == 0xf)
{
s->total_dupes += warp_pos;
}
if (t < 16)
{
s->scratch_red[t] = warp_pos - warp_dupes;
}
}
__syncthreads();
if (i + t < nnz)
{
if (!is_dupe)
{
dupes_before += s->scratch_red[t >> 5];
dict_data[i + t - dupes_before] = ck_row + start_row;
}
else
{
s->chunk.dict_index[ck_row + start_row] = (ck_row_ref + start_row) | (1u << 31);
}
}
}
WARP_REDUCE_SUM_32(dict_char_count);
if (!(t & 0x1f))
{
s->scratch_red[t >> 5] = dict_char_count;
}
__syncthreads();
if (t < 32)
{
dict_char_count = (t < 16) ? s->scratch_red[t] : 0;
WARP_REDUCE_SUM_16(dict_char_count);
}
if (!t)
{
chunks[group_id * num_columns + col_id].num_strings = nnz;
chunks[group_id * num_columns + col_id].string_char_count = s->chunk.string_char_count;
chunks[group_id * num_columns + col_id].num_dict_strings = nnz - s->total_dupes;
chunks[group_id * num_columns + col_id].dict_char_count = dict_char_count;
}
}
/**
* @brief In-place concatenate dictionary data for all chunks in each stripe
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*
**/
// blockDim {1024,1,1}
extern "C" __global__ void __launch_bounds__(1024)
gpuCompactChunkDictionaries(StripeDictionary *stripes, DictionaryChunk *chunks, uint32_t num_columns)
{
__shared__ __align__(16) StripeDictionary stripe_g;
__shared__ __align__(16) DictionaryChunk chunk_g;
__shared__ const uint32_t* volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t chunk_len;
int t = threadIdx.x;
const uint32_t *src;
uint32_t *dst;
if (t < sizeof(StripeDictionary) / sizeof(uint32_t))
{
((volatile uint32_t *)&stripe_g)[t] = ((const uint32_t *)&stripes[stripe_id * num_columns + col_id])[t];
}
__syncthreads();
if (t < sizeof(DictionaryChunk) / sizeof(uint32_t))
{
((volatile uint32_t *)&chunk_g)[t] = ((const uint32_t *)&chunks[stripe_g.start_chunk * num_columns + col_id])[t];
}
__syncthreads();
dst = stripe_g.dict_data + chunk_g.num_dict_strings;
for (uint32_t g = 1; g < stripe_g.num_chunks; g++)
{
if (!t)
{
src = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].dict_data;
chunk_len = chunks[(stripe_g.start_chunk + g) * num_columns + col_id].num_dict_strings;
ck_curptr_g = src;
ck_curlen_g = chunk_len;
}
__syncthreads();
src = ck_curptr_g;
chunk_len = ck_curlen_g;
if (src != dst)
{
for (uint32_t i = 0; i < chunk_len; i += 1024)
{
uint32_t idx = (i + t < chunk_len) ? src[i + t] : 0;
__syncthreads();
if (i + t < chunk_len)
dst[i + t] = idx;
}
}
dst += chunk_len;
__syncthreads();
}
}
struct build_state_s
{
uint32_t total_dupes;
StripeDictionary stripe;
volatile uint32_t scratch_red[32];
};
/**
* @brief Eliminate duplicates in-place and generate column dictionary index
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] num_columns Number of string columns
*
**/
// NOTE: Prone to poor utilization on small datasets due to 1 block per dictionary
// blockDim {1024,1,1}
extern "C" __global__ void __launch_bounds__(1024)
gpuBuildStripeDictionaries(StripeDictionary *stripes, uint32_t num_columns)
{
__shared__ __align__(16) build_state_s state_g;
volatile build_state_s * const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t num_strings;
uint32_t *dict_data, *dict_index;
uint32_t dict_char_count;
const nvstrdesc_s *str_data;
int t = threadIdx.x;
if (t < sizeof(StripeDictionary) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->stripe)[t] = ((const uint32_t *)&stripes[stripe_id * num_columns + col_id])[t];
}
if (t == 31 * 32)
{
s->total_dupes = 0;
}
__syncthreads();
num_strings = s->stripe.num_strings;
dict_data = s->stripe.dict_data;
if (!dict_data)
return;
dict_index = s->stripe.dict_index;
str_data = reinterpret_cast<const nvstrdesc_s *>(s->stripe.column_data_base);
dict_char_count = 0;
for (uint32_t i = 0; i < num_strings; i += 1024)
{
uint32_t cur = (i + t < num_strings) ? dict_data[i + t] : 0;
uint32_t dupe_mask, dupes_before, cur_len = 0;
const char *cur_ptr;
bool is_dupe = false;
if (i + t < num_strings)
{
cur_ptr = str_data[cur].ptr;
cur_len = str_data[cur].count;
}
if (i + t != 0 && i + t < num_strings)
{
uint32_t prev = dict_data[i + t - 1];
is_dupe = nvstr_is_equal(cur_ptr, cur_len, str_data[prev].ptr, str_data[prev].count);
}
dict_char_count += (is_dupe) ? 0 : cur_len;
dupe_mask = BALLOT(is_dupe);
dupes_before = s->total_dupes + __popc(dupe_mask & ((2 << (t & 0x1f)) - 1));
if (!(t & 0x1f))
{
s->scratch_red[t >> 5] = __popc(dupe_mask);
}
__syncthreads();
if (t < 32)
{
uint32_t warp_dupes = s->scratch_red[t];
uint32_t warp_pos = warp_dupes, tmp;
WARP_REDUCE_POS_32(warp_pos, tmp, t);
if (t == 0x1f)
{
s->total_dupes += warp_pos;
}
s->scratch_red[t] = warp_pos - warp_dupes;
}
__syncthreads();
if (i + t < num_strings)
{
dupes_before += s->scratch_red[t >> 5];
dict_index[cur] = i + t - dupes_before;
if (!is_dupe && dupes_before != 0)
{
dict_data[i + t - dupes_before] = cur;
}
}
__syncthreads();
}
WARP_REDUCE_SUM_32(dict_char_count);
if (!(t & 0x1f))
{
s->scratch_red[t >> 5] = dict_char_count;
}
__syncthreads();
if (t < 32)
{
dict_char_count = s->scratch_red[t];
WARP_REDUCE_SUM_32(dict_char_count);
}
if (t == 0)
{
stripes[stripe_id * num_columns + col_id].num_strings = num_strings - s->total_dupes;
stripes[stripe_id * num_columns + col_id].dict_char_count = dict_char_count;
}
}
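// Illustrative sketch (not part of the original source): the duplicate-elimination pass above
// counts, per warp, how many duplicate flags sit at or before each lane using a ballot plus a
// popcount over an inclusive lane mask. The pattern in isolation, assuming CUDA 9+ __ballot_sync
// semantics (the BALLOT macro above is presumed to wrap something equivalent) and a blockDim.x
// that is a multiple of 32; `ballot_inclusive_dupes` is a hypothetical name for this example only.
__global__ void ballot_inclusive_dupes(const int *is_dupe_flag, int *dupes_upto_lane, int n)
{
  int t = blockIdx.x * blockDim.x + threadIdx.x;
  bool is_dupe = (t < n) ? (is_dupe_flag[t] != 0) : false;
  unsigned ballot = __ballot_sync(0xffffffffu, is_dupe);
  // (2 << lane) - 1 keeps bits 0..lane, i.e. an inclusive prefix within the warp.
  int count = __popc(ballot & ((2u << (t & 0x1f)) - 1u));
  if (t < n)
  {
    dupes_upto_lane[t] = count;
  }
}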
/**
* @brief Launches kernel for initializing dictionary chunks
*
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
* @param[in] num_rowgroups Number of row groups
* @param[in] stream CUDA stream to use, default 0
*
* @return cudaSuccess if successful, a CUDA error code otherwise
**/
cudaError_t InitDictionaryIndices(DictionaryChunk *chunks, uint32_t num_columns, uint32_t num_rowgroups, cudaStream_t stream)
{
dim3 dim_block(512, 1); // 512 threads per chunk
dim3 dim_grid(num_columns, num_rowgroups);
gpuInitDictionaryIndices <<< dim_grid, dim_block, 0, stream >>>(chunks, num_columns);
return cudaSuccess;
}
/**
* @brief Launches kernel for building stripe dictionaries
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] stripes_host StripeDictionary host array [stripe][column]
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
* @param[in] num_columns Number of columns
* @param[in] stream CUDA stream to use, default 0
*
* @return cudaSuccess if successful, a CUDA error code otherwise
**/
cudaError_t BuildStripeDictionaries(StripeDictionary *stripes, StripeDictionary *stripes_host, DictionaryChunk *chunks,
uint32_t num_stripes, uint32_t num_rowgroups, uint32_t num_columns, cudaStream_t stream)
{
dim3 dim_block(1024, 1); // 1024 threads per chunk
dim3 dim_grid_build(num_columns, num_stripes);
gpuCompactChunkDictionaries <<< dim_grid_build, dim_block, 0, stream >>>(stripes, chunks, num_columns);
for (uint32_t i = 0; i < num_stripes * num_columns; i++)
{
if (stripes_host[i].dict_data != nullptr)
{
thrust::device_ptr<uint32_t> p = thrust::device_pointer_cast(stripes_host[i].dict_data);
const nvstrdesc_s *str_data = reinterpret_cast<const nvstrdesc_s *>(stripes_host[i].column_data_base);
// NOTE: Requires the --expt-extended-lambda nvcc flag
thrust::sort(rmm::exec_policy(stream)->on(stream), p, p + stripes_host[i].num_strings,
[str_data] __device__(const uint32_t &lhs, const uint32_t &rhs) {
return nvstr_is_lesser(str_data[lhs].ptr, (uint32_t)str_data[lhs].count, str_data[rhs].ptr, (uint32_t)str_data[rhs].count);
});
}
}
gpuBuildStripeDictionaries <<< dim_grid_build, dim_block, 0, stream >>>(stripes, num_columns);
return cudaSuccess;
}
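// Call-sequence sketch (not part of the original file): one plausible way the ORC writer drives
// the two launchers above. Allocation of the chunk/stripe arrays, the host mirror, and the choice
// of stripe boundaries are assumed to happen elsewhere; `ExampleDictionaryPipeline` is a
// hypothetical name used only for illustration.
static inline void ExampleDictionaryPipeline(DictionaryChunk *chunks, StripeDictionary *stripes,
                                             StripeDictionary *stripes_host, uint32_t num_columns,
                                             uint32_t num_rowgroups, uint32_t num_stripes,
                                             cudaStream_t stream)
{
  // Per-rowgroup hash-based dictionaries first...
  InitDictionaryIndices(chunks, num_columns, num_rowgroups, stream);
  // ...then, once stripe boundaries are known, per-stripe sorted and de-duplicated dictionaries.
  BuildStripeDictionaries(stripes, stripes_host, chunks, num_stripes, num_rowgroups, num_columns, stream);
  cudaStreamSynchronize(stream);
}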
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
91aa4794e631e8940a6232a455a2a95d074dc10d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating-point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating-point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_computePSF_signalNsqrtMany_fcrop (int n, int sizeSubImage, int sizeSubImageFull,float *result, float *fft,float divide, int *sparseIndexEvenShift2D, int *sparseIndexOddShift2D)
{
float x,y;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
int id2=id%sizeSubImage;
int id3=id/sizeSubImage;
int id4=id3*sizeSubImageFull;
if (id < n)
{
x=fft[sparseIndexEvenShift2D[id2]+id4*2]/divide;
y=fft[sparseIndexOddShift2D[id2]+id4*2]/divide;
result[id]=sqrt(x*x+y*y);
}
}
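// Launch sketch (an assumption, not part of the original file): the kernel above derives a flat
// id from a 2D grid and guards on n, so one plausible HIP launch for n output samples is the
// following; all d_* pointer names are hypothetical device allocations.
//
//   dim3 block(16, 16);
//   dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y); // width*height >= n
//   hipLaunchKernelGGL(vec_computePSF_signalNsqrtMany_fcrop, grid, block, 0, 0,
//                      n, sizeSubImage, sizeSubImageFull, d_result, d_fft, divide,
//                      d_sparseIndexEvenShift2D, d_sparseIndexOddShift2D);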
|
91aa4794e631e8940a6232a455a2a95d074dc10d.cu
|
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating-point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating-point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_computePSF_signalNsqrtMany_fcrop (int n, int sizeSubImage, int sizeSubImageFull,float *result, float *fft,float divide, int *sparseIndexEvenShift2D, int *sparseIndexOddShift2D)
{
float x,y;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
int id2=id%sizeSubImage;
int id3=id/sizeSubImage;
int id4=id3*sizeSubImageFull;
if (id < n)
{
x=fft[sparseIndexEvenShift2D[id2]+id4*2]/divide;
y=fft[sparseIndexOddShift2D[id2]+id4*2]/divide;
result[id]=sqrt(x*x+y*y);
}
}
|
3b7fae85779853dacca3c915f57201ebdb8b7ea2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include<cstdlib>
#include<conio.h>
#include <stdio.h>
#include <math.h>
#define blockSize 4
using namespace std;
__global__ void Addition(float *ad, float *bd, float *cd, const int n)
{
unsigned int i = blockSize*blockIdx.x + threadIdx.x;
unsigned int j = blockSize*blockIdx.y + threadIdx.y;
if (i < n && j < n)
{
cd[i + j*n] = ad[i + j*n] + bd[i + j*n];
}
}
int main()
{
const int n = 4;
float ah[n][n], bh[n][n], ch[n][n];
float *ad, *bd, *cd;
for (int i = 0; i<n; i++)
{
for (int j = 0; j<n; j++)
{
cin >> ah[i][j];
}
}
for (int i = 0; i<n; i++)
{
for (int j = 0; j<n; j++)
{
cin >> bh[i][j];
}
}
hipMalloc((void **)&ad, n*n*sizeof(float));
hipMalloc((void **)&bd, n*n*sizeof(float));
hipMalloc((void **)&cd, n*n*sizeof(float));
hipMemcpy(ad, ah, n*n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(bd, bh, n*n*sizeof(float), hipMemcpyHostToDevice);
dim3 dimGrid(n / blockSize, n / blockSize, 1);
dim3 dimBlock(blockSize, blockSize, 1);
Addition << <dimGrid, dimBlock >> > (ad, bd, cd, n);
hipMemcpy(ch, cd, n*n*sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i<n; i++)
{
for (int j = 0; j < n; j++)
{
cout << ch[i][j] << " ";
}
cout << "\n";
}
getch();
}
/*
Example input for a and b
5 2 6 1
0 6 2 0
3 8 1 4
1 8 5 6
7 5 8 0
1 8 2 6
9 4 3 8
5 3 7 9
Required output of c
12 7 14 1
1 14 4 6
12 12 4 12
6 11 12 15
*/
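// Optional hardening (a sketch, not in the original example): the program checks no HIP return
// codes. A minimal guard after the launch could look like this:
//
//   hipError_t err = hipGetLastError();
//   if (err != hipSuccess)
//       cout << "kernel launch failed: " << hipGetErrorString(err) << "\n";
//   hipDeviceSynchronize(); // hipMemcpy would also synchronize before copying the result back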
|
3b7fae85779853dacca3c915f57201ebdb8b7ea2.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include<cstdlib>
#include<conio.h>
#include <stdio.h>
#include <math.h>
#define blockSize 4
using namespace std;
__global__ void Addition(float *ad, float *bd, float *cd, const int n)
{
unsigned int i = blockSize*blockIdx.x + threadIdx.x;
unsigned int j = blockSize*blockIdx.y + threadIdx.y;
if (i < n && j < n)
{
cd[i + j*n] = ad[i + j*n] + bd[i + j*n];
}
}
int main()
{
const int n = 4;
float ah[n][n], bh[n][n], ch[n][n];
float *ad, *bd, *cd;
for (int i = 0; i<n; i++)
{
for (int j = 0; j<n; j++)
{
cin >> ah[i][j];
}
}
for (int i = 0; i<n; i++)
{
for (int j = 0; j<n; j++)
{
cin >> bh[i][j];
}
}
cudaMalloc((void **)&ad, n*n*sizeof(float));
cudaMalloc((void **)&bd, n*n*sizeof(float));
cudaMalloc((void **)&cd, n*n*sizeof(float));
cudaMemcpy(ad, ah, n*n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(bd, bh, n*n*sizeof(float), cudaMemcpyHostToDevice);
dim3 dimGrid(n / blockSize, n / blockSize, 1);
dim3 dimBlock(blockSize, blockSize, 1);
Addition << <dimGrid, dimBlock >> > (ad, bd, cd, n);
cudaMemcpy(ch, cd, n*n*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i<n; i++)
{
for (int j = 0; j < n; j++)
{
cout << ch[i][j] << " ";
}
cout << "\n";
}
getch();
}
/*
Example input for a and b
5 2 6 1
0 6 2 0
3 8 1 4
1 8 5 6
7 5 8 0
1 8 2 6
9 4 3 8
5 3 7 9
Required output of c
12 7 14 1
1 14 4 6
12 12 4 12
6 11 12 15
*/
|
a3f6e551cef05525c150558d1f2d9f418789c876.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: JIANG Yufan (email: [email protected]) 2019-04-05
*/
#include <math.h>
#include "../../XDevice.h"
#include "../../XUtility.h"
#include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Binary.h"
#include "Binary.cuh"
#include "hip/hip_fp16.h"
#include "cublas_api.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
__device__
int BinaryCudaMod(int x, int base)
{
return x % base;
}
template<class T1, class T2>
__device__
T1 BinaryCudaDescale(T1 x, T2 num)
{
return x / T1(num);
}
template<class T1, class T2>
__device__
T1 BinaryCudaPower(T1 x, T2 num)
{
if (T1(num) == T1(0))
return (T1)1.0;
else if (T1(num) == T1(0.5))
return (T1)sqrt((float)x);
else if (T1(num) == T1(2))
return (T1)(x * x);
else {
if (x == T1(0) && T1(num) < T1(0))
return (T1)1e9F;
else
return (T1)pow((float)x, (float)num);
}
}
template<class T1, class T2>
__device__
T1 BinaryCudaScale(T1 x, T2 num)
{
return x * T1(num);
}
template<class T1, class T2>
__device__
T1 BinaryCudaShift(T1 x, T2 num)
{
return x + T1(num);
}
#ifdef HALF_PRECISION
#define SIMPLE_BINARY_FUNCTION_GPU(funcName, origFunc) \
template<class T1, class T2> \
__global__ \
void Kernel##funcName(T1 * a, T1 * b, int size, T2 num) \
{ \
int i = blockDim.x * blockIdx.x + threadIdx.x; \
\
if (i < size) \
b[i] = (T1)origFunc((T1)a[i], (T2)num); \
} \
\
template<class T> \
void _Cuda##funcName(const XTensor * a, XTensor * b, T num) \
{ \
CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \
CheckNTErrors((a->isSparse == false), "TODO!"); \
\
int gridSize[3]; \
int blockSize[3]; \
\
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize); \
\
dim3 blocks(gridSize[0]); \
dim3 threads(blockSize[0]); \
\
int devIDBackup; \
ProtectCudaDev(a->devID, devIDBackup); \
\
if (a->dataType == X_FLOAT) { \
hipLaunchKernelGGL(( Kernel##funcName), dim3(blocks), dim3(threads), 0, 0, \
(float*)a->data, (float*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_DOUBLE) { \
hipLaunchKernelGGL(( Kernel##funcName), dim3(blocks), dim3(threads), 0, 0, \
(double*)a->data, (double*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_INT) { \
hipLaunchKernelGGL(( Kernel##funcName), dim3(blocks), dim3(threads), 0, 0, \
(int*)a->data, (int*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_FLOAT16) { \
hipLaunchKernelGGL(( Kernel##funcName), dim3(blocks), dim3(threads), 0, 0, \
(__half*)a->data, (__half*)b->data, a->unitNum, (T)num); \
} \
else { \
ShowNTErrors("TODO!"); \
} \
\
BacktoCudaDev(a->devID, devIDBackup); \
} \
template void _Cuda##funcName<int>(const XTensor*, XTensor*, int); \
template void _Cuda##funcName<float>(const XTensor*, XTensor*, float); \
template void _Cuda##funcName<__half>(const XTensor*, XTensor*, __half); \
template void _Cuda##funcName<double>(const XTensor*, XTensor*, double);
#else
#define SIMPLE_BINARY_FUNCTION_GPU(funcName, origFunc) \
template<class T1, class T2> \
__global__ \
void Kernel##funcName(T1 * a, T1 * b, int size, T2 num) \
{ \
int i = blockDim.x * blockIdx.x + threadIdx.x; \
\
if (i < size) \
b[i] = (T1)origFunc((T1)a[i], (T2)num); \
} \
\
template<class T> \
void _Cuda##funcName(const XTensor * a, XTensor * b, T num) \
{ \
CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \
CheckNTErrors((a->isSparse == false), "TODO!"); \
\
int gridSize[3]; \
int blockSize[3]; \
\
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize); \
\
dim3 blocks(gridSize[0]); \
dim3 threads(blockSize[0]); \
\
int devIDBackup; \
ProtectCudaDev(a->devID, devIDBackup); \
\
if (a->dataType == X_FLOAT) { \
hipLaunchKernelGGL(( Kernel##funcName), dim3(blocks), dim3(threads), 0, 0, \
(float*)a->data, (float*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_DOUBLE) { \
hipLaunchKernelGGL(( Kernel##funcName), dim3(blocks), dim3(threads), 0, 0, \
(double*)a->data, (double*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_INT) { \
hipLaunchKernelGGL(( Kernel##funcName), dim3(blocks), dim3(threads), 0, 0, \
(int*)a->data, (int*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_FLOAT16) { \
ShowNTErrors("Recompile the code with HALF_PRECISION!"); \
} \
else { \
ShowNTErrors("TODO!"); \
} \
\
BacktoCudaDev(a->devID, devIDBackup); \
} \
template void _Cuda##funcName<int>(const XTensor*, XTensor*, int); \
template void _Cuda##funcName<float>(const XTensor*, XTensor*, float); \
template void _Cuda##funcName<__half>(const XTensor*, XTensor*, __half); \
template void _Cuda##funcName<double>(const XTensor*, XTensor*, double);
#endif
SIMPLE_BINARY_FUNCTION_GPU(Descale, BinaryCudaDescale)
SIMPLE_BINARY_FUNCTION_GPU(Mod, BinaryCudaMod)
SIMPLE_BINARY_FUNCTION_GPU(Power, BinaryCudaPower)
SIMPLE_BINARY_FUNCTION_GPU(Scale, BinaryCudaScale)
SIMPLE_BINARY_FUNCTION_GPU(Shift, BinaryCudaShift)
#endif // USE_ROCM
} // namespace nts(NiuTrans.Tensor)
|
a3f6e551cef05525c150558d1f2d9f418789c876.cu
|
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: JIANG Yufan (email: [email protected]) 2019-04-05
*/
#include <math.h>
#include "../../XDevice.h"
#include "../../XUtility.h"
#include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Binary.h"
#include "Binary.cuh"
#include "cuda_fp16.h"
#include "cublas_api.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
__device__
int BinaryCudaMod(int x, int base)
{
return x % base;
}
template<class T1, class T2>
__device__
T1 BinaryCudaDescale(T1 x, T2 num)
{
return x / T1(num);
}
template<class T1, class T2>
__device__
T1 BinaryCudaPower(T1 x, T2 num)
{
if (T1(num) == T1(0))
return (T1)1.0;
else if (T1(num) == T1(0.5))
return (T1)sqrt((float)x);
else if (T1(num) == T1(2))
return (T1)(x * x);
else {
if (x == T1(0) && T1(num) < T1(0))
return (T1)1e9F;
else
return (T1)pow((float)x, (float)num);
}
}
template<class T1, class T2>
__device__
T1 BinaryCudaScale(T1 x, T2 num)
{
return x * T1(num);
}
template<class T1, class T2>
__device__
T1 BinaryCudaShift(T1 x, T2 num)
{
return x + T1(num);
}
#ifdef HALF_PRECISION
#define SIMPLE_BINARY_FUNCTION_GPU(funcName, origFunc) \
template<class T1, class T2> \
__global__ \
void Kernel##funcName(T1 * a, T1 * b, int size, T2 num) \
{ \
int i = blockDim.x * blockIdx.x + threadIdx.x; \
\
if (i < size) \
b[i] = (T1)origFunc((T1)a[i], (T2)num); \
} \
\
template<class T> \
void _Cuda##funcName(const XTensor * a, XTensor * b, T num) \
{ \
CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \
CheckNTErrors((a->isSparse == false), "TODO!"); \
\
int gridSize[3]; \
int blockSize[3]; \
\
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize); \
\
dim3 blocks(gridSize[0]); \
dim3 threads(blockSize[0]); \
\
int devIDBackup; \
ProtectCudaDev(a->devID, devIDBackup); \
\
if (a->dataType == X_FLOAT) { \
Kernel##funcName<<<blocks, threads>>> \
((float*)a->data, (float*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_DOUBLE) { \
Kernel##funcName<<<blocks, threads>>> \
((double*)a->data, (double*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_INT) { \
Kernel##funcName<<<blocks, threads>>> \
((int*)a->data, (int*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_FLOAT16) { \
Kernel##funcName<<<blocks, threads>>> \
((__half*)a->data, (__half*)b->data, a->unitNum, (T)num); \
} \
else { \
ShowNTErrors("TODO!"); \
} \
\
BacktoCudaDev(a->devID, devIDBackup); \
} \
template void _Cuda##funcName<int>(const XTensor*, XTensor*, int); \
template void _Cuda##funcName<float>(const XTensor*, XTensor*, float); \
template void _Cuda##funcName<__half>(const XTensor*, XTensor*, __half); \
template void _Cuda##funcName<double>(const XTensor*, XTensor*, double);
#else
#define SIMPLE_BINARY_FUNCTION_GPU(funcName, origFunc) \
template<class T1, class T2> \
__global__ \
void Kernel##funcName(T1 * a, T1 * b, int size, T2 num) \
{ \
int i = blockDim.x * blockIdx.x + threadIdx.x; \
\
if (i < size) \
b[i] = (T1)origFunc((T1)a[i], (T2)num); \
} \
\
template<class T> \
void _Cuda##funcName(const XTensor * a, XTensor * b, T num) \
{ \
CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \
CheckNTErrors((a->isSparse == false), "TODO!"); \
\
int gridSize[3]; \
int blockSize[3]; \
\
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize); \
\
dim3 blocks(gridSize[0]); \
dim3 threads(blockSize[0]); \
\
int devIDBackup; \
ProtectCudaDev(a->devID, devIDBackup); \
\
if (a->dataType == X_FLOAT) { \
Kernel##funcName<<<blocks, threads>>> \
((float*)a->data, (float*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_DOUBLE) { \
Kernel##funcName<<<blocks, threads>>> \
((double*)a->data, (double*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_INT) { \
Kernel##funcName<<<blocks, threads>>> \
((int*)a->data, (int*)b->data, a->unitNum, (T)num); \
} \
else if (a->dataType == X_FLOAT16) { \
ShowNTErrors("Recompile the code with HALF_PRECISION!"); \
} \
else { \
ShowNTErrors("TODO!"); \
} \
\
BacktoCudaDev(a->devID, devIDBackup); \
} \
template void _Cuda##funcName<int>(const XTensor*, XTensor*, int); \
template void _Cuda##funcName<float>(const XTensor*, XTensor*, float); \
template void _Cuda##funcName<__half>(const XTensor*, XTensor*, __half); \
template void _Cuda##funcName<double>(const XTensor*, XTensor*, double);
#endif
SIMPLE_BINARY_FUNCTION_GPU(Descale, BinaryCudaDescale)
SIMPLE_BINARY_FUNCTION_GPU(Mod, BinaryCudaMod)
SIMPLE_BINARY_FUNCTION_GPU(Power, BinaryCudaPower)
SIMPLE_BINARY_FUNCTION_GPU(Scale, BinaryCudaScale)
SIMPLE_BINARY_FUNCTION_GPU(Shift, BinaryCudaShift)
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
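// Expansion sketch (illustrative, not generated compiler output): for the Scale entry the macro
// above produces a templated kernel plus a launcher that picks blocks/threads via
// GDevs.GetCudaThread and dispatches on a->dataType; the float branch boils down to roughly:
//
//   __global__ void KernelScale(float *a, float *b, int size, float num)
//   {
//       int i = blockDim.x * blockIdx.x + threadIdx.x;
//       if (i < size)
//           b[i] = BinaryCudaScale(a[i], num);   // i.e. a[i] * num
//   }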
|
c0ada5ac99bcbe1f788beb1a982c797832e61957.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/sort/thrust_sort.cuh"
namespace cunumeric {
void thrust_local_sort(const uint8_t* values_in,
uint8_t* values_out,
const int64_t* indices_in,
int64_t* indices_out,
const size_t volume,
const size_t sort_dim_size,
const bool stable,
hipStream_t stream)
{
detail::thrust_local_sort(
values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream);
}
} // namespace cunumeric
|
c0ada5ac99bcbe1f788beb1a982c797832e61957.cu
|
/* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/sort/thrust_sort.cuh"
namespace cunumeric {
void thrust_local_sort(const uint8_t* values_in,
uint8_t* values_out,
const int64_t* indices_in,
int64_t* indices_out,
const size_t volume,
const size_t sort_dim_size,
const bool stable,
cudaStream_t stream)
{
detail::thrust_local_sort(
values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream);
}
} // namespace cunumeric
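// Orientation sketch (an assumption, not the cuNumeric implementation): detail::thrust_local_sort
// presumably dispatches on the element type and ends in a Thrust sort on the given stream; a
// generic stable key/value sort over typed device pointers would look roughly like:
//
//   auto policy = thrust::cuda::par.on(stream);              // <thrust/execution_policy.h>
//   thrust::stable_sort_by_key(policy, keys, keys + volume,  // <thrust/sort.h>
//                              values);
//
// The type-erased uint8_t* signature above is what lets the caller stay template-free.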
|
fe78a2c8833283ca2a98d82bcc4f9bbec0005e3d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 500
struct hipComplex {
float r;
float i;
__device__ hipComplex(float a, float b) :r(a), i(b) {}
__device__ float magnitude2(void) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex &a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex &a) {
return hipComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 1.5;
float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
hipComplex c(-0.8, -.156);
hipComplex a(jx, jy);
for (int i = 0; i < 200; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
// map from threadIdx/blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int julianValue = julia(x, y);
ptr[offset * 4 + 0] = 255 * julianValue;
ptr[offset * 4 + 1] = 0;
ptr[offset * 4 + 2] = 0;
ptr[offset * 4 + 3] = 255;
}
int main(void) {
CPUBitmap bitmap(DIM, DIM);
unsigned char *d_bitmap;
HANDLE_ERROR(hipMalloc((void**)&d_bitmap, bitmap.image_size()));
dim3 grid(DIM, DIM);
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(1) , 0, 0, d_bitmap);
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), d_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));
bitmap.display_and_exit();
HANDLE_ERROR(hipFree(d_bitmap));
}
|
fe78a2c8833283ca2a98d82bcc4f9bbec0005e3d.cu
|
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 500
struct cuComplex {
float r;
float i;
__device__ cuComplex(float a, float b) :r(a), i(b) {}
__device__ float magnitude2(void) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex &a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex &a) {
return cuComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 1.5;
float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
cuComplex c(-0.8, -.156);
cuComplex a(jx, jy);
for (int i = 0; i < 200; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
// map from threadIdx/blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int julianValue = julia(x, y);
ptr[offset * 4 + 0] = 255 * julianValue;
ptr[offset * 4 + 1] = 0;
ptr[offset * 4 + 2] = 0;
ptr[offset * 4 + 3] = 255;
}
int main(void) {
CPUBitmap bitmap(DIM, DIM);
unsigned char *d_bitmap;
HANDLE_ERROR(cudaMalloc((void**)&d_bitmap, bitmap.image_size()));
dim3 grid(DIM, DIM);
kernel<<<grid, 1 >>>(d_bitmap);
HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), d_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
bitmap.display_and_exit();
HANDLE_ERROR(cudaFree(d_bitmap));
}
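// CPU spot-check (a sketch, not in the original program): the same escape-time iteration can be
// run on the host to verify a few pixels of the RGBA bitmap; julia_cpu is a hypothetical helper.
//
//   int julia_cpu(int x, int y) {
//       const float scale = 1.5f;
//       float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
//       float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
//       float cr = -0.8f, ci = -0.156f, ar = jx, ai = jy;
//       for (int i = 0; i < 200; i++) {
//           float nr = ar * ar - ai * ai + cr;
//           float ni = 2.0f * ar * ai + ci;
//           ar = nr; ai = ni;
//           if (ar * ar + ai * ai > 1000) return 0;
//       }
//       return 1;
//   }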
|
15ef51066d7578cb376b5a01473332900ba6b112.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <distance/epsilon_neighborhood.cuh>
#include <raft/mr/device/allocator.hpp>
#include <random/make_blobs.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Distance {
template <typename T, typename IdxT>
struct EpsInputs {
IdxT n_row, n_col, n_centers, n_batches;
T eps;
};
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const EpsInputs<T, IdxT>& p) {
return os;
}
template <typename T, typename IdxT>
class EpsNeighTest : public ::testing::TestWithParam<EpsInputs<T, IdxT>> {
protected:
void SetUp() override {
param = ::testing::TestWithParam<EpsInputs<T, IdxT>>::GetParam();
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(data, param.n_row * param.n_col);
raft::allocate(labels, param.n_row);
batchSize = param.n_row / param.n_batches;
raft::allocate(adj, param.n_row * batchSize);
raft::allocate(vd, batchSize + 1, true);
allocator.reset(new raft::mr::device::default_allocator);
Random::make_blobs<T, IdxT>(data, labels, param.n_row, param.n_col,
param.n_centers, allocator, stream, true,
nullptr, nullptr, T(0.01), false);
}
void TearDown() override {
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(adj));
CUDA_CHECK(hipFree(vd));
}
EpsInputs<T, IdxT> param;
hipStream_t stream;
T* data;
bool* adj;
IdxT *labels, *vd;
IdxT batchSize;
std::shared_ptr<raft::mr::device::allocator> allocator;
}; // class EpsNeighTest
const std::vector<EpsInputs<float, int>> inputsfi = {
{15000, 16, 5, 1, 2.f}, {14000, 16, 5, 1, 2.f},
{15000, 17, 5, 1, 2.f}, {14000, 17, 5, 1, 2.f},
{15000, 18, 5, 1, 2.f}, {14000, 18, 5, 1, 2.f},
{15000, 32, 5, 1, 2.f}, {14000, 32, 5, 1, 2.f},
{20000, 10000, 10, 1, 2.f}, {20000, 10000, 10, 2, 2.f},
};
typedef EpsNeighTest<float, int> EpsNeighTestFI;
TEST_P(EpsNeighTestFI, Result) {
for (int i = 0; i < param.n_batches; ++i) {
CUDA_CHECK(
hipMemsetAsync(adj, 0, sizeof(bool) * param.n_row * batchSize, stream));
CUDA_CHECK(hipMemsetAsync(vd, 0, sizeof(int) * (batchSize + 1), stream));
epsUnexpL2SqNeighborhood<float, int>(
adj, vd, data, data + (i * batchSize * param.n_col), param.n_row,
batchSize, param.n_col, param.eps * param.eps, stream);
ASSERT_TRUE(raft::devArrMatch(param.n_row / param.n_centers, vd, batchSize,
raft::Compare<int>(), stream));
}
}
INSTANTIATE_TEST_CASE_P(EpsNeighTests, EpsNeighTestFI,
::testing::ValuesIn(inputsfi));
}; // namespace Distance
}; // namespace MLCommon
|
15ef51066d7578cb376b5a01473332900ba6b112.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <distance/epsilon_neighborhood.cuh>
#include <raft/mr/device/allocator.hpp>
#include <random/make_blobs.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Distance {
template <typename T, typename IdxT>
struct EpsInputs {
IdxT n_row, n_col, n_centers, n_batches;
T eps;
};
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const EpsInputs<T, IdxT>& p) {
return os;
}
template <typename T, typename IdxT>
class EpsNeighTest : public ::testing::TestWithParam<EpsInputs<T, IdxT>> {
protected:
void SetUp() override {
param = ::testing::TestWithParam<EpsInputs<T, IdxT>>::GetParam();
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(data, param.n_row * param.n_col);
raft::allocate(labels, param.n_row);
batchSize = param.n_row / param.n_batches;
raft::allocate(adj, param.n_row * batchSize);
raft::allocate(vd, batchSize + 1, true);
allocator.reset(new raft::mr::device::default_allocator);
Random::make_blobs<T, IdxT>(data, labels, param.n_row, param.n_col,
param.n_centers, allocator, stream, true,
nullptr, nullptr, T(0.01), false);
}
void TearDown() override {
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(adj));
CUDA_CHECK(cudaFree(vd));
}
EpsInputs<T, IdxT> param;
cudaStream_t stream;
T* data;
bool* adj;
IdxT *labels, *vd;
IdxT batchSize;
std::shared_ptr<raft::mr::device::allocator> allocator;
}; // class EpsNeighTest
const std::vector<EpsInputs<float, int>> inputsfi = {
{15000, 16, 5, 1, 2.f}, {14000, 16, 5, 1, 2.f},
{15000, 17, 5, 1, 2.f}, {14000, 17, 5, 1, 2.f},
{15000, 18, 5, 1, 2.f}, {14000, 18, 5, 1, 2.f},
{15000, 32, 5, 1, 2.f}, {14000, 32, 5, 1, 2.f},
{20000, 10000, 10, 1, 2.f}, {20000, 10000, 10, 2, 2.f},
};
typedef EpsNeighTest<float, int> EpsNeighTestFI;
TEST_P(EpsNeighTestFI, Result) {
for (int i = 0; i < param.n_batches; ++i) {
CUDA_CHECK(
cudaMemsetAsync(adj, 0, sizeof(bool) * param.n_row * batchSize, stream));
CUDA_CHECK(cudaMemsetAsync(vd, 0, sizeof(int) * (batchSize + 1), stream));
epsUnexpL2SqNeighborhood<float, int>(
adj, vd, data, data + (i * batchSize * param.n_col), param.n_row,
batchSize, param.n_col, param.eps * param.eps, stream);
ASSERT_TRUE(raft::devArrMatch(param.n_row / param.n_centers, vd, batchSize,
raft::Compare<int>(), stream));
}
}
INSTANTIATE_TEST_CASE_P(EpsNeighTests, EpsNeighTestFI,
::testing::ValuesIn(inputsfi));
}; // namespace Distance
}; // namespace MLCommon
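// Reference sketch (not part of the test): the value asserted above, n_row / n_centers, holds
// because make_blobs produces equally sized, well-separated clusters with small variance. A
// host-side brute-force count for one query point q (row-major data layout assumed) would be:
//
//   int count_eps_neighbors(const float* data, int n_row, int n_col, const float* q, float eps2) {
//     int c = 0;
//     for (int r = 0; r < n_row; ++r) {
//       float d2 = 0.f;
//       for (int j = 0; j < n_col; ++j) { float d = data[r * n_col + j] - q[j]; d2 += d * d; }
//       c += (d2 <= eps2) ? 1 : 0;
//     }
//     return c;
//   }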
|
33347e3b0b1109760a560c41e9a7747ff768fa3e.hip
|
// !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
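// Run sketch (illustrative, not in the original file): each case registers itself through Google
// Benchmark's BENCHMARK() macro; a BENCHMARK_MAIN() (assumed to live in harness.h or a separate
// main TU) then executes them, and a single shape can be selected with a filter, e.g.
//
//   ./cuasr_bench --benchmark_filter=maximum_minimum_dsrgemm_nn_n_8x32x8   // hypothetical binary name
//
// The Flop/s counter reported per iteration is the 2*N^3 figure computed above.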
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
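// A note on the timing loop shared by all benchmarks here: bench.run() presumably
// returns without blocking on the GPU, which is why each iteration ends with an
// explicit device synchronization; that call is what makes Google Benchmark's
// wall-clock timing cover the kernel execution rather than only the launch.
// benchmark::DoNotOptimize() keeps the value returned by run() from being optimized
// away; it does not change what is measured.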
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
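// CUASR_BENCH_LEVEL controls how much of this tile sweep gets compiled: the ">= 0"
// guard above marks tiles that are built at every level (this
// 64x64x8_32x32x1_8x4_4x8_2x2 configuration is one of them), ">= 1" adds the commonly
// useful tiles, and ">= 2" enables the exhaustive sweep. The macro must be supplied at
// compile time, e.g. by passing -DCUASR_BENCH_LEVEL=1 to the compiler (how that flag
// is wired up depends on the build system); higher levels increase both compile time
// and the number of registered benchmarks accordingly.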
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
|
33347e3b0b1109760a560c41e9a7747ff768fa3e.cu
|
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
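// Registration detail shared by every benchmark in this file:
//   BENCHMARK(...)->RangeMultiplier(2)->Range(256, 4096)
// asks Google Benchmark for one run per power-of-two size in {256, 512, 1024, 2048,
// 4096}; state.range(0) then reads that value back as the square problem size
// (M = N = K). At run time an individual configuration can be picked out with the
// standard --benchmark_filter regex flag, for example (the binary name below is
// hypothetical):
//   ./cuasr_device_bench --benchmark_filter='maximum_minimum_dsrgemm_nn_n_8x32x8_8x32x1_2x4_4x8_1x1'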
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
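// The device kernel is generic over the semiring: between this max-min benchmark and
// a conventional GEMM, only the two functors handed to DefaultSemiRingConfiguration
// change. The sketch below shows a plus-multiplies instantiation with the same
// 16x64x8 / 16x64x1 tiling as the benchmark above; it assumes cuasr/functional.h also
// provides cuasr::plus and cuasr::multiplies functors (they are not used anywhere in
// this generated file, so treat the names as an assumption).
namespace cuasr_bench_alt_semiring_sketch {
using precision = double;
using OpClass   = cutlass::arch::OpClassSimt;
using SmArch    = cutlass::arch::Sm50;
using Config    = typename cuasr::gemm::device::DefaultSemiRingConfiguration<
    precision, precision, precision, precision, OpClass,
    cuasr::plus<precision>, cuasr::multiplies<precision>, SmArch>;
using PlusMultipliesSrgemm = cuasr::gemm::device::Srgemm<
    Config::AdditionOp, Config::MultiplicationOp,
    precision, cutlass::layout::ColumnMajor,
    precision, cutlass::layout::ColumnMajor,
    precision, cutlass::layout::ColumnMajor,
    precision, OpClass, SmArch,
    cutlass::gemm::GemmShape<16, 64, 8>, cutlass::gemm::GemmShape<16, 64, 8>,
    cutlass::gemm::GemmShape<1, 1, 1>, Config::EpilogueOutputOp,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
} // namespace cuasr_bench_alt_semiring_sketch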
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
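////////////////////////////////////////////////////////////////////////////////
// Note (added): a minimal compile-time sketch of the tile decomposition encoded in the
// benchmark names, using the 32x64x8_16x32x1_4x4_4x8_2x2 configuration directly above.
// Assuming the usual CUTLASS SIMT convention, the warp tile is the threadblock tile
// split across the 2x2 warp grid (M and N only; K is shared), and Elements/Thread is
// the warp tile split across the 4x8 thread layout. The "AddedNote" aliases below are
// illustrative only and are not referenced by any generated benchmark.
namespace {
using AddedNoteThreadblockTile = cutlass::gemm::GemmShape<32, 64, 8>;
using AddedNoteWarpTile        = cutlass::gemm::GemmShape<16, 32, 8>;
static_assert(AddedNoteThreadblockTile::kM / 2 == AddedNoteWarpTile::kM
           && AddedNoteThreadblockTile::kN / 2 == AddedNoteWarpTile::kN
           && AddedNoteThreadblockTile::kK     == AddedNoteWarpTile::kK,
              "warp tile = threadblock tile / (2x2 warps), K shared");
static_assert(AddedNoteWarpTile::kM / 4 == 4 && AddedNoteWarpTile::kN / 8 == 4,
              "elements per thread = warp tile / (4x8 threads per warp)");
} // namespace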
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::minimum<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_minimum_dsrgemm_nn_n_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
|
696fd1c31f41b81e4007cf15f09403210d262f8a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* ADMM-based LDPC decoder kernels (16-bit packed messages).
 *
 * The surrounding boilerplate comes from the basic CUDA vector-addition
 * sample, but the code below implements the variable-node and check-node
 * updates of an ADMM LP decoder, together with degree-6 parity-polytope
 * projection routines, with messages stored as packed __half2
 * (Lambda, zReplica) pairs.
 *
 */
#include <stdio.h>
#include <hip/hip_fp16.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ inline void fsort2(float* a, float *b)
{
const float ymm0 = (*a);
const float ymm1 = (*b);
const float ymm3 = fmax(ymm0, ymm1);
(*b) = fmin(ymm0, ymm1);
(*a) = ymm3;
}
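// sort6: branch-free sorting network (12 compare-exchanges). Because fsort2
// leaves the larger value in its first argument, dst[] ends up sorted in
// descending order: dst[0] is the maximum and dst[5] the minimum of src[].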
__device__ inline void sort6(const float src[6], float dst[6]){
auto d0 = src[0];
auto d1 = src[1];
auto d2 = src[2];
auto d3 = src[3];
auto d4 = src[4];
auto d5 = src[5];
fsort2(&d1, &d2); fsort2(&d0, &d2); fsort2(&d0, &d1); fsort2(&d4, &d5);
fsort2(&d3, &d5); fsort2(&d3, &d4); fsort2(&d0, &d3); fsort2(&d1, &d4);
fsort2(&d2, &d5); fsort2(&d2, &d4); fsort2(&d1, &d3); fsort2(&d2, &d3);
dst[0] = d0;
dst[1] = d1;
dst[2] = d2;
dst[3] = d3;
dst[4] = d4;
dst[5] = d5;
}
#define SWAP_des(x,y) sort2_swap_des(&d##x, &d##y, &p##x, &p##y)
__device__ void sort2_swap_des(float* dx, float* dy, int* px, int* py)
{
const float Dx = *dx, Dy = (*dy);
const int Px = *px, Py = (*py);
const bool test = (Dx > Dy);
(*dx) = fmaxf(Dx,Dy);
(*dy) = fminf(Dx,Dy);
(*px) = test ? Px : Py;
(*py) = test ? Py : Px;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
#define SWAP_asc(x,y) sort2_swap_asc(&d##x, &d##y, &p##x, &p##y)
__device__ void sort2_swap_asc(float* dx, float* dy, int* px, int* py)
{
const float Dx = *dx, Dy = (*dy);
const int Px = *px, Py = (*py);
const bool test = (Dx < Dy);
(*dx) = fminf(Dx,Dy);
(*dy) = fmaxf(Dx,Dy);
(*px) = test ? Px : Py;
(*py) = test ? Py : Px;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ void sort6_swap(float d[6], int p[6])
{
#define SWAP SWAP_des
int p0 = 0; int p1 = 1; int p2 = 2;
int p3 = 3; int p4 = 4; int p5 = 5;
float d0 = d[0]; float d1 = d[1]; float d2 = d[2];
float d3 = d[3]; float d4 = d[4]; float d5 = d[5];
SWAP(1, 2); SWAP(0, 2); SWAP(0, 1); SWAP(4, 5);
SWAP(3, 5); SWAP(3, 4); SWAP(0, 3); SWAP(1, 4);
SWAP(2, 5); SWAP(2, 4); SWAP(1, 3); SWAP(2, 3);
d[0] = d0; d[1] = d1; d[2] = d2; d[3] = d3; d[4] = d4; d[5] = d5;
p[0] = p0; p[1] = p1; p[2] = p2; p[3] = p3; p[4] = p4; p[5] = p5;
#undef SWAP
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ void sort6_swap(float illr[6], float rllr[6], int ipos[6], int rpos[6])
{
#define SWAP SWAP_asc
int p0 = ipos[0]; int p1 = ipos[1]; int p2 = ipos[2];
int p3 = ipos[3]; int p4 = ipos[4]; int p5 = ipos[5];
float d0 = illr[0]; float d1 = illr[1]; float d2 = illr[2];
float d3 = illr[3]; float d4 = illr[4]; float d5 = illr[5];
SWAP(1, 2); SWAP(0, 2); SWAP(0, 1); SWAP(4, 5);
SWAP(3, 5); SWAP(3, 4); SWAP(0, 3); SWAP(1, 4);
SWAP(2, 5); SWAP(2, 4); SWAP(1, 3); SWAP(2, 3);
rllr[0] = d0; rllr[1] = d1; rllr[2] = d2; rllr[3] = d3; rllr[4] = d4; rllr[5] = d5;
rpos[0] = p0; rpos[1] = p1; rpos[2] = p2; rpos[3] = p3; rpos[4] = p4; rpos[5] = p5;
#undef SWAP
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ void sort6_rank_order_reg(float llr[ ], int pos[ ])
{
const float x0 = llr[0]; const float x1 = llr[1]; const float x2 = llr[2];
const float x3 = llr[3]; const float x4 = llr[4]; const float x5 = llr[5];
const int o0 = (x0< x1) + (x0< x2) + (x0< x3) + (x0< x4) + (x0<x5);
const int o1 = (x1<=x0) + (x1< x2) + (x1< x3) + (x1< x4) + (x1<x5);
const int o2 = (x2<=x0) + (x2<=x1) + (x2< x3) + (x2< x4) + (x2<x5);
const int o3 = (x3<=x0) + (x3<=x1) + (x3<=x2) + (x3< x4) + (x3<x5);
const int o4 = (x4<=x0) + (x4<=x1) + (x4<=x2) + (x4<=x3) + (x4<x5);
const int o5 = 15 - (o0 + o1 + o2 + o3 + o4);
llr[o0]=x0; llr[o1]=x1; llr[o2]=x2; llr[o3]=x3; llr[o4]=x4; llr[o5]=x5;
pos[o0]= 0; pos[o1]= 1; pos[o2]= 2; pos[o3]= 3; pos[o4]= 4; pos[o5]= 5;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ void sort6_rank_order_reg_modif(float illr[ ], float rllr[ ], int ipos[ ], int rpos[ ])
{
const float x0 = illr[0], x1 = illr[1], x2 = illr[2];
const float x3 = illr[3], x4 = illr[4], x5 = illr[5];
const int o0 = (x0> x1) + (x0> x2) + (x0> x3) + (x0> x4) + (x0>x5);
const int o1 = (x1>=x0) + (x1> x2) + (x1> x3) + (x1> x4) + (x1>x5);
const int o2 = (x2>=x0) + (x2>=x1) + (x2> x3) + (x2> x4) + (x2>x5);
const int o3 = (x3>=x0) + (x3>=x1) + (x3>=x2) + (x3> x4) + (x3>x5);
const int o4 = (x4>=x0) + (x4>=x1) + (x4>=x2) + (x4>=x3) + (x4>x5);
const int o5 = 15 - (o0 + o1 + o2 + o3 + o4);
rllr[o0]=x0; rllr[o1]=x1; rllr[o2]=x2; rllr[o3]=x3; rllr[o4]=x4; rllr[o5]=x5;
rpos[o0]=ipos[0]; rpos[o1]=ipos[1]; rpos[o2]=ipos[2]; rpos[o3]=ipos[3]; rpos[o4]=ipos[4]; rpos[o5]=ipos[5];
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__shared__ int sdata[128*6]; // 128 threads x 6 values per check node (also reused as the block-wide syndrome reduction buffer)
////////////////////////////////////////////////////////////////////////////////////////////////////////
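// projection_deg6: appears to implement the Euclidean projection of a degree-6
// check-node vector onto the parity polytope used in ADMM-LP decoding. It
// first handles the all-zero and all-one corner cases, then clips onto the
// [0,1]^6 cube, and otherwise sweeps the breakpoints beta in [0, beta_max] of
// the active facet before re-clipping. zSorti[] maps the internally sorted
// coordinates back to the caller's original ordering of results[].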
__device__ void projection_deg6(float llr[], float results[])
{
const int length = 6;
bool finished = false;
/////////////////////////////////////////////////////////////////////////////////////////////////////////
int AllZero = (llr[0] <= 0);
int AllOne = (llr[0] >= 1);
#pragma unroll
for(int i = 1; i < length; i++)
{
AllZero = AllZero + (llr[i] <= 0);
AllOne = AllOne + (llr[i] >= 1);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
finished = (AllZero == length);
__syncthreads( );
/////////////////////////////////////////////////////////////////////////////////////////////////////////
bool test = (finished == false) && (AllOne == length) && ((length&0x01) == 0);
#pragma unroll
for(int i = 0; i < length; i++)
results[i] = (test == true) ? 1.0f : 0.0f;
finished = finished | test;
__syncthreads( );
/////////////////////////////////////////////////////////////////////////////////////////////////////////
float constituent = 0;
float llrClip[6];
int zSorti[6] = {0, 1, 2, 3, 4, 5};
sort6_swap(llr, zSorti);
#pragma unroll
for(int i = 0; i < length; i++)// project on the [0,1]^d cube
{
const float vMax = fminf(fmaxf(llr[i], 0.0f), 1.0f);
llrClip[i] = vMax;
constituent += vMax;
}
int r = (int)constituent;
r = r & 0xFFFFFFFE;//- (r & 0x01);
float sum_Clip = llrClip[0];
for(int i = 1; i < length; i++)
{
sum_Clip += (i < r+1) ? llrClip[i] : -llrClip[i];
}
// conditional assignment of the results
bool valid = ( finished == false ) && (sum_Clip <= r);
#pragma unroll
for(int i = 0; i < length; i++)
results[zSorti[i]] = (valid == true) ? llrClip[i] : results[zSorti[i]];
finished = finished || valid;
/////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
/////////////////////////////////////////////////////////////////////////////////////////////////////////
if( finished == false )
{
float beta = 0;
float beta_max = (r + 2 <= length) ? (llr[r] - llr[r+1])/2 : llr[r]; // assign beta_max
// sorting zBetaRep
int zSorti_m[6] = {0, 1, 2, 3, 4, 5};
float T_in[6];
float T_out[6];
int order_out[6];
#pragma unroll
for(int i = 0; i < length; i++)
T_in[i] = (i < r+1) ? llr[i] - 1.0f : -llr[i];
sort6_rank_order_reg_modif (T_in, T_out, zSorti_m, order_out);
int clip_idx = -1;
int zero_idx = 0;
int idx_start = 0;
int idx_end = -1;
#pragma unroll 6
for(int i = 0; i < length; i++)
{
clip_idx += (llr[i] > 1.0f);
zero_idx += (llr[i] >= -1e-10f);
idx_start += (T_out[i] < 1e-10f);
idx_end += (T_out[i] < beta_max);
}
float active_sum = 0;
#pragma unroll 6
for(int i = 0;i < length; i++)
{
active_sum += (i > clip_idx && i <= r ) ? llr[i] : 0.0f;
active_sum -= (i > r && i < zero_idx) ? llr[i] : 0.0f;
}
float total_sum = active_sum + clip_idx + 1;
int previous_clip_idx = clip_idx;
int previous_zero_idx = zero_idx;
float previous_active_sum = active_sum;
bool change_pre = false;
for(int i = idx_start; i <= idx_end; i++)// for every beta between 0 and beta_max
{
if(change_pre)
{
// save previous things
previous_clip_idx = clip_idx;
previous_zero_idx = zero_idx;
previous_active_sum = active_sum;
}
change_pre = false;
beta = T_out[i];
clip_idx -= (order_out[i] <= r);
zero_idx += (order_out[i] > r);
active_sum += (order_out[i] <= r) ? llr[order_out[i]] : -llr[order_out[i]];
if (i < length - 1)
{
if (beta != T_out[i+1])
{
total_sum = (clip_idx + 1) + active_sum - beta * (zero_idx - clip_idx - 1);
change_pre = true;
if(total_sum < r)
break;
}
}
else if (i == length - 1)
{
total_sum = (clip_idx + 1) + active_sum - beta * (zero_idx - clip_idx - 1);
change_pre = true;
}
}
clip_idx = (total_sum > r) ? clip_idx : previous_clip_idx;
active_sum = (total_sum > r) ? active_sum : previous_active_sum;
zero_idx = (total_sum > r) ? zero_idx : previous_zero_idx;
beta = -(r - clip_idx - 1 - active_sum)/(zero_idx - clip_idx - 1);
#pragma unroll 6
for(int i = 0; i < length; i++)
{
const float vA = llr[i];
const float vD = (i <= r) ? vA - beta : vA + beta;
results[zSorti[i]] = fminf(fmaxf(vD, 0.0f), 1.0f);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
/////////////////////////////////////////////////////////////////////////////////////////////////////////
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_InitArrays_16b(float* LZr, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
__half t1 = __float2half ( 0.00f ); // Lambda
__half t2 = __float2half ( 0.50f ); // zReplica
__half2* ptr = reinterpret_cast<__half2*>(LZr);
ptr[i] = __halves2half2( t1, t2 );
}
}
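// Illustrative host-side launch sketch (editorial addition, not part of the
// original source). Assumption: d_LZr is a device buffer with room for N
// packed __half2 values; as initialized above, the low half of each element
// holds Lambda (0.0) and the high half zReplica (0.5). The 128-thread block
// size is only an example.
static inline void launch_ADMM_InitArrays_16b_sketch(float *d_LZr, int N, hipStream_t stream = nullptr)
{
    const int threads = 128;
    const int blocks  = (N + threads - 1) / threads;
    hipLaunchKernelGGL(ADMM_InitArrays_16b, dim3(blocks), dim3(threads), 0, stream, d_LZr, N);
}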
/////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_VN_kernel_deg3_16b(
const float* _LogLikelihoodRatio, float* OutputFromDecoder, float* LZr, const unsigned int *t_row, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
const float mu = 3.0f;
const float alpha = 0.8;
const float _amu_ = alpha / mu;
const float _2_amu_ = _amu_+ _amu_;
const float factor = 1.0f / (3.0f - _2_amu_);
const int degVn = 3;
const __half2* ptr = reinterpret_cast<__half2*>(LZr);
if (i < N){
float temp = -_LogLikelihoodRatio[i];
const int frame_offset = (i%2640);
const int num_trame = (i/2640);
const ushort4 off = reinterpret_cast<ushort4*>((unsigned int *)t_row)[ frame_offset ];
const unsigned short tab[4] = {off.x, off.y, off.z, off.w};
#pragma unroll 3
for(int k = 0; k < degVn; k++)
{
const int off = tab[k];
const __half2 data = ptr[ (7920 * num_trame) + off ];
temp += ( __high2float(data) + __low2float(data) );
}
const float xx = (temp - _amu_) * factor;
OutputFromDecoder[i] = fmaxf(fminf(xx, 1.0f), 0.0f);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_CN_kernel_deg6_16b(
const float *OutputFromDecoder, float *Lzr, const unsigned int *t_col1, int *cn_synrome, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x; // index of the check node to process
const float rho = 1.9f;
const float un_m_rho = 1.0f - rho;
const int degCn = 6;
float v_proj[6];
float ztemp [6];
__half2* ptr = reinterpret_cast<__half2*>(Lzr);
float* PTR = reinterpret_cast<float*>(sdata);
if (i < N){
const int frame_offset = i%1320;
const int trame_start = 2640 * (i/1320);
int syndrom = 0;
#if 1
unsigned short* cptr = (unsigned short*)t_col1;
const uint3 offset = reinterpret_cast<uint3*>( cptr )[ frame_offset ];
const unsigned int TAB[3] = {offset.x, offset.y, offset.z};
const unsigned short* tab = (const unsigned short*)TAB;
#endif
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const int ind = degCn * i + k;
#if 1
const float xpred = OutputFromDecoder[ trame_start + tab[ k ] ];
#else
const int offset = t_col1[ degCn * frame_offset + k ];
const float xpred = OutputFromDecoder[ trame_start + offset ];
#endif
syndrom += (xpred > 0.5);
PTR[threadIdx.x + 128 * k] = xpred;
const __half2 data = ptr[ ind ];
v_proj[k] = (rho * xpred) + (un_m_rho * __high2float(data)) - __low2float(data);
}
cn_synrome[i] = syndrom & 0x01;
projection_deg6(v_proj, ztemp);
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const int ind = degCn * i + k;
const float xpred = PTR[threadIdx.x + 128 * k];
const __half2 data = ptr[ ind ];
float x = __low2float(data) + (rho * (ztemp[k] - xpred) + un_m_rho * (ztemp[k] - __high2float(data)));
ptr[ ind ] = __halves2half2( __float2half(x), __float2half(ztemp[k]) );
}
}
}
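// Illustrative host-side sketch of one ADMM decoding iteration (editorial
// addition, not part of the original source). The per-frame sizes are read
// off the kernels above: 2640 variable nodes and 1320 degree-6 check nodes.
// The check-node kernel is assumed to run with at most 128 threads per block
// because it tiles its shared buffer as PTR[threadIdx.x + 128 * k].
static inline void admm_iteration_sketch(
    const float *d_llr, float *d_output, float *d_LZr,
    const unsigned int *d_t_row, const unsigned int *d_t_col1,
    int *d_cn_syndrome, int num_frames, hipStream_t stream = nullptr)
{
    const int n_vn    = 2640 * num_frames; // variable-node updates
    const int n_cn    = 1320 * num_frames; // check-node updates
    const int threads = 128;
    hipLaunchKernelGGL(ADMM_VN_kernel_deg3_16b,
                       dim3((n_vn + threads - 1) / threads), dim3(threads), 0, stream,
                       d_llr, d_output, d_LZr, d_t_row, n_vn);
    hipLaunchKernelGGL(ADMM_CN_kernel_deg6_16b,
                       dim3((n_cn + threads - 1) / threads), dim3(threads), 0, stream,
                       d_output, d_LZr, d_t_col1, d_cn_syndrome, n_cn);
}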
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_VN_kernel_deg3_16b_mod(
const float* _LogLikelihoodRatio, float* OutputFromDecoder, float* LZr, const unsigned int *t_row, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
const float mu = 3.0f;
const float alpha = 0.8;
const float _amu_ = alpha / mu;
const float _2_amu_ = _amu_+ _amu_;
const float factor = 1.0f / (3.0f - _2_amu_);
const int degVn = 3;
const __half2* ptr = reinterpret_cast<__half2*>(LZr);
if (i < N){
float temp = -_LogLikelihoodRatio[i];
const int frame_offset = (i%2640);
const int num_trame = (i/2640);
const ushort4 off = reinterpret_cast<ushort4*>((unsigned int *)t_row)[ frame_offset ];
const unsigned short tab[4] = {off.x, off.y, off.z, off.w};
#pragma unroll 3
for(int k = 0; k < degVn; k++)
{
const int off = tab[k];
const __half2 data = ptr[ (8448 * num_trame) + off ];
temp += ( __high2float(data) + __low2float(data) );
}
const float xx = (temp - _amu_) * factor;
OutputFromDecoder[i] = fmaxf(fminf(xx, 1.0f), 0.0f);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
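// proj_deg6_v2: alternative degree-6 projection. It appears to follow the
// "cut search" construction: take hard decisions f[i], flip the coordinate
// closest to 0.5 when their weight is even, reflect the flipped coordinates
// (v_T = 1 - llr), and, when the reflected point is infeasible, project it
// onto the probability simplex via the sorted cumulative-sum threshold sRho
// before reflecting back into v_clip[].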
__device__ void proj_deg6_v2(float llr[], float v_clip[])
{
const int length = 6;
#pragma unroll 6
for(int i = 0;i < length; i++)
{
v_clip[i] = fmin(fmax(llr[i], 0.0f), 1.0f);
}
int sum_f = 0;
float f[length];
#pragma unroll 6
for(int i = 0;i < length; i++)
{
const float value = (v_clip[i] > 0.5f);
f[i] = value;
sum_f += (int)value;
}
int is_even = (int)sum_f & 0x01;
int indice = 0;
float minimum = fabsf( 0.5f - v_clip[0] );
#pragma unroll 6
for(int i = 1; i < length; i++)
{
float tmp = fabsf( 0.5f - v_clip[i] );
indice = (tmp < minimum) ? i : indice;
minimum = fminf(tmp, minimum);
}
if (is_even == 0)
{
f[indice] = 1 - f[indice];
}
float v_T[length];
#pragma unroll 6
for(int i = 0; i < length; i++)
{
const float value = 1.0f - llr[i];
v_T[i] = (f[i] == 1) ? value : llr[i];
}
float sum_v_T = 0.0f;
#pragma unroll 6
for(int i = 0;i < length; i++)
{
sum_v_T += fmin(fmax(v_T[i], 0.0f), 1.0f);
}
if ( sum_v_T >= 1.0f )
{
return;
}
float sorted[length];
sort6( v_T, sorted );
float sum_Mu=0;
float s[length];
#pragma unroll 6
for(int i = 0; i < length; i++)
{
sum_Mu += sorted[i];
s[i] = (sum_Mu - 1.0f) / (1 + i);
}
// get Rho
float sRho = s[0];
#pragma unroll 5
for(int i = 1; i < length; i++)
{
sRho = (sorted[i] > s[i]) ? s[i] : sRho;
}
float u[length];
#pragma unroll 6
for(int i = 0;i < length; i++)
{
const float ui = fmax (v_T[i] - sRho, 0.0f);
v_clip[i] = ( f[i] == 1 ) ? (1.0f - ui) : ui;
}
// #pragma unroll 6
// for(int i = 0;i < length; i++)
// {
// v_clip[i] = u_T[i];
// }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_CN_kernel_deg6_16b_mod(
const float *OutputFromDecoder, float *Lzr, const unsigned int *t_col1, int *cn_synrome, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x; // index of the check node to process
const float rho = 1.9f;
const float un_m_rho = 1.0f - rho;
const int degCn = 6;
float v_proj[6];
float ztemp [6];
__half2* ptr = reinterpret_cast<__half2*>(Lzr);
float* PTR = reinterpret_cast<float*>(sdata);
if (i < N){
const int frame_id = i/1320;
const int frame_offset = i%1320;
const int trame_start = 2640 * (i/1320);
const int IND = 8448 * frame_id; // offset to access messages from the current frame
const int indice = IND + 768 * (frame_offset/128) + frame_offset%128;
int syndrom = 0;
unsigned short* cptr = (unsigned short*)t_col1;
const uint3 offset = reinterpret_cast<uint3*>( cptr )[ frame_offset ];
const unsigned int TAB[3] = {offset.x, offset.y, offset.z};
const unsigned short* tab = (const unsigned short*)TAB;
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const float xpred = OutputFromDecoder[ trame_start + tab[ k ] ];
syndrom += (xpred > 0.5);
const __half2 data = ptr[ indice +128 * k ];
const auto contribution = (rho * xpred) + (un_m_rho * __high2float(data)) - __low2float(data);
v_proj[k] = contribution;
PTR[threadIdx.x + 128 * k] = contribution;
}
cn_synrome[i] = syndrom & 0x01;
proj_deg6_v2(v_proj, ztemp);
//projection_deg6(v_proj, ztemp);
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const float contr = PTR[threadIdx.x + 128 * k];
float x = ztemp[k] - contr;
ptr[ indice +128 * k ] = __halves2half2( __float2half(x), __float2half(ztemp[k]) );
}
}
}
__global__ void ADMM_CN_kernel_deg6_16b_mod2(
const float *OutputFromDecoder, float *Lzr, const unsigned int *t_col1, int *cn_synrome, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x; // index of the check node to process
const float rho = 1.9f;
const float un_m_rho = 1.0f - rho;
const int degCn = 6;
float v_proj[6];
float ztemp [6];
__half2* ptr = reinterpret_cast<__half2*>(Lzr);
float* PTR = reinterpret_cast<float*>(sdata);
if (i < N){
const int frame_id = i/1320;
const int frame_offset = i%1320;
const int trame_start = 2640 * (i/1320);
const int IND = 8448 * frame_id; // offset to access messages from the current frame
const int indice = IND + 768 * (frame_offset/128) + frame_offset%128;
int syndrom = 0;
unsigned short* cptr = (unsigned short*)t_col1;
const uint3 offset = reinterpret_cast<uint3*>( cptr )[ frame_offset ];
const unsigned int TAB[3] = {offset.x, offset.y, offset.z};
const unsigned short* tab = (const unsigned short*)TAB;
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const float xpred = OutputFromDecoder[ trame_start + tab[ k ] ];
syndrom += (xpred > 0.5);
const __half2 data = ptr[ indice +128 * k ];
const auto contribution = (rho * xpred) + (un_m_rho * __high2float(data)) - __low2float(data);
v_proj[k] = contribution;
PTR[threadIdx.x + 128 * k] = contribution;
}
cn_synrome[i] = syndrom & 0x01;
projection_deg6(v_proj, ztemp);
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const float contr = PTR[threadIdx.x + 128 * k];
float x = ztemp[k] - contr;
ptr[ indice +128 * k ] = __halves2half2( __float2half(x), __float2half(ztemp[k]) );
}
}
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
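// The remainder of this kernel reuses the file-scope sdata[] buffer to fold
// the per-check-node syndromes into one partial sum per block: each thread
// accumulates a grid-strided sum (two elements per iteration), the block then
// tree-reduces in shared memory with a warp-synchronous tail, and thread 0
// writes the block total to cn_synrome[blockIdx.x]. The check-node update
// above already restricts the block to at most 128 threads, for which the
// 128*6-int sdata[] buffer is large enough.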
__syncthreads();
unsigned int tid = threadIdx.x;
unsigned int gridSize = blockDim.x * 2 * gridDim.x;
int mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < N)
{
mySum += cn_synrome[i];
// ensure we don't read out of bounds
if (i + blockDim.x < N)
mySum += cn_synrome[i+blockDim.x];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024) { if (tid < 512) { sdata[tid] = mySum = mySum + sdata[tid + 512]; } __syncthreads(); }
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
// avoid bank conflict
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile int* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0)
cn_synrome[blockIdx.x] = sdata[0];
}
|
696fd1c31f41b81e4007cf15f09403210d262f8a.cu
|
/*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* ADMM-based LDPC decoder kernels (16-bit packed messages).
 *
 * The surrounding boilerplate comes from the basic CUDA vector-addition
 * sample, but the code below implements the variable-node and check-node
 * updates of an ADMM LP decoder, together with degree-6 parity-polytope
 * projection routines, with messages stored as packed __half2
 * (Lambda, zReplica) pairs.
 *
 */
#include <stdio.h>
#include <cuda_fp16.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ inline void fsort2(float* a, float *b)
{
const float ymm0 = (*a);
const float ymm1 = (*b);
const float ymm3 = fmax(ymm0, ymm1);
(*b) = fmin(ymm0, ymm1);
(*a) = ymm3;
}
__device__ inline void sort6(const float src[6], float dst[6]){
auto d0 = src[0];
auto d1 = src[1];
auto d2 = src[2];
auto d3 = src[3];
auto d4 = src[4];
auto d5 = src[5];
fsort2(&d1, &d2); fsort2(&d0, &d2); fsort2(&d0, &d1); fsort2(&d4, &d5);
fsort2(&d3, &d5); fsort2(&d3, &d4); fsort2(&d0, &d3); fsort2(&d1, &d4);
fsort2(&d2, &d5); fsort2(&d2, &d4); fsort2(&d1, &d3); fsort2(&d2, &d3);
dst[0] = d0;
dst[1] = d1;
dst[2] = d2;
dst[3] = d3;
dst[4] = d4;
dst[5] = d5;
}
#define SWAP_des(x,y) sort2_swap_des(&d##x, &d##y, &p##x, &p##y)
__device__ void sort2_swap_des(float* dx, float* dy, int* px, int* py)
{
const float Dx = *dx, Dy = (*dy);
const int Px = *px, Py = (*py);
const bool test = (Dx > Dy);
(*dx) = fmaxf(Dx,Dy);
(*dy) = fminf(Dx,Dy);
(*px) = test ? Px : Py;
(*py) = test ? Py : Px;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
#define SWAP_asc(x,y) sort2_swap_asc(&d##x, &d##y, &p##x, &p##y)
__device__ void sort2_swap_asc(float* dx, float* dy, int* px, int* py)
{
const float Dx = *dx, Dy = (*dy);
const int Px = *px, Py = (*py);
const bool test = (Dx < Dy);
(*dx) = fminf(Dx,Dy);
(*dy) = fmaxf(Dx,Dy);
(*px) = test ? Px : Py;
(*py) = test ? Py : Px;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ void sort6_swap(float d[6], int p[6])
{
#define SWAP SWAP_des
int p0 = 0; int p1 = 1; int p2 = 2;
int p3 = 3; int p4 = 4; int p5 = 5;
float d0 = d[0]; float d1 = d[1]; float d2 = d[2];
float d3 = d[3]; float d4 = d[4]; float d5 = d[5];
SWAP(1, 2); SWAP(0, 2); SWAP(0, 1); SWAP(4, 5);
SWAP(3, 5); SWAP(3, 4); SWAP(0, 3); SWAP(1, 4);
SWAP(2, 5); SWAP(2, 4); SWAP(1, 3); SWAP(2, 3);
d[0] = d0; d[1] = d1; d[2] = d2; d[3] = d3; d[4] = d4; d[5] = d5;
p[0] = p0; p[1] = p1; p[2] = p2; p[3] = p3; p[4] = p4; p[5] = p5;
#undef SWAP
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ void sort6_swap(float illr[6], float rllr[6], int ipos[6], int rpos[6])
{
#define SWAP SWAP_asc
int p0 = ipos[0]; int p1 = ipos[1]; int p2 = ipos[2];
int p3 = ipos[3]; int p4 = ipos[4]; int p5 = ipos[5];
float d0 = illr[0]; float d1 = illr[1]; float d2 = illr[2];
float d3 = illr[3]; float d4 = illr[4]; float d5 = illr[5];
SWAP(1, 2); SWAP(0, 2); SWAP(0, 1); SWAP(4, 5);
SWAP(3, 5); SWAP(3, 4); SWAP(0, 3); SWAP(1, 4);
SWAP(2, 5); SWAP(2, 4); SWAP(1, 3); SWAP(2, 3);
rllr[0] = d0; rllr[1] = d1; rllr[2] = d2; rllr[3] = d3; rllr[4] = d4; rllr[5] = d5;
rpos[0] = p0; rpos[1] = p1; rpos[2] = p2; rpos[3] = p3; rpos[4] = p4; rpos[5] = p5;
#undef SWAP
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ void sort6_rank_order_reg(float llr[ ], int pos[ ])
{
const float x0 = llr[0]; const float x1 = llr[1]; const float x2 = llr[2];
const float x3 = llr[3]; const float x4 = llr[4]; const float x5 = llr[5];
const int o0 = (x0< x1) + (x0< x2) + (x0< x3) + (x0< x4) + (x0<x5);
const int o1 = (x1<=x0) + (x1< x2) + (x1< x3) + (x1< x4) + (x1<x5);
const int o2 = (x2<=x0) + (x2<=x1) + (x2< x3) + (x2< x4) + (x2<x5);
const int o3 = (x3<=x0) + (x3<=x1) + (x3<=x2) + (x3< x4) + (x3<x5);
const int o4 = (x4<=x0) + (x4<=x1) + (x4<=x2) + (x4<=x3) + (x4<x5);
const int o5 = 15 - (o0 + o1 + o2 + o3 + o4);
llr[o0]=x0; llr[o1]=x1; llr[o2]=x2; llr[o3]=x3; llr[o4]=x4; llr[o5]=x5;
pos[o0]= 0; pos[o1]= 1; pos[o2]= 2; pos[o3]= 3; pos[o4]= 4; pos[o5]= 5;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ void sort6_rank_order_reg_modif(float illr[ ], float rllr[ ], int ipos[ ], int rpos[ ])
{
const float x0 = illr[0], x1 = illr[1], x2 = illr[2];
const float x3 = illr[3], x4 = illr[4], x5 = illr[5];
const int o0 = (x0> x1) + (x0> x2) + (x0> x3) + (x0> x4) + (x0>x5);
const int o1 = (x1>=x0) + (x1> x2) + (x1> x3) + (x1> x4) + (x1>x5);
const int o2 = (x2>=x0) + (x2>=x1) + (x2> x3) + (x2> x4) + (x2>x5);
const int o3 = (x3>=x0) + (x3>=x1) + (x3>=x2) + (x3> x4) + (x3>x5);
const int o4 = (x4>=x0) + (x4>=x1) + (x4>=x2) + (x4>=x3) + (x4>x5);
const int o5 = 15 - (o0 + o1 + o2 + o3 + o4);
rllr[o0]=x0; rllr[o1]=x1; rllr[o2]=x2; rllr[o3]=x3; rllr[o4]=x4; rllr[o5]=x5;
rpos[o0]=ipos[0]; rpos[o1]=ipos[1]; rpos[o2]=ipos[2]; rpos[o3]=ipos[3]; rpos[o4]=ipos[4]; rpos[o5]=ipos[5];
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__shared__ int sdata[128*6]; // 128 threads x 6 values per check node (also reused as the block-wide syndrome reduction buffer)
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ void projection_deg6(float llr[], float results[])
{
const int length = 6;
bool finished = false;
/////////////////////////////////////////////////////////////////////////////////////////////////////////
int AllZero = (llr[0] <= 0);
int AllOne = (llr[0] >= 1);
#pragma unroll
for(int i = 1; i < length; i++)
{
AllZero = AllZero + (llr[i] <= 0);
AllOne = AllOne + (llr[i] >= 1);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
finished = (AllZero == length);
__syncthreads( );
/////////////////////////////////////////////////////////////////////////////////////////////////////////
bool test = (finished == false) && (AllOne == length) && ((length&0x01) == 0);
#pragma unroll
for(int i = 0; i < length; i++)
results[i] = (test == true) ? 1.0f : 0.0f;
finished = finished | test;
__syncthreads( );
/////////////////////////////////////////////////////////////////////////////////////////////////////////
float constituent = 0;
float llrClip[6];
int zSorti[6] = {0, 1, 2, 3, 4, 5};
sort6_swap(llr, zSorti);
#pragma unroll
for(int i = 0; i < length; i++)// project on the [0,1]^d cube
{
const float vMax = fminf(fmaxf(llr[i], 0.0f), 1.0f);
llrClip[i] = vMax;
constituent += vMax;
}
int r = (int)constituent;
r = r & 0xFFFFFFFE;//- (r & 0x01);
float sum_Clip = llrClip[0];
for(int i = 1; i < length; i++)
{
sum_Clip += (i < r+1) ? llrClip[i] : -llrClip[i];
}
// conditional assignment of the results
bool valid = ( finished == false ) && (sum_Clip <= r);
#pragma unroll
for(int i = 0; i < length; i++)
results[zSorti[i]] = (valid == true) ? llrClip[i] : results[zSorti[i]];
finished = finished || valid;
/////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
/////////////////////////////////////////////////////////////////////////////////////////////////////////
if( finished == false )
{
float beta = 0;
float beta_max = (r + 2 <= length) ? (llr[r] - llr[r+1])/2 : llr[r]; // assign beta_max
// sorting zBetaRep
int zSorti_m[6] = {0, 1, 2, 3, 4, 5};
float T_in[6];
float T_out[6];
int order_out[6];
#pragma unroll
for(int i = 0; i < length; i++)
T_in[i] = (i < r+1) ? llr[i] - 1.0f : -llr[i];
sort6_rank_order_reg_modif (T_in, T_out, zSorti_m, order_out);
int clip_idx = -1;
int zero_idx = 0;
int idx_start = 0;
int idx_end = -1;
#pragma unroll 6
for(int i = 0; i < length; i++)
{
clip_idx += (llr[i] > 1.0f);
zero_idx += (llr[i] >= -1e-10f);
idx_start += (T_out[i] < 1e-10f);
idx_end += (T_out[i] < beta_max);
}
float active_sum = 0;
#pragma unroll 6
for(int i = 0;i < length; i++)
{
active_sum += (i > clip_idx && i <= r ) ? llr[i] : 0.0f;
active_sum -= (i > r && i < zero_idx) ? llr[i] : 0.0f;
}
float total_sum = active_sum + clip_idx + 1;
int previous_clip_idx = clip_idx;
int previous_zero_idx = zero_idx;
float previous_active_sum = active_sum;
bool change_pre = false;
for(int i = idx_start; i <= idx_end; i++)// for every beta between 0 and beta_max
{
if(change_pre)
{
// save previous things
previous_clip_idx = clip_idx;
previous_zero_idx = zero_idx;
previous_active_sum = active_sum;
}
change_pre = false;
beta = T_out[i];
clip_idx -= (order_out[i] <= r);
zero_idx += (order_out[i] > r);
active_sum += (order_out[i] <= r) ? llr[order_out[i]] : -llr[order_out[i]];
if (i < length - 1)
{
if (beta != T_out[i+1])
{
total_sum = (clip_idx + 1) + active_sum - beta * (zero_idx - clip_idx - 1);
change_pre = true;
if(total_sum < r)
break;
}
}
else if (i == length - 1)
{
total_sum = (clip_idx + 1) + active_sum - beta * (zero_idx - clip_idx - 1);
change_pre = true;
}
}
clip_idx = (total_sum > r) ? clip_idx : previous_clip_idx;
active_sum = (total_sum > r) ? active_sum : previous_active_sum;
zero_idx = (total_sum > r) ? zero_idx : previous_zero_idx;
beta = -(r - clip_idx - 1 - active_sum)/(zero_idx - clip_idx - 1);
#pragma unroll 6
for(int i = 0; i < length; i++)
{
const float vA = llr[i];
const float vD = (i <= r) ? vA - beta : vA + beta;
results[zSorti[i]] = fminf(fmaxf(vD, 0.0f), 1.0f);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
/////////////////////////////////////////////////////////////////////////////////////////////////////////
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_InitArrays_16b(float* LZr, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
__half t1 = __float2half ( 0.00f ); // Lambda
__half t2 = __float2half ( 0.50f ); // zReplica
__half2* ptr = reinterpret_cast<__half2*>(LZr);
ptr[i] = __halves2half2( t1, t2 );
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_VN_kernel_deg3_16b(
const float* _LogLikelihoodRatio, float* OutputFromDecoder, float* LZr, const unsigned int *t_row, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
const float mu = 3.0f;
const float alpha = 0.8;
const float _amu_ = alpha / mu;
const float _2_amu_ = _amu_+ _amu_;
const float factor = 1.0f / (3.0f - _2_amu_);
const int degVn = 3;
const __half2* ptr = reinterpret_cast<__half2*>(LZr);
if (i < N){
float temp = -_LogLikelihoodRatio[i];
const int frame_offset = (i%2640);
const int num_trame = (i/2640);
const ushort4 off = reinterpret_cast<ushort4*>((unsigned int *)t_row)[ frame_offset ];
const unsigned short tab[4] = {off.x, off.y, off.z, off.w};
#pragma unroll 3
for(int k = 0; k < degVn; k++)
{
const int off = tab[k];
const __half2 data = ptr[ (7920 * num_trame) + off ];
temp += ( __high2float(data) + __low2float(data) );
}
const float xx = (temp - _amu_) * factor;
OutputFromDecoder[i] = fmaxf(fminf(xx, 1.0f), 0.0f);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_CN_kernel_deg6_16b(
const float *OutputFromDecoder, float *Lzr, const unsigned int *t_col1, int *cn_synrome, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x; // index of the check node to process
const float rho = 1.9f;
const float un_m_rho = 1.0f - rho;
const int degCn = 6;
float v_proj[6];
float ztemp [6];
__half2* ptr = reinterpret_cast<__half2*>(Lzr);
float* PTR = reinterpret_cast<float*>(sdata);
if (i < N){
const int frame_offset = i%1320;
const int trame_start = 2640 * (i/1320);
int syndrom = 0;
#if 1
unsigned short* cptr = (unsigned short*)t_col1;
const uint3 offset = reinterpret_cast<uint3*>( cptr )[ frame_offset ];
const unsigned int TAB[3] = {offset.x, offset.y, offset.z};
const unsigned short* tab = (const unsigned short*)TAB;
#endif
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const int ind = degCn * i + k;
#if 1
const float xpred = OutputFromDecoder[ trame_start + tab[ k ] ];
#else
const int offset = t_col1[ degCn * frame_offset + k ];
const float xpred = OutputFromDecoder[ trame_start + offset ];
#endif
syndrom += (xpred > 0.5);
PTR[threadIdx.x + 128 * k] = xpred;
const __half2 data = ptr[ ind ];
v_proj[k] = (rho * xpred) + (un_m_rho * __high2float(data)) - __low2float(data);
}
cn_synrome[i] = syndrom & 0x01;
projection_deg6(v_proj, ztemp);
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const int ind = degCn * i + k;
const float xpred = PTR[threadIdx.x + 128 * k];
const __half2 data = ptr[ ind ];
float x = __low2float(data) + (rho * (ztemp[k] - xpred) + un_m_rho * (ztemp[k] - __high2float(data)));
ptr[ ind ] = __halves2half2( __float2half(x), __float2half(ztemp[k]) );
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_VN_kernel_deg3_16b_mod(
const float* _LogLikelihoodRatio, float* OutputFromDecoder, float* LZr, const unsigned int *t_row, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
const float mu = 3.0f;
const float alpha = 0.8;
const float _amu_ = alpha / mu;
const float _2_amu_ = _amu_+ _amu_;
const float factor = 1.0f / (3.0f - _2_amu_);
const int degVn = 3;
const __half2* ptr = reinterpret_cast<__half2*>(LZr);
if (i < N){
float temp = -_LogLikelihoodRatio[i];
const int frame_offset = (i%2640);
const int num_trame = (i/2640);
const ushort4 off = reinterpret_cast<ushort4*>((unsigned int *)t_row)[ frame_offset ];
const unsigned short tab[4] = {off.x, off.y, off.z, off.w};
#pragma unroll 3
for(int k = 0; k < degVn; k++)
{
const int off = tab[k];
const __half2 data = ptr[ (8448 * num_trame) + off ];
temp += ( __high2float(data) + __low2float(data) );
}
const float xx = (temp - _amu_) * factor;
OutputFromDecoder[i] = fmaxf(fminf(xx, 1.0f), 0.0f);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ void proj_deg6_v2(float llr[], float v_clip[])
{
const int length = 6;
#pragma unroll 6
for(int i = 0;i < length; i++)
{
v_clip[i] = fmin(fmax(llr[i], 0.0f), 1.0f);
}
int sum_f = 0;
float f[length];
#pragma unroll 6
for(int i = 0;i < length; i++)
{
const float value = (v_clip[i] > 0.5f);
f[i] = value;
sum_f += (int)value;
}
int is_even = (int)sum_f & 0x01;
int indice = 0;
float minimum = fabsf( 0.5f - v_clip[0] );
#pragma unroll 6
for(int i = 1; i < length; i++)
{
float tmp = fabsf( 0.5f - v_clip[i] );
indice = (tmp < minimum) ? i : indice;
minimum = fminf(tmp, minimum);
}
if (is_even == 0)
{
f[indice] = 1 - f[indice];
}
float v_T[length];
#pragma unroll 6
for(int i = 0; i < length; i++)
{
const float value = 1.0f - llr[i];
v_T[i] = (f[i] == 1) ? value : llr[i];
}
float sum_v_T = 0.0f;
#pragma unroll 6
for(int i = 0;i < length; i++)
{
sum_v_T += fmin(fmax(v_T[i], 0.0f), 1.0f);
}
if ( sum_v_T >= 1.0f )
{
return;
}
float sorted[length];
sort6( v_T, sorted );
float sum_Mu=0;
float s[length];
#pragma unroll 6
for(int i = 0; i < length; i++)
{
sum_Mu += sorted[i];
s[i] = (sum_Mu - 1.0f) / (1 + i);
}
// get Rho
float sRho = s[0];
#pragma unroll 5
for(int i = 1; i < length; i++)
{
sRho = (sorted[i] > s[i]) ? s[i] : sRho;
}
float u[length];
#pragma unroll 6
for(int i = 0;i < length; i++)
{
const float ui = fmax (v_T[i] - sRho, 0.0f);
v_clip[i] = ( f[i] == 1 ) ? (1.0f - ui) : ui;
}
// #pragma unroll 6
// for(int i = 0;i < length; i++)
// {
// v_clip[i] = u_T[i];
// }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void ADMM_CN_kernel_deg6_16b_mod(
const float *OutputFromDecoder, float *Lzr, const unsigned int *t_col1, int *cn_synrome, int N)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x; // index of the check node to process
const float rho = 1.9f;
const float un_m_rho = 1.0f - rho;
const int degCn = 6;
float v_proj[6];
float ztemp [6];
__half2* ptr = reinterpret_cast<__half2*>(Lzr);
float* PTR = reinterpret_cast<float*>(sdata);
if (i < N){
const int frame_id = i/1320;
const int frame_offset = i%1320;
const int trame_start = 2640 * (i/1320);
const int IND = 8448 * frame_id; // offset to access messages from the current frame
const int indice = IND + 768 * (frame_offset/128) + frame_offset%128;
int syndrom = 0;
unsigned short* cptr = (unsigned short*)t_col1;
const uint3 offset = reinterpret_cast<uint3*>( cptr )[ frame_offset ];
const unsigned int TAB[3] = {offset.x, offset.y, offset.z};
const unsigned short* tab = (const unsigned short*)TAB;
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const float xpred = OutputFromDecoder[ trame_start + tab[ k ] ];
syndrom += (xpred > 0.5);
const __half2 data = ptr[ indice +128 * k ];
const auto contribution = (rho * xpred) + (un_m_rho * __high2float(data)) - __low2float(data);
v_proj[k] = contribution;
PTR[threadIdx.x + 128 * k] = contribution;
}
cn_synrome[i] = syndrom & 0x01;
proj_deg6_v2(v_proj, ztemp);
//projection_deg6(v_proj, ztemp);
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const float contr = PTR[threadIdx.x + 128 * k];
float x = ztemp[k] - contr;
ptr[ indice +128 * k ] = __halves2half2( __float2half(x), __float2half(ztemp[k]) );
}
}
}
__global__ void ADMM_CN_kernel_deg6_16b_mod2(
const float *OutputFromDecoder, float *Lzr, const unsigned int *t_col1, int *cn_synrome, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x; // index of the check node to process
const float rho = 1.9f;
const float un_m_rho = 1.0f - rho;
const int degCn = 6;
float v_proj[6];
float ztemp [6];
__half2* ptr = reinterpret_cast<__half2*>(Lzr);
float* PTR = reinterpret_cast<float*>(sdata);
if (i < N){
const int frame_id = i/1320;
const int frame_offset = i%1320;
const int trame_start = 2640 * (i/1320);
const int IND = 8448 * frame_id; // offset to access messages from the current frame
const int indice = IND + 768 * (frame_offset/128) + frame_offset%128;
int syndrom = 0;
unsigned short* cptr = (unsigned short*)t_col1;
const uint3 offset = reinterpret_cast<uint3*>( cptr )[ frame_offset ];
const unsigned int TAB[3] = {offset.x, offset.y, offset.z};
const unsigned short* tab = (const unsigned short*)TAB;
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const float xpred = OutputFromDecoder[ trame_start + tab[ k ] ];
syndrom += (xpred > 0.5);
const __half2 data = ptr[ indice +128 * k ];
const auto contribution = (rho * xpred) + (un_m_rho * __high2float(data)) - __low2float(data);
v_proj[k] = contribution;
PTR[threadIdx.x + 128 * k] = contribution;
}
cn_synrome[i] = syndrom & 0x01;
projection_deg6(v_proj, ztemp);
#pragma unroll 6
for(int k = 0; k < degCn; k++)
{
const float contr = PTR[threadIdx.x + 128 * k];
float x = ztemp[k] - contr;
ptr[ indice +128 * k ] = __halves2half2( __float2half(x), __float2half(ztemp[k]) );
}
}
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
unsigned int tid = threadIdx.x;
unsigned int gridSize = blockDim.x * 2 * gridDim.x;
int mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < N)
{
mySum += cn_synrome[i];
// ensure we don't read out of bounds
if (i + blockDim.x < N)
mySum += cn_synrome[i+blockDim.x];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024) { if (tid < 512) { sdata[tid] = mySum = mySum + sdata[tid + 512]; } __syncthreads(); }
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
// avoid bank conflict
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile int* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0)
cn_synrome[blockIdx.x] = sdata[0];
}
|
cf43dea50499f3e639e1c1b36040d667264f6843.hip
|
// !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <cstring>
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/library/util.h"
#include "device_allocation.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
size_t DeviceAllocation::bytes(library::NumericTypeID type, size_t capacity) {
return size_t(cutlass::library::sizeof_bits(type)) * capacity / 8;
}
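// Note: the product sizeof_bits(type) * capacity is divided by 8 with integer
// arithmetic, so for sub-byte types (e.g. 4-bit integers) any fractional
// trailing byte is truncated rather than rounded up.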
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Layout>
static std::vector<int64_t> get_packed_layout_stride(std::vector<int> const &extent) {
typename Layout::TensorCoord extent_coord;
typename Layout::Stride stride_coord;
if (extent.size() != size_t(Layout::kRank)) {
throw std::runtime_error("Layout does not have same rank as extent vector.");
}
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
std::vector<int64_t> stride;
stride.resize(Layout::kStrideRank, 0);
Layout layout = Layout::packed(extent_coord);
stride_coord = layout.stride();
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride.at(i) = (int64_t)stride_coord[i];
}
return stride;
}
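// For reference (editorial note, assuming the extent conventions used by the
// profiler: {rows, cols} for matrix layouts, {N, H, W, C} for TensorNHWC):
// a packed ColumnMajor layout yields a single stride equal to the row count
// (the leading dimension), while packed TensorNHWC yields {C, W*C, H*W*C}.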
/// Returns the stride of a packed layout
std::vector<int64_t> DeviceAllocation::get_packed_layout(
library::LayoutTypeID layout_id,
std::vector<int> const &extent) {
std::vector<int64_t> stride;
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajor>(extent);
break;
case library::LayoutTypeID::kRowMajor:
stride = get_packed_layout_stride<cutlass::layout::RowMajor>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kTensorNCHW:
stride = get_packed_layout_stride<cutlass::layout::TensorNCHW>(extent);
break;
case library::LayoutTypeID::kTensorNHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNHWC>(extent);
break;
case library::LayoutTypeID::kTensorNDHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNDHWC>(extent);
break;
case library::LayoutTypeID::kTensorNC32HW32:
stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<32>>(extent);
break;
case library::LayoutTypeID::kTensorNC64HW64:
stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<64>>(extent);
break;
case library::LayoutTypeID::kTensorC32RSK32:
stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<32>>(extent);
break;
case library::LayoutTypeID::kTensorC64RSK64:
stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<64>>(extent);
break;
default: break;
}
return stride;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that uses CUTLASS layout functions to construct a layout object (optionally packed into `bytes`) and return its capacity
template <typename Layout>
static size_t construct_layout_(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride) {
if (extent.size() != Layout::kRank) {
throw std::runtime_error(
"Layout must have same rank as extent vector.");
}
if (Layout::kStrideRank && stride.empty()) {
stride = get_packed_layout_stride<Layout>(extent);
return construct_layout_<Layout>(
bytes,
layout_id,
extent,
stride);
}
else if (Layout::kStrideRank && stride.size() != Layout::kStrideRank) {
throw std::runtime_error(
"Layout requires either empty stride or stride vector matching Layout::kStrideRank");
}
typename Layout::Stride stride_coord;
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride_coord[i] = (int)stride.at(i);
}
typename Layout::TensorCoord extent_coord;
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
// Construct the CUTLASS layout object from the stride object
Layout layout(stride_coord);
// Pack it into bytes
if (bytes) {
*reinterpret_cast<Layout *>(bytes) = layout;
}
// Return capacity
size_t capacity_ = layout.capacity(extent_coord);
return capacity_;
}
/// returns the capacity needed
size_t DeviceAllocation::construct_layout(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride) {
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
return construct_layout_<cutlass::layout::ColumnMajor>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajor:
return construct_layout_<cutlass::layout::RowMajor>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK2:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<2>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK2:
return construct_layout_<cutlass::layout::RowMajorInterleaved<2>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK4:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<4>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK4:
return construct_layout_<cutlass::layout::RowMajorInterleaved<4>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK16:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<16>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK16:
return construct_layout_<cutlass::layout::RowMajorInterleaved<16>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK32:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK32:
return construct_layout_<cutlass::layout::RowMajorInterleaved<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK64:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK64:
return construct_layout_<cutlass::layout::RowMajorInterleaved<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNCHW:
return construct_layout_<cutlass::layout::TensorNHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNHWC:
return construct_layout_<cutlass::layout::TensorNHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNDHWC:
return construct_layout_<cutlass::layout::TensorNDHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC32HW32:
return construct_layout_<cutlass::layout::TensorNCxHWx<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC64HW64:
return construct_layout_<cutlass::layout::TensorNCxHWx<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC32RSK32:
return construct_layout_<cutlass::layout::TensorCxRSKx<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC64RSK64:
return construct_layout_<cutlass::layout::TensorCxRSKx<64>>(bytes, layout_id, extent, stride);
default: break;
}
return 0;
}
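// Editor-added illustrative sketch (not part of the original source): querying the element
// capacity of a packed column-major matrix. The extent values are arbitrary examples, and the
// call assumes construct_layout is the static helper declared in device_allocation.h. Passing
// nullptr for the `bytes` argument only computes the capacity; no layout object is written.
inline size_t example_packed_column_major_capacity() {
  std::vector<int> extent = {128, 64};   // rows, columns (hypothetical sizes)
  std::vector<int64_t> stride;           // empty => the packed stride is derived internally
  return DeviceAllocation::construct_layout(
    nullptr, library::LayoutTypeID::kColumnMajor, extent, stride);
}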
/////////////////////////////////////////////////////////////////////////////////////////////////
DeviceAllocation::DeviceAllocation():
type_(library::NumericTypeID::kInvalid),
batch_stride_(0),
capacity_(0),
pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown),
batch_count_(1) {
}
DeviceAllocation::DeviceAllocation(
library::NumericTypeID type,
size_t capacity
):
type_(type), batch_stride_(capacity), capacity_(capacity), pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown), batch_count_(1) {
hipError_t result = hipMalloc((void **)&pointer_, bytes(type, capacity));
if (result != hipSuccess) {
type_ = library::NumericTypeID::kInvalid;
capacity_ = 0;
pointer_ = nullptr;
throw std::bad_alloc();
}
}
DeviceAllocation::DeviceAllocation(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count
):
type_(type), batch_stride_(size_t(0)), capacity_(size_t(0)), pointer_(nullptr), batch_count_(1) {
reset(type, layout_id, extent, stride, batch_count);
}
DeviceAllocation::~DeviceAllocation() {
if (pointer_) {
hipFree(pointer_);
}
}
DeviceAllocation &DeviceAllocation::reset() {
if (pointer_) {
hipFree(pointer_);
}
type_ = library::NumericTypeID::kInvalid;
batch_stride_ = 0;
capacity_ = 0;
pointer_ = nullptr;
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
tensor_ref_buffer_.clear();
batch_count_ = 1;
return *this;
}
DeviceAllocation &DeviceAllocation::reset(library::NumericTypeID type, size_t capacity) {
reset();
type_ = type;
batch_stride_ = capacity;
capacity_ = capacity;
hipError_t result = hipMalloc((void **)&pointer_, bytes(type_, capacity_));
if (result != hipSuccess) {
throw std::bad_alloc();
}
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
batch_count_ = 1;
tensor_ref_buffer_.resize(sizeof(pointer_), 0);
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
/// Allocates memory for a given layout and tensor
DeviceAllocation &DeviceAllocation::reset(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count) {
reset();
tensor_ref_buffer_.resize(sizeof(pointer_) + (sizeof(int64_t) * library::get_layout_stride_rank(layout_id)), 0);
type_ = type;
layout_ = layout_id;
stride_ = stride;
extent_ = extent;
batch_count_ = batch_count;
batch_stride_ = construct_layout(
tensor_ref_buffer_.data() + sizeof(pointer_),
layout_id,
extent,
stride_);
capacity_ = batch_stride_ * batch_count_;
hipError_t result = hipMalloc((void **)&pointer_, bytes(type, capacity_));
if (result != hipSuccess) {
throw std::bad_alloc();
}
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
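// Editor-added illustrative sketch (not part of the original source): allocating a packed NHWC
// tensor of half-precision elements. The extents and batch count are hypothetical example values;
// an empty stride vector requests the packed layout, and the call allocates device memory
// (throwing std::bad_alloc on failure, as above).
inline void example_allocate_nhwc_tensor(DeviceAllocation &allocation) {
  std::vector<int> extent = {8, 56, 56, 64};   // N, H, W, C (example values)
  std::vector<int64_t> stride;                 // packed layout
  allocation.reset(
    library::NumericTypeID::kF16,
    library::LayoutTypeID::kTensorNHWC,
    extent,
    stride,
    /*batch_count=*/1);
}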
bool DeviceAllocation::good() const {
return (capacity_ && pointer_);
}
library::NumericTypeID DeviceAllocation::type() const {
return type_;
}
void *DeviceAllocation::data() const {
return pointer_;
}
void *DeviceAllocation::batch_data(int batch_idx) const {
return static_cast<char *>(data()) + batch_stride_bytes() * batch_idx;
}
library::LayoutTypeID DeviceAllocation::layout() const {
return layout_;
}
std::vector<int64_t> const & DeviceAllocation::stride() const {
return stride_;
}
/// Gets the extent vector
std::vector<int> const & DeviceAllocation::extent() const {
return extent_;
}
/// Gets the number of adjacent tensors in memory
int DeviceAllocation::batch_count() const {
return batch_count_;
}
/// Gets the stride (in units of elements) between items
int64_t DeviceAllocation::batch_stride() const {
return batch_stride_;
}
/// Gets the stride (in units of bytes) between items
int64_t DeviceAllocation::batch_stride_bytes() const {
return bytes(type_, batch_stride_);
}
size_t DeviceAllocation::capacity() const {
return capacity_;
}
size_t DeviceAllocation::bytes() const {
return bytes(type_, capacity_);
}
/// Copies from an equivalent-sized tensor in device memory
void DeviceAllocation::copy_from_device(void const *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
hipError_t result = hipMemcpy(data(), ptr, bytes(), hipMemcpyDeviceToDevice);
if (result != hipSuccess) {
throw std::runtime_error("Failed device-to-device copy");
}
}
/// Copies from an equivalent-sized tensor in host memory
void DeviceAllocation::copy_from_host(void const *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
hipError_t result = hipMemcpy(data(), ptr, bytes(), hipMemcpyHostToDevice);
if (result != hipSuccess) {
throw std::runtime_error("Failed host-to-device copy");
}
}
/// Copies the contents to an equivalent-sized tensor in host memory
void DeviceAllocation::copy_to_host(void *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
hipError_t result = hipMemcpy(ptr, data(), bytes(), hipMemcpyDeviceToHost);
if (result != hipSuccess) {
throw std::runtime_error("Failed device-to-host copy");
}
}
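// Editor-added illustrative sketch (not part of the original source): round-tripping data through
// an allocation with the copy helpers above. The staging buffer name is hypothetical; its size in
// bytes must equal bytes(), which the copy helpers assume implicitly.
inline void example_round_trip(DeviceAllocation &allocation) {
  std::vector<uint8_t> host_buffer(allocation.bytes());
  allocation.copy_from_host(host_buffer.data());   // host -> device
  allocation.copy_to_host(host_buffer.data());     // device -> host
}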
void DeviceAllocation::initialize_random_device(int seed, Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile for
// this reason.
switch (type_) {
case library::NumericTypeID::kF16:
cutlass::reference::device::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::device::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::device::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::device::BlockFillRandom<float>(
reinterpret_cast<float *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::device::BlockFillRandom<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::device::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::device::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE4M3:
cutlass::reference::device::BlockFillRandom<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::device::BlockFillRandom<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::device::BlockFillRandom<double>(
reinterpret_cast<double *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::device::BlockFillRandom<complex<double>>(
reinterpret_cast<complex<double> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::device::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::device::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::device::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::device::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::device::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::device::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::device::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::device::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::device::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::device::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::device::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t *>(pointer_),
capacity_,
seed,
dist
);
break;
default: break;
}
}
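// Editor-added illustrative sketch (not part of the original source): filling an allocation with
// uniform random values on the device. Distribution::set_uniform and the bounds shown are assumed
// from cutlass/util/distribution.h; adjust if the local Distribution API differs.
inline void example_random_fill(DeviceAllocation &allocation, int seed) {
  Distribution dist;
  dist.set_uniform(-4, 4);                          // assumed helper; example bounds only
  allocation.initialize_random_device(seed, dist);
}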
void DeviceAllocation::initialize_random_host(int seed, Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::host::BlockFillRandom<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::host::BlockFillRandom<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::host::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::host::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::host::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::host::BlockFillRandom<float>(
reinterpret_cast<float *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::host::BlockFillRandom<double>(
reinterpret_cast<double *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::host::BlockFillRandom<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::host::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::host::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::host::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::host::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::host::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::host::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::host::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::host::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::host::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::host::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::host::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
default: break;
}
copy_from_host(host_data.data());
}
void DeviceAllocation::initialize_sequential_device(Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
switch (type_) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::device::BlockFillSequential<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(pointer_),
capacity_,
static_cast<cutlass::float_e4m3_t>(dist.sequential.delta),
static_cast<cutlass::float_e4m3_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::device::BlockFillSequential<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(pointer_),
capacity_,
static_cast<cutlass::float_e5m2_t>(dist.sequential.delta),
static_cast<cutlass::float_e5m2_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::device::BlockFillSequential<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(pointer_),
capacity_,
static_cast<cutlass::half_t>(dist.sequential.delta),
static_cast<cutlass::half_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::device::BlockFillSequential<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(pointer_),
capacity_,
static_cast<cutlass::bfloat16_t>(dist.sequential.delta),
static_cast<cutlass::bfloat16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::device::BlockFillSequential<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(pointer_),
capacity_,
static_cast<cutlass::tfloat32_t>(dist.sequential.delta),
static_cast<cutlass::tfloat32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::device::BlockFillSequential<float>(
reinterpret_cast<float *>(pointer_),
capacity_,
static_cast<float>(dist.sequential.delta),
static_cast<float>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t> *>(pointer_),
capacity_,
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.delta)),
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(pointer_),
capacity_,
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.delta)),
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(pointer_),
capacity_,
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.delta)),
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::device::BlockFillSequential<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(pointer_),
capacity_,
cutlass::complex<float>(
static_cast<float>(dist.sequential.delta)),
cutlass::complex<float>(
static_cast<float>(dist.sequential.start))
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::device::BlockFillSequential<double>(
reinterpret_cast<double *>(pointer_),
capacity_,
static_cast<double>(dist.sequential.delta),
static_cast<double>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::device::BlockFillSequential<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double> *>(pointer_),
capacity_,
cutlass::complex<double>(
static_cast<double>(dist.sequential.delta)),
cutlass::complex<double>(
static_cast<double>(dist.sequential.start))
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::device::BlockFillSequential<int2b_t>(
reinterpret_cast<int2b_t *>(pointer_),
capacity_,
static_cast<int2b_t>(dist.sequential.delta),
static_cast<int2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::device::BlockFillSequential<int4b_t>(
reinterpret_cast<int4b_t *>(pointer_),
capacity_,
static_cast<int4b_t>(dist.sequential.delta),
static_cast<int4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::device::BlockFillSequential<int8_t>(
reinterpret_cast<int8_t *>(pointer_),
capacity_,
static_cast<int8_t>(dist.sequential.delta),
static_cast<int8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::device::BlockFillSequential<int16_t>(
reinterpret_cast<int16_t *>(pointer_),
capacity_,
static_cast<int16_t>(dist.sequential.delta),
static_cast<int16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::device::BlockFillSequential<int32_t>(
reinterpret_cast<int32_t *>(pointer_),
capacity_,
static_cast<int32_t>(dist.sequential.delta),
static_cast<int32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::device::BlockFillSequential<int64_t>(
reinterpret_cast<int64_t *>(pointer_),
capacity_,
static_cast<int64_t>(dist.sequential.delta),
static_cast<int64_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::device::BlockFillSequential<uint1b_t>(
reinterpret_cast<uint1b_t *>(pointer_),
capacity_,
static_cast<uint1b_t>(dist.sequential.delta),
static_cast<uint1b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::device::BlockFillSequential<uint2b_t>(
reinterpret_cast<uint2b_t *>(pointer_),
capacity_,
static_cast<uint2b_t>(dist.sequential.delta),
static_cast<uint2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::device::BlockFillSequential<uint4b_t>(
reinterpret_cast<uint4b_t *>(pointer_),
capacity_,
static_cast<uint4b_t>(dist.sequential.delta),
static_cast<uint4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::device::BlockFillSequential<uint8_t>(
reinterpret_cast<uint8_t *>(pointer_),
capacity_,
static_cast<uint8_t>(dist.sequential.delta),
static_cast<uint8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillSequential<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
static_cast<uint16_t>(dist.sequential.delta),
static_cast<uint16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillSequential<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
static_cast<uint32_t>(dist.sequential.delta),
static_cast<uint32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::device::BlockFillSequential<uint64_t>(
reinterpret_cast<uint64_t *>(pointer_),
capacity_,
static_cast<uint64_t>(dist.sequential.delta),
static_cast<uint64_t>(dist.sequential.start)
);
break;
default: break;
}
}
void DeviceAllocation::initialize_sequential_host(Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::host::BlockFillSequential<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(host_data.data()),
capacity_,
static_cast<cutlass::float_e4m3_t>(dist.sequential.delta),
static_cast<cutlass::float_e4m3_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::host::BlockFillSequential<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(host_data.data()),
capacity_,
static_cast<cutlass::float_e5m2_t>(dist.sequential.delta),
static_cast<cutlass::float_e5m2_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::host::BlockFillSequential<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(host_data.data()),
capacity_,
static_cast<cutlass::half_t>(dist.sequential.delta),
static_cast<cutlass::half_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::host::BlockFillSequential<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(host_data.data()),
capacity_,
static_cast<cutlass::bfloat16_t>(dist.sequential.delta),
static_cast<cutlass::bfloat16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::host::BlockFillSequential<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(host_data.data()),
capacity_,
static_cast<cutlass::tfloat32_t>(dist.sequential.delta),
static_cast<cutlass::tfloat32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::host::BlockFillSequential<float>(
reinterpret_cast<float *>(host_data.data()),
capacity_,
static_cast<float>(dist.sequential.delta),
static_cast<float>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t> *>(host_data.data()),
capacity_,
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.delta)),
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(host_data.data()),
capacity_,
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.delta)),
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(host_data.data()),
capacity_,
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.delta)),
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::host::BlockFillSequential<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(host_data.data()),
capacity_,
cutlass::complex<float>(
static_cast<float>(dist.sequential.delta)),
cutlass::complex<float>(
static_cast<float>(dist.sequential.start))
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::host::BlockFillSequential<double>(
reinterpret_cast<double *>(host_data.data()),
capacity_,
static_cast<double>(dist.sequential.delta),
static_cast<double>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::host::BlockFillSequential<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double> *>(host_data.data()),
capacity_,
cutlass::complex<double>(
static_cast<double>(dist.sequential.delta)),
cutlass::complex<double>(
static_cast<double>(dist.sequential.start))
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::host::BlockFillSequential<int2b_t>(
reinterpret_cast<int2b_t *>(host_data.data()),
capacity_,
static_cast<int2b_t>(dist.sequential.delta),
static_cast<int2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::host::BlockFillSequential<int4b_t>(
reinterpret_cast<int4b_t *>(host_data.data()),
capacity_,
static_cast<int4b_t>(dist.sequential.delta),
static_cast<int4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::host::BlockFillSequential<int8_t>(
reinterpret_cast<int8_t *>(host_data.data()),
capacity_,
static_cast<int8_t>(dist.sequential.delta),
static_cast<int8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillSequential<int16_t>(
reinterpret_cast<int16_t *>(host_data.data()),
capacity_,
static_cast<int16_t>(dist.sequential.delta),
static_cast<int16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillSequential<int32_t>(
reinterpret_cast<int32_t *>(host_data.data()),
capacity_,
static_cast<int32_t>(dist.sequential.delta),
static_cast<int32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::host::BlockFillSequential<int64_t>(
reinterpret_cast<int64_t *>(host_data.data()),
capacity_,
static_cast<int64_t>(dist.sequential.delta),
static_cast<int64_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::host::BlockFillSequential<uint1b_t>(
reinterpret_cast<uint1b_t *>(host_data.data()),
capacity_,
static_cast<uint1b_t>(dist.sequential.delta),
static_cast<uint1b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::host::BlockFillSequential<uint2b_t>(
reinterpret_cast<uint2b_t *>(host_data.data()),
capacity_,
static_cast<uint2b_t>(dist.sequential.delta),
static_cast<uint2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::host::BlockFillSequential<uint4b_t>(
reinterpret_cast<uint4b_t *>(host_data.data()),
capacity_,
static_cast<uint4b_t>(dist.sequential.delta),
static_cast<uint4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::host::BlockFillSequential<uint8_t>(
reinterpret_cast<uint8_t *>(host_data.data()),
capacity_,
static_cast<uint8_t>(dist.sequential.delta),
static_cast<uint8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::host::BlockFillSequential<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
static_cast<uint16_t>(dist.sequential.delta),
static_cast<uint16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::host::BlockFillSequential<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
static_cast<uint32_t>(dist.sequential.delta),
static_cast<uint32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::host::BlockFillSequential<uint64_t>(
reinterpret_cast<uint64_t *>(host_data.data()),
capacity_,
static_cast<uint64_t>(dist.sequential.delta),
static_cast<uint64_t>(dist.sequential.start)
);
break;
default: break;
}
copy_from_host(host_data.data());
}
void DeviceAllocation::initialize_random_sparsemeta_device(int seed, int MetaSizeInBits) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile for
// this reason.
switch (type_) {
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
seed,
MetaSizeInBits
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
seed,
MetaSizeInBits
);
break;
default:
break;
}
}
void DeviceAllocation::initialize_random_sparsemeta_host(int seed, int MetaSizeInBits) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
seed,
MetaSizeInBits
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
seed,
MetaSizeInBits
);
break;
default:
break;
}
copy_from_host(host_data.data());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two blocks have exactly the same value
bool DeviceAllocation::block_compare_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity) {
switch (numeric_type) {
case library::NumericTypeID::kFE4M3:
return reference::device::BlockCompareEqual<float_e4m3_t>(
reinterpret_cast<float_e4m3_t const *>(ptr_A),
reinterpret_cast<float_e4m3_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kFE5M2:
return reference::device::BlockCompareEqual<float_e5m2_t>(
reinterpret_cast<float_e5m2_t const *>(ptr_A),
reinterpret_cast<float_e5m2_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kF16:
return reference::device::BlockCompareEqual<half_t>(
reinterpret_cast<half_t const *>(ptr_A),
reinterpret_cast<half_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const *>(ptr_A),
reinterpret_cast<bfloat16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const *>(ptr_A),
reinterpret_cast<tfloat32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kF32:
return reference::device::BlockCompareEqual<float>(
reinterpret_cast<float const *>(ptr_A),
reinterpret_cast<float const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<cutlass::complex<float> >(
reinterpret_cast<complex<float> const *>(ptr_A),
reinterpret_cast<complex<float> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<complex<half_t>>(
reinterpret_cast<complex<half_t> const *>(ptr_A),
reinterpret_cast<complex<half_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCBF16:
return reference::device::BlockCompareEqual<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> const *>(ptr_A),
reinterpret_cast<complex<bfloat16_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCTF32:
return reference::device::BlockCompareEqual<complex<tfloat32_t>>(
reinterpret_cast<complex<tfloat32_t> const *>(ptr_A),
reinterpret_cast<complex<tfloat32_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kF64:
return reference::device::BlockCompareEqual<double>(
reinterpret_cast<double const *>(ptr_A),
reinterpret_cast<double const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<complex<double>>(
reinterpret_cast<complex<double> const *>(ptr_A),
reinterpret_cast<complex<double> const *>(ptr_B),
capacity);
case library::NumericTypeID::kS2:
return reference::device::BlockCompareEqual<int2b_t>(
reinterpret_cast<int2b_t const *>(ptr_A),
reinterpret_cast<int2b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS4:
return reference::device::BlockCompareEqual<int4b_t>(
reinterpret_cast<int4b_t const *>(ptr_A),
reinterpret_cast<int4b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS8:
return reference::device::BlockCompareEqual<int8_t>(
reinterpret_cast<int8_t const *>(ptr_A),
reinterpret_cast<int8_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS16:
return reference::device::BlockCompareEqual<int16_t>(
reinterpret_cast<int16_t const *>(ptr_A),
reinterpret_cast<int16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS32:
return reference::device::BlockCompareEqual<int32_t>(
reinterpret_cast<int32_t const *>(ptr_A),
reinterpret_cast<int32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS64:
return reference::device::BlockCompareEqual<int64_t>(
reinterpret_cast<int64_t const *>(ptr_A),
reinterpret_cast<int64_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kB1:
return reference::device::BlockCompareEqual<uint1b_t>(
reinterpret_cast<uint1b_t const *>(ptr_A),
reinterpret_cast<uint1b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU2:
return reference::device::BlockCompareEqual<uint2b_t>(
reinterpret_cast<uint2b_t const *>(ptr_A),
reinterpret_cast<uint2b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU4:
return reference::device::BlockCompareEqual<uint4b_t>(
reinterpret_cast<uint4b_t const *>(ptr_A),
reinterpret_cast<uint4b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU8:
return reference::device::BlockCompareEqual<uint8_t>(
reinterpret_cast<uint8_t const *>(ptr_A),
reinterpret_cast<uint8_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU16:
return reference::device::BlockCompareEqual<uint16_t>(
reinterpret_cast<uint16_t const *>(ptr_A),
reinterpret_cast<uint16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU32:
return reference::device::BlockCompareEqual<uint32_t>(
reinterpret_cast<uint32_t const *>(ptr_A),
reinterpret_cast<uint32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU64:
return reference::device::BlockCompareEqual<uint64_t>(
reinterpret_cast<uint64_t const *>(ptr_A),
reinterpret_cast<uint64_t const *>(ptr_B),
capacity);
default:
throw std::runtime_error("Unsupported numeric type");
}
}
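// Editor-added illustrative sketch (not part of the original source): bitwise comparison of two
// device allocations that are assumed to hold the same element type and capacity.
inline bool example_compare_equal(DeviceAllocation const &a, DeviceAllocation const &b) {
  return DeviceAllocation::block_compare_equal(a.type(), a.data(), b.data(), a.capacity());
}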
/// Returns true if two blocks have approximately the same value
bool DeviceAllocation::block_compare_relatively_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity,
double epsilon,
double nonzero_floor) {
switch (numeric_type) {
case library::NumericTypeID::kFE4M3:
return reference::device::BlockCompareRelativelyEqual<float_e4m3_t>(
reinterpret_cast<float_e4m3_t const *>(ptr_A),
reinterpret_cast<float_e4m3_t const *>(ptr_B),
capacity,
static_cast<float_e4m3_t>(epsilon),
static_cast<float_e4m3_t>(nonzero_floor));
case library::NumericTypeID::kFE5M2:
return reference::device::BlockCompareRelativelyEqual<float_e5m2_t>(
reinterpret_cast<float_e5m2_t const *>(ptr_A),
reinterpret_cast<float_e5m2_t const *>(ptr_B),
capacity,
static_cast<float_e5m2_t>(epsilon),
static_cast<float_e5m2_t>(nonzero_floor));
case library::NumericTypeID::kF16:
return reference::device::BlockCompareRelativelyEqual<half_t>(
reinterpret_cast<half_t const *>(ptr_A),
reinterpret_cast<half_t const *>(ptr_B),
capacity,
static_cast<half_t>(epsilon),
static_cast<half_t>(nonzero_floor));
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareRelativelyEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const *>(ptr_A),
reinterpret_cast<bfloat16_t const *>(ptr_B),
capacity,
static_cast<bfloat16_t>(epsilon),
static_cast<bfloat16_t>(nonzero_floor));
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareRelativelyEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const *>(ptr_A),
reinterpret_cast<tfloat32_t const *>(ptr_B),
capacity,
static_cast<tfloat32_t>(epsilon),
static_cast<tfloat32_t>(nonzero_floor));
case library::NumericTypeID::kF32:
return reference::device::BlockCompareRelativelyEqual<float>(
reinterpret_cast<float const *>(ptr_A),
reinterpret_cast<float const *>(ptr_B),
capacity,
static_cast<float>(epsilon),
static_cast<float>(nonzero_floor));
case library::NumericTypeID::kF64:
return reference::device::BlockCompareRelativelyEqual<double>(
reinterpret_cast<double const *>(ptr_A),
reinterpret_cast<double const *>(ptr_B),
capacity,
static_cast<double>(epsilon),
static_cast<double>(nonzero_floor));
case library::NumericTypeID::kS2:
return reference::device::BlockCompareRelativelyEqual<int2b_t>(
reinterpret_cast<int2b_t const *>(ptr_A),
reinterpret_cast<int2b_t const *>(ptr_B),
capacity,
static_cast<int2b_t>(epsilon),
static_cast<int2b_t>(nonzero_floor));
case library::NumericTypeID::kS4:
return reference::device::BlockCompareRelativelyEqual<int4b_t>(
reinterpret_cast<int4b_t const *>(ptr_A),
reinterpret_cast<int4b_t const *>(ptr_B),
capacity,
static_cast<int4b_t>(epsilon),
static_cast<int4b_t>(nonzero_floor));
case library::NumericTypeID::kS8:
return reference::device::BlockCompareRelativelyEqual<int8_t>(
reinterpret_cast<int8_t const *>(ptr_A),
reinterpret_cast<int8_t const *>(ptr_B),
capacity,
static_cast<int8_t>(epsilon),
static_cast<int8_t>(nonzero_floor));
case library::NumericTypeID::kS16:
return reference::device::BlockCompareRelativelyEqual<int16_t>(
reinterpret_cast<int16_t const *>(ptr_A),
reinterpret_cast<int16_t const *>(ptr_B),
capacity,
static_cast<int16_t>(epsilon),
static_cast<int16_t>(nonzero_floor));
case library::NumericTypeID::kS32:
return reference::device::BlockCompareRelativelyEqual<int32_t>(
reinterpret_cast<int32_t const *>(ptr_A),
reinterpret_cast<int32_t const *>(ptr_B),
capacity,
static_cast<int32_t>(epsilon),
static_cast<int32_t>(nonzero_floor));
case library::NumericTypeID::kS64:
return reference::device::BlockCompareRelativelyEqual<int64_t>(
reinterpret_cast<int64_t const *>(ptr_A),
reinterpret_cast<int64_t const *>(ptr_B),
capacity,
static_cast<int64_t>(epsilon),
static_cast<int64_t>(nonzero_floor));
case library::NumericTypeID::kB1:
return reference::device::BlockCompareRelativelyEqual<uint1b_t>(
reinterpret_cast<uint1b_t const *>(ptr_A),
reinterpret_cast<uint1b_t const *>(ptr_B),
capacity,
static_cast<uint1b_t>(epsilon),
static_cast<uint1b_t>(nonzero_floor));
case library::NumericTypeID::kU2:
return reference::device::BlockCompareRelativelyEqual<uint2b_t>(
reinterpret_cast<uint2b_t const *>(ptr_A),
reinterpret_cast<uint2b_t const *>(ptr_B),
capacity,
static_cast<uint2b_t>(epsilon),
static_cast<uint2b_t>(nonzero_floor));
case library::NumericTypeID::kU4:
return reference::device::BlockCompareRelativelyEqual<uint4b_t>(
reinterpret_cast<uint4b_t const *>(ptr_A),
reinterpret_cast<uint4b_t const *>(ptr_B),
capacity,
static_cast<uint4b_t>(epsilon),
static_cast<uint4b_t>(nonzero_floor));
case library::NumericTypeID::kU8:
return reference::device::BlockCompareRelativelyEqual<uint8_t>(
reinterpret_cast<uint8_t const *>(ptr_A),
reinterpret_cast<uint8_t const *>(ptr_B),
capacity,
static_cast<uint8_t>(epsilon),
static_cast<uint8_t>(nonzero_floor));
case library::NumericTypeID::kU16:
return reference::device::BlockCompareRelativelyEqual<uint16_t>(
reinterpret_cast<uint16_t const *>(ptr_A),
reinterpret_cast<uint16_t const *>(ptr_B),
capacity,
static_cast<uint16_t>(epsilon),
static_cast<uint16_t>(nonzero_floor));
case library::NumericTypeID::kU32:
return reference::device::BlockCompareRelativelyEqual<uint32_t>(
reinterpret_cast<uint32_t const *>(ptr_A),
reinterpret_cast<uint32_t const *>(ptr_B),
capacity,
static_cast<uint32_t>(epsilon),
static_cast<uint32_t>(nonzero_floor));
case library::NumericTypeID::kU64:
return reference::device::BlockCompareRelativelyEqual<uint64_t>(
reinterpret_cast<uint64_t const *>(ptr_A),
reinterpret_cast<uint64_t const *>(ptr_B),
capacity,
static_cast<uint64_t>(epsilon),
static_cast<uint64_t>(nonzero_floor));
// No relatively equal comparison for complex numbers.
//
  // As a simplification, we require bitwise equality. This avoids false positives:
  // a "pass" is definitive, whereas a "fail" might still be acceptable under an appropriate epsilon.
//
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<cutlass::complex<half_t> >(
reinterpret_cast<complex<half_t> const *>(ptr_A),
reinterpret_cast<complex<half_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<cutlass::complex<float> >(
reinterpret_cast<complex<float> const *>(ptr_A),
reinterpret_cast<complex<float> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<cutlass::complex<double> >(
reinterpret_cast<complex<double> const *>(ptr_A),
reinterpret_cast<complex<double> const *>(ptr_B),
capacity);
default:
{
throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(numeric_type));
}
}
}
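// Editor-added illustrative sketch (not part of the original source): relative comparison of two
// allocations of the same element type and capacity. The epsilon and nonzero floor are example
// tolerances only; pick values appropriate to the computation being verified.
inline bool example_compare_relative(DeviceAllocation const &a, DeviceAllocation const &b) {
  return DeviceAllocation::block_compare_relatively_equal(
    a.type(), a.data(), b.data(), a.capacity(),
    /*epsilon=*/0.05, /*nonzero_floor=*/1.0 / 256.0);
}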
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord, int Rank>
struct vector_to_coord {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[Rank - 1] = vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
coord[Rank - 1] = (int)vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 1> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[0] = vec.at(0);
}
vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
coord[0] = (int)vec.at(0);
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 0> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
}
};
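// Editor-added illustrative sketch (not part of the original source): converting a dynamic extent
// vector into a static rank-4 coordinate, exactly as the CSV-writing path below does internally.
// The function name is hypothetical; vec.at() throws std::out_of_range if the vector is too short.
inline Coord<4> example_vector_to_coord(std::vector<int> const &extent4) {
  Coord<4> coord;
  vector_to_coord<Coord<4>, 4>(coord, extent4);   // the constructor performs the copy recursively
  return coord;
}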
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Layout>
static void write_tensor_csv_static_tensor_view(
std::ostream &out,
DeviceAllocation &allocation) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank, typename Layout::Stride::Index> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank, typename Layout::Stride::Index>,
Layout::kStrideRank>(stride, allocation.stride());
Layout layout(stride);
HostTensor<Element, Layout> host_tensor(extent, layout, false);
if (host_tensor.capacity() != allocation.batch_stride()) {
throw std::runtime_error("Unexpected capacity to equal.");
}
host_tensor.copy_in_device_to_host(
static_cast<Element const *>(allocation.data()),
allocation.batch_stride());
TensorViewWrite(out, host_tensor.host_view());
out << "\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
static void write_tensor_csv_static_type(
std::ostream &out,
DeviceAllocation &allocation) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
write_tensor_csv_static_tensor_view<T, layout::RowMajor>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajor:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajor>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNHWC>(out, allocation);
break;
case library::LayoutTypeID::kTensorNDHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNDHWC>(out, allocation);
break;
case library::LayoutTypeID::kTensorNC32HW32:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<32>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNC64HW64:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorC32RSK32:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<32>>(out, allocation);
break;
case library::LayoutTypeID::kTensorC64RSK64:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<64>>(out, allocation);
break;
default:
throw std::runtime_error("Unhandled layout");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a tensor to csv
void DeviceAllocation::write_tensor_csv(
std::ostream &out) {
switch (this->type()) {
case library::NumericTypeID::kFE4M3:
write_tensor_csv_static_type<float_e4m3_t>(out, *this);
break;
case library::NumericTypeID::kFE5M2:
write_tensor_csv_static_type<float_e5m2_t>(out, *this);
break;
case library::NumericTypeID::kF16:
write_tensor_csv_static_type<half_t>(out, *this);
break;
case library::NumericTypeID::kBF16:
write_tensor_csv_static_type<bfloat16_t>(out, *this);
break;
case library::NumericTypeID::kTF32:
write_tensor_csv_static_type<tfloat32_t>(out, *this);
break;
case library::NumericTypeID::kF32:
write_tensor_csv_static_type<float>(out, *this);
break;
case library::NumericTypeID::kF64:
write_tensor_csv_static_type<double>(out, *this);
break;
case library::NumericTypeID::kS2:
write_tensor_csv_static_type<int2b_t>(out, *this);
break;
case library::NumericTypeID::kS4:
write_tensor_csv_static_type<int4b_t>(out, *this);
break;
case library::NumericTypeID::kS8:
write_tensor_csv_static_type<int8_t>(out, *this);
break;
case library::NumericTypeID::kS16:
write_tensor_csv_static_type<int16_t>(out, *this);
break;
case library::NumericTypeID::kS32:
write_tensor_csv_static_type<int32_t>(out, *this);
break;
case library::NumericTypeID::kS64:
write_tensor_csv_static_type<int64_t>(out, *this);
break;
case library::NumericTypeID::kB1:
write_tensor_csv_static_type<uint1b_t>(out, *this);
break;
case library::NumericTypeID::kU2:
write_tensor_csv_static_type<uint2b_t>(out, *this);
break;
case library::NumericTypeID::kU4:
write_tensor_csv_static_type<uint4b_t>(out, *this);
break;
case library::NumericTypeID::kU8:
write_tensor_csv_static_type<uint8_t>(out, *this);
break;
case library::NumericTypeID::kU16:
write_tensor_csv_static_type<uint16_t>(out, *this);
break;
case library::NumericTypeID::kU32:
write_tensor_csv_static_type<uint32_t>(out, *this);
break;
case library::NumericTypeID::kU64:
write_tensor_csv_static_type<uint64_t>(out, *this);
break;
case library::NumericTypeID::kCF16:
write_tensor_csv_static_type<cutlass::complex<half_t> >(out, *this);
break;
case library::NumericTypeID::kCF32:
write_tensor_csv_static_type<cutlass::complex<float> >(out, *this);
break;
case library::NumericTypeID::kCF64:
write_tensor_csv_static_type<cutlass::complex<double> >(out, *this);
break;
default:
throw std::runtime_error("Unsupported numeric type");
}
}
template <typename Element, typename Layout>
static void tensor_fill_tensor_view(DeviceAllocation &allocation, Element val = Element()) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank, typename Layout::LongIndex> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank, typename Layout::LongIndex>,
Layout::kStrideRank>(stride, allocation.stride());
TensorView<Element, Layout> view(
static_cast<Element *>(allocation.data()),
Layout(stride),
extent
);
cutlass::reference::device::TensorFill<Element, Layout>(
view,
val
);
}
template <typename Element>
static void tensor_fill(DeviceAllocation &allocation, Element val = Element()) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
tensor_fill_tensor_view<Element, layout::RowMajor>(allocation, val);
break;
case library::LayoutTypeID::kColumnMajor:
tensor_fill_tensor_view<Element, layout::ColumnMajor>(allocation, val);
break;
case library::LayoutTypeID::kTensorNHWC:
tensor_fill_tensor_view<Element, layout::TensorNHWC>(allocation, val);
break;
case library::LayoutTypeID::kTensorNDHWC:
tensor_fill_tensor_view<Element, layout::TensorNDHWC>(allocation, val);
break;
case library::LayoutTypeID::kTensorNC32HW32:
tensor_fill_tensor_view<Element, layout::TensorNCxHWx<32>>(allocation, val);
break;
case library::LayoutTypeID::kTensorNC64HW64:
tensor_fill_tensor_view<Element, layout::TensorNCxHWx<64>>(allocation, val);
break;
case library::LayoutTypeID::kTensorC32RSK32:
tensor_fill_tensor_view<Element, layout::TensorCxRSKx<32>>(allocation, val);
break;
case library::LayoutTypeID::kTensorC64RSK64:
tensor_fill_tensor_view<Element, layout::TensorCxRSKx<64>>(allocation, val);
break;
default:
throw std::runtime_error("Unsupported layout");
break;
}
}
/// Fills a tensor uniformly with a value (most frequently used to clear the tensor)
void DeviceAllocation::fill(double val = 0.0) {
switch (this->type()) {
case library::NumericTypeID::kFE4M3:
tensor_fill<float_e4m3_t>(*this, static_cast<float_e4m3_t>(val));
break;
case library::NumericTypeID::kFE5M2:
tensor_fill<float_e5m2_t>(*this, static_cast<float_e5m2_t>(val));
break;
case library::NumericTypeID::kF16:
tensor_fill<half_t>(*this, static_cast<half_t>(val));
break;
case library::NumericTypeID::kBF16:
tensor_fill<bfloat16_t>(*this, static_cast<bfloat16_t>(val));
break;
case library::NumericTypeID::kTF32:
tensor_fill<tfloat32_t>(*this, static_cast<tfloat32_t>(val));
break;
case library::NumericTypeID::kF32:
tensor_fill<float>(*this, static_cast<float>(val));
break;
case library::NumericTypeID::kF64:
tensor_fill<double>(*this, static_cast<double>(val));
break;
case library::NumericTypeID::kS2:
tensor_fill<int2b_t>(*this, static_cast<int2b_t>(val));
break;
case library::NumericTypeID::kS4:
tensor_fill<int4b_t>(*this, static_cast<int4b_t>(val));
break;
case library::NumericTypeID::kS8:
tensor_fill<int8_t>(*this, static_cast<int8_t>(val));
break;
case library::NumericTypeID::kS16:
tensor_fill<int16_t>(*this, static_cast<int16_t>(val));
break;
case library::NumericTypeID::kS32:
tensor_fill<int32_t>(*this, static_cast<int32_t>(val));
break;
case library::NumericTypeID::kS64:
tensor_fill<int64_t>(*this, static_cast<int64_t>(val));
break;
case library::NumericTypeID::kB1:
tensor_fill<uint1b_t>(*this, static_cast<uint1b_t>(val));
break;
case library::NumericTypeID::kU2:
tensor_fill<uint2b_t>(*this, static_cast<uint2b_t>(val));
break;
case library::NumericTypeID::kU4:
tensor_fill<uint4b_t>(*this, static_cast<uint4b_t>(val));
break;
case library::NumericTypeID::kU8:
tensor_fill<uint8_t>(*this, static_cast<uint8_t>(val));
break;
case library::NumericTypeID::kU16:
tensor_fill<uint16_t>(*this, static_cast<uint16_t>(val));
break;
case library::NumericTypeID::kU32:
tensor_fill<uint32_t>(*this, static_cast<uint32_t>(val));
break;
case library::NumericTypeID::kU64:
tensor_fill<uint64_t>(*this, static_cast<uint64_t>(val));
break;
case library::NumericTypeID::kCF16:
tensor_fill<cutlass::complex<half_t> >(*this, from_real<half_t>(val));
break;
case library::NumericTypeID::kCF32:
tensor_fill<cutlass::complex<float> >(*this, from_real<float>(val));
break;
case library::NumericTypeID::kCF64:
tensor_fill<cutlass::complex<double> >(*this, from_real<double>(val));
break;
default:
throw std::runtime_error("Unsupported numeric type");
}
}
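// Editor-added illustrative sketch (not part of the original source): clearing an allocation and
// dumping it as CSV. The output stream is supplied by the caller; element types without a fill or
// CSV path throw std::runtime_error, as the switches above show.
inline void example_clear_and_dump(DeviceAllocation &allocation, std::ostream &out) {
  allocation.fill(0.0);
  allocation.write_tensor_csv(out);
}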
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
|
cf43dea50499f3e639e1c1b36040d667264f6843.cu
|
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <cstring>
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/library/util.h"
#include "device_allocation.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
size_t DeviceAllocation::bytes(library::NumericTypeID type, size_t capacity) {
return size_t(cutlass::library::sizeof_bits(type)) * capacity / 8;
}
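// Worked example (editor's note, not part of the library): bytes() converts a
// capacity given in elements into a size in bytes using the element's bit width,
// so sub-byte types rely on the product being a multiple of 8 bits:
//
//   DeviceAllocation::bytes(library::NumericTypeID::kS4, 16);  // 4 bits * 16 / 8 = 8 bytes
//   DeviceAllocation::bytes(library::NumericTypeID::kF32, 16); // 32 bits * 16 / 8 = 64 bytes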
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Layout>
static std::vector<int64_t> get_packed_layout_stride(std::vector<int> const &extent) {
typename Layout::TensorCoord extent_coord;
typename Layout::Stride stride_coord;
if (extent.size() != size_t(Layout::kRank)) {
throw std::runtime_error("Layout does not have same rank as extent vector.");
}
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
std::vector<int64_t> stride;
stride.resize(Layout::kStrideRank, 0);
Layout layout = Layout::packed(extent_coord);
stride_coord = layout.stride();
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride.at(i) = (int64_t)stride_coord[i];
}
return stride;
}
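// Illustrative sketch (editor's addition): for a column-major matrix the packed
// stride is just the leading dimension (the number of rows), so an extent of
// {M, N} = {128, 64} yields a single-element stride vector:
//
//   std::vector<int64_t> stride =
//     get_packed_layout_stride<cutlass::layout::ColumnMajor>({128, 64});
//   // stride == {128}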
/// Returns the stride of a packed layout
std::vector<int64_t> DeviceAllocation::get_packed_layout(
library::LayoutTypeID layout_id,
std::vector<int> const &extent) {
std::vector<int64_t> stride;
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajor>(extent);
break;
case library::LayoutTypeID::kRowMajor:
stride = get_packed_layout_stride<cutlass::layout::RowMajor>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kTensorNCHW:
stride = get_packed_layout_stride<cutlass::layout::TensorNCHW>(extent);
break;
case library::LayoutTypeID::kTensorNHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNHWC>(extent);
break;
case library::LayoutTypeID::kTensorNDHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNDHWC>(extent);
break;
case library::LayoutTypeID::kTensorNC32HW32:
stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<32>>(extent);
break;
case library::LayoutTypeID::kTensorNC64HW64:
stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<64>>(extent);
break;
case library::LayoutTypeID::kTensorC32RSK32:
stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<32>>(extent);
break;
case library::LayoutTypeID::kTensorC64RSK64:
stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<64>>(extent);
break;
default: break;
}
return stride;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template to use CUTLASS Layout functions to construct a layout object and compute its capacity
template <typename Layout>
static size_t construct_layout_(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride) {
if (extent.size() != Layout::kRank) {
throw std::runtime_error(
"Layout must have same rank as extent vector.");
}
if (Layout::kStrideRank && stride.empty()) {
stride = get_packed_layout_stride<Layout>(extent);
return construct_layout_<Layout>(
bytes,
layout_id,
extent,
stride);
}
else if (Layout::kStrideRank && stride.size() != Layout::kStrideRank) {
throw std::runtime_error(
"Layout requires either empty stride or stride vector matching Layout::kStrideRank");
}
typename Layout::Stride stride_coord;
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride_coord[i] = (int)stride.at(i);
}
typename Layout::TensorCoord extent_coord;
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
// Construct the CUTLASS layout object from the stride object
Layout layout(stride_coord);
// Pack it into bytes
if (bytes) {
*reinterpret_cast<Layout *>(bytes) = layout;
}
// Return capacity
size_t capacity_ = layout.capacity(extent_coord);
return capacity_;
}
/// Constructs the layout object in `bytes` (if non-null) and returns the capacity needed, in elements
size_t DeviceAllocation::construct_layout(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride) {
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
return construct_layout_<cutlass::layout::ColumnMajor>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajor:
return construct_layout_<cutlass::layout::RowMajor>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK2:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<2>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK2:
return construct_layout_<cutlass::layout::RowMajorInterleaved<2>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK4:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<4>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK4:
return construct_layout_<cutlass::layout::RowMajorInterleaved<4>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK16:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<16>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK16:
return construct_layout_<cutlass::layout::RowMajorInterleaved<16>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK32:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK32:
return construct_layout_<cutlass::layout::RowMajorInterleaved<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK64:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK64:
return construct_layout_<cutlass::layout::RowMajorInterleaved<64>>(bytes, layout_id, extent, stride);
  case library::LayoutTypeID::kTensorNCHW:
    return construct_layout_<cutlass::layout::TensorNCHW>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNHWC:
return construct_layout_<cutlass::layout::TensorNHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNDHWC:
return construct_layout_<cutlass::layout::TensorNDHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC32HW32:
return construct_layout_<cutlass::layout::TensorNCxHWx<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC64HW64:
return construct_layout_<cutlass::layout::TensorNCxHWx<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC32RSK32:
return construct_layout_<cutlass::layout::TensorCxRSKx<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC64RSK64:
return construct_layout_<cutlass::layout::TensorCxRSKx<64>>(bytes, layout_id, extent, stride);
default: break;
}
return 0;
}
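// Usage sketch (editor's addition): when the stride vector is empty, the packed
// stride is computed and the element capacity of the tensor is returned; passing
// a non-null `bytes` pointer additionally serializes the Layout object into it.
//
//   std::vector<int64_t> stride;   // empty => packed stride is computed
//   size_t capacity = DeviceAllocation::construct_layout(
//     nullptr, library::LayoutTypeID::kRowMajor, {128, 64}, stride);
//   // capacity == 128 * 64, stride == {64}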
/////////////////////////////////////////////////////////////////////////////////////////////////
DeviceAllocation::DeviceAllocation():
type_(library::NumericTypeID::kInvalid),
batch_stride_(0),
capacity_(0),
pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown),
batch_count_(1) {
}
DeviceAllocation::DeviceAllocation(
library::NumericTypeID type,
size_t capacity
):
type_(type), batch_stride_(capacity), capacity_(capacity), pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown), batch_count_(1) {
cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type, capacity));
if (result != cudaSuccess) {
type_ = library::NumericTypeID::kInvalid;
capacity_ = 0;
pointer_ = nullptr;
throw std::bad_alloc();
}
}
DeviceAllocation::DeviceAllocation(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count
):
type_(type), batch_stride_(size_t(0)), capacity_(size_t(0)), pointer_(nullptr), batch_count_(1) {
reset(type, layout_id, extent, stride, batch_count);
}
DeviceAllocation::~DeviceAllocation() {
if (pointer_) {
cudaFree(pointer_);
}
}
DeviceAllocation &DeviceAllocation::reset() {
if (pointer_) {
cudaFree(pointer_);
}
type_ = library::NumericTypeID::kInvalid;
batch_stride_ = 0;
capacity_ = 0;
pointer_ = nullptr;
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
tensor_ref_buffer_.clear();
batch_count_ = 1;
return *this;
}
DeviceAllocation &DeviceAllocation::reset(library::NumericTypeID type, size_t capacity) {
reset();
type_ = type;
batch_stride_ = capacity;
capacity_ = capacity;
cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type_, capacity_));
if (result != cudaSuccess) {
throw std::bad_alloc();
}
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
batch_count_ = 1;
tensor_ref_buffer_.resize(sizeof(pointer_), 0);
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
/// Allocates memory for a given layout and tensor
DeviceAllocation &DeviceAllocation::reset(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count) {
reset();
tensor_ref_buffer_.resize(sizeof(pointer_) + (sizeof(int64_t) * library::get_layout_stride_rank(layout_id)), 0);
type_ = type;
layout_ = layout_id;
stride_ = stride;
extent_ = extent;
batch_count_ = batch_count;
batch_stride_ = construct_layout(
tensor_ref_buffer_.data() + sizeof(pointer_),
layout_id,
extent,
stride_);
capacity_ = batch_stride_ * batch_count_;
cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type, capacity_));
if (result != cudaSuccess) {
throw std::bad_alloc();
}
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
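// Hypothetical usage (editor's sketch): allocating a batch of packed row-major
// 128x64 F16 tensors. The empty stride vector is replaced by the packed stride,
// and capacity_ becomes batch_stride_ * batch_count_.
//
//   DeviceAllocation allocation;
//   allocation.reset(
//     library::NumericTypeID::kF16,
//     library::LayoutTypeID::kRowMajor,
//     {128, 64},      // extent
//     {},             // stride: empty => packed
//     4);             // batch_count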
bool DeviceAllocation::good() const {
return (capacity_ && pointer_);
}
library::NumericTypeID DeviceAllocation::type() const {
return type_;
}
void *DeviceAllocation::data() const {
return pointer_;
}
void *DeviceAllocation::batch_data(int batch_idx) const {
return static_cast<char *>(data()) + batch_stride_bytes() * batch_idx;
}
library::LayoutTypeID DeviceAllocation::layout() const {
return layout_;
}
std::vector<int64_t> const & DeviceAllocation::stride() const {
return stride_;
}
/// Gets the extent vector
std::vector<int> const & DeviceAllocation::extent() const {
return extent_;
}
/// Gets the number of adjacent tensors in memory
int DeviceAllocation::batch_count() const {
return batch_count_;
}
/// Gets the stride (in units of elements) between adjacent tensors in the batch
int64_t DeviceAllocation::batch_stride() const {
return batch_stride_;
}
/// Gets the stride (in units of bytes) between adjacent tensors in the batch
int64_t DeviceAllocation::batch_stride_bytes() const {
return bytes(type_, batch_stride_);
}
size_t DeviceAllocation::capacity() const {
return capacity_;
}
size_t DeviceAllocation::bytes() const {
return bytes(type_, capacity_);
}
/// Copies from an equivalent-sized tensor in device memory
void DeviceAllocation::copy_from_device(void const *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
throw std::runtime_error("Failed device-to-device copy");
}
}
/// Copies from an equivalent-sized tensor in host memory
void DeviceAllocation::copy_from_host(void const *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
throw std::runtime_error("Failed host-to-device copy");
}
}
/// Copies to an equivalent-sized tensor in host memory
void DeviceAllocation::copy_to_host(void *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
cudaError_t result = cudaMemcpy(ptr, data(), bytes(), cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
throw std::runtime_error("Failed device-to-host copy");
}
}
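// Round-trip sketch (editor's addition, hypothetical buffers): each copy helper
// moves exactly bytes() bytes and throws on a CUDA error, so a simple
// host -> device -> host round trip looks like:
//
//   std::vector<uint8_t> src(allocation.bytes()), dst(allocation.bytes());
//   allocation.copy_from_host(src.data());   // host -> device
//   allocation.copy_to_host(dst.data());     // device -> host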
void DeviceAllocation::initialize_random_device(int seed, Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile for
// this reason.
switch (type_) {
case library::NumericTypeID::kF16:
cutlass::reference::device::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::device::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::device::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::device::BlockFillRandom<float>(
reinterpret_cast<float *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::device::BlockFillRandom<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::device::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::device::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE4M3:
cutlass::reference::device::BlockFillRandom<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::device::BlockFillRandom<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::device::BlockFillRandom<double>(
reinterpret_cast<double *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::device::BlockFillRandom<complex<double>>(
reinterpret_cast<complex<double> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::device::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::device::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::device::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::device::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::device::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::device::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::device::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::device::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::device::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::device::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::device::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t *>(pointer_),
capacity_,
seed,
dist
);
break;
default: break;
}
}
void DeviceAllocation::initialize_random_host(int seed, Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::host::BlockFillRandom<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::host::BlockFillRandom<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::host::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::host::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::host::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::host::BlockFillRandom<float>(
reinterpret_cast<float *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::host::BlockFillRandom<double>(
reinterpret_cast<double *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::host::BlockFillRandom<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::host::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::host::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::host::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::host::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::host::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::host::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::host::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::host::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::host::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::host::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::host::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
default: break;
}
copy_from_host(host_data.data());
}
void DeviceAllocation::initialize_sequential_device(Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
switch (type_) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::device::BlockFillSequential<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(pointer_),
capacity_,
static_cast<cutlass::float_e4m3_t>(dist.sequential.delta),
static_cast<cutlass::float_e4m3_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::device::BlockFillSequential<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(pointer_),
capacity_,
static_cast<cutlass::float_e5m2_t>(dist.sequential.delta),
static_cast<cutlass::float_e5m2_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::device::BlockFillSequential<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(pointer_),
capacity_,
static_cast<cutlass::half_t>(dist.sequential.delta),
static_cast<cutlass::half_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::device::BlockFillSequential<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(pointer_),
capacity_,
static_cast<cutlass::bfloat16_t>(dist.sequential.delta),
static_cast<cutlass::bfloat16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::device::BlockFillSequential<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(pointer_),
capacity_,
static_cast<cutlass::tfloat32_t>(dist.sequential.delta),
static_cast<cutlass::tfloat32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::device::BlockFillSequential<float>(
reinterpret_cast<float *>(pointer_),
capacity_,
static_cast<float>(dist.sequential.delta),
static_cast<float>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t> *>(pointer_),
capacity_,
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.delta)),
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(pointer_),
capacity_,
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.delta)),
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(pointer_),
capacity_,
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.delta)),
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::device::BlockFillSequential<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(pointer_),
capacity_,
cutlass::complex<float>(
static_cast<float>(dist.sequential.delta)),
cutlass::complex<float>(
static_cast<float>(dist.sequential.start))
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::device::BlockFillSequential<double>(
reinterpret_cast<double *>(pointer_),
capacity_,
static_cast<double>(dist.sequential.delta),
static_cast<double>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::device::BlockFillSequential<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double> *>(pointer_),
capacity_,
cutlass::complex<double>(
static_cast<double>(dist.sequential.delta)),
cutlass::complex<double>(
static_cast<double>(dist.sequential.start))
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::device::BlockFillSequential<int2b_t>(
reinterpret_cast<int2b_t *>(pointer_),
capacity_,
static_cast<int2b_t>(dist.sequential.delta),
static_cast<int2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::device::BlockFillSequential<int4b_t>(
reinterpret_cast<int4b_t *>(pointer_),
capacity_,
static_cast<int4b_t>(dist.sequential.delta),
static_cast<int4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::device::BlockFillSequential<int8_t>(
reinterpret_cast<int8_t *>(pointer_),
capacity_,
static_cast<int8_t>(dist.sequential.delta),
static_cast<int8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::device::BlockFillSequential<int16_t>(
reinterpret_cast<int16_t *>(pointer_),
capacity_,
static_cast<int16_t>(dist.sequential.delta),
static_cast<int16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::device::BlockFillSequential<int32_t>(
reinterpret_cast<int32_t *>(pointer_),
capacity_,
static_cast<int32_t>(dist.sequential.delta),
static_cast<int32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::device::BlockFillSequential<int64_t>(
reinterpret_cast<int64_t *>(pointer_),
capacity_,
static_cast<int64_t>(dist.sequential.delta),
static_cast<int64_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::device::BlockFillSequential<uint1b_t>(
reinterpret_cast<uint1b_t *>(pointer_),
capacity_,
static_cast<uint1b_t>(dist.sequential.delta),
static_cast<uint1b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::device::BlockFillSequential<uint2b_t>(
reinterpret_cast<uint2b_t *>(pointer_),
capacity_,
static_cast<uint2b_t>(dist.sequential.delta),
static_cast<uint2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::device::BlockFillSequential<uint4b_t>(
reinterpret_cast<uint4b_t *>(pointer_),
capacity_,
static_cast<uint4b_t>(dist.sequential.delta),
static_cast<uint4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::device::BlockFillSequential<uint8_t>(
reinterpret_cast<uint8_t *>(pointer_),
capacity_,
static_cast<uint8_t>(dist.sequential.delta),
static_cast<uint8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillSequential<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
static_cast<uint16_t>(dist.sequential.delta),
static_cast<uint16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillSequential<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
static_cast<uint32_t>(dist.sequential.delta),
static_cast<uint32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::device::BlockFillSequential<uint64_t>(
reinterpret_cast<uint64_t *>(pointer_),
capacity_,
static_cast<uint64_t>(dist.sequential.delta),
static_cast<uint64_t>(dist.sequential.start)
);
break;
default: break;
}
}
void DeviceAllocation::initialize_sequential_host(Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::host::BlockFillSequential<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(host_data.data()),
capacity_,
static_cast<cutlass::float_e4m3_t>(dist.sequential.delta),
static_cast<cutlass::float_e4m3_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::host::BlockFillSequential<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(host_data.data()),
capacity_,
static_cast<cutlass::float_e5m2_t>(dist.sequential.delta),
static_cast<cutlass::float_e5m2_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::host::BlockFillSequential<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(host_data.data()),
capacity_,
static_cast<cutlass::half_t>(dist.sequential.delta),
static_cast<cutlass::half_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::host::BlockFillSequential<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(host_data.data()),
capacity_,
static_cast<cutlass::bfloat16_t>(dist.sequential.delta),
static_cast<cutlass::bfloat16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::host::BlockFillSequential<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(host_data.data()),
capacity_,
static_cast<cutlass::tfloat32_t>(dist.sequential.delta),
static_cast<cutlass::tfloat32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::host::BlockFillSequential<float>(
reinterpret_cast<float *>(host_data.data()),
capacity_,
static_cast<float>(dist.sequential.delta),
static_cast<float>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t> *>(host_data.data()),
capacity_,
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.delta)),
cutlass::complex<cutlass::half_t>(
static_cast<cutlass::half_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(host_data.data()),
capacity_,
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.delta)),
cutlass::complex<cutlass::bfloat16_t>(
static_cast<cutlass::bfloat16_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(host_data.data()),
capacity_,
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.delta)),
cutlass::complex<cutlass::tfloat32_t>(
static_cast<cutlass::tfloat32_t>(dist.sequential.start))
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::host::BlockFillSequential<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(host_data.data()),
capacity_,
cutlass::complex<float>(
static_cast<float>(dist.sequential.delta)),
cutlass::complex<float>(
static_cast<float>(dist.sequential.start))
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::host::BlockFillSequential<double>(
reinterpret_cast<double *>(host_data.data()),
capacity_,
static_cast<double>(dist.sequential.delta),
static_cast<double>(dist.sequential.start)
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::host::BlockFillSequential<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double> *>(host_data.data()),
capacity_,
cutlass::complex<double>(
static_cast<double>(dist.sequential.delta)),
cutlass::complex<double>(
static_cast<double>(dist.sequential.start))
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::host::BlockFillSequential<int2b_t>(
reinterpret_cast<int2b_t *>(host_data.data()),
capacity_,
static_cast<int2b_t>(dist.sequential.delta),
static_cast<int2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::host::BlockFillSequential<int4b_t>(
reinterpret_cast<int4b_t *>(host_data.data()),
capacity_,
static_cast<int4b_t>(dist.sequential.delta),
static_cast<int4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::host::BlockFillSequential<int8_t>(
reinterpret_cast<int8_t *>(host_data.data()),
capacity_,
static_cast<int8_t>(dist.sequential.delta),
static_cast<int8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillSequential<int16_t>(
reinterpret_cast<int16_t *>(host_data.data()),
capacity_,
static_cast<int16_t>(dist.sequential.delta),
static_cast<int16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillSequential<int32_t>(
reinterpret_cast<int32_t *>(host_data.data()),
capacity_,
static_cast<int32_t>(dist.sequential.delta),
static_cast<int32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::host::BlockFillSequential<int64_t>(
reinterpret_cast<int64_t *>(host_data.data()),
capacity_,
static_cast<int64_t>(dist.sequential.delta),
static_cast<int64_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::host::BlockFillSequential<uint1b_t>(
reinterpret_cast<uint1b_t *>(host_data.data()),
capacity_,
static_cast<uint1b_t>(dist.sequential.delta),
static_cast<uint1b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::host::BlockFillSequential<uint2b_t>(
reinterpret_cast<uint2b_t *>(host_data.data()),
capacity_,
static_cast<uint2b_t>(dist.sequential.delta),
static_cast<uint2b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::host::BlockFillSequential<uint4b_t>(
reinterpret_cast<uint4b_t *>(host_data.data()),
capacity_,
static_cast<uint4b_t>(dist.sequential.delta),
static_cast<uint4b_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::host::BlockFillSequential<uint8_t>(
reinterpret_cast<uint8_t *>(host_data.data()),
capacity_,
static_cast<uint8_t>(dist.sequential.delta),
static_cast<uint8_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::host::BlockFillSequential<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
static_cast<uint16_t>(dist.sequential.delta),
static_cast<uint16_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::host::BlockFillSequential<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
static_cast<uint32_t>(dist.sequential.delta),
static_cast<uint32_t>(dist.sequential.start)
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::host::BlockFillSequential<uint64_t>(
reinterpret_cast<uint64_t *>(host_data.data()),
capacity_,
static_cast<uint64_t>(dist.sequential.delta),
static_cast<uint64_t>(dist.sequential.start)
);
break;
default: break;
}
copy_from_host(host_data.data());
}
void DeviceAllocation::initialize_random_sparsemeta_device(int seed, int MetaSizeInBits) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile for
// this reason.
switch (type_) {
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
seed,
MetaSizeInBits
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
seed,
MetaSizeInBits
);
break;
default:
break;
}
}
void DeviceAllocation::initialize_random_sparsemeta_host(int seed, int MetaSizeInBits) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
seed,
MetaSizeInBits
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
seed,
MetaSizeInBits
);
break;
default:
break;
}
copy_from_host(host_data.data());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two blocks have exactly the same value
bool DeviceAllocation::block_compare_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity) {
switch (numeric_type) {
case library::NumericTypeID::kFE4M3:
return reference::device::BlockCompareEqual<float_e4m3_t>(
reinterpret_cast<float_e4m3_t const *>(ptr_A),
reinterpret_cast<float_e4m3_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kFE5M2:
return reference::device::BlockCompareEqual<float_e5m2_t>(
reinterpret_cast<float_e5m2_t const *>(ptr_A),
reinterpret_cast<float_e5m2_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kF16:
return reference::device::BlockCompareEqual<half_t>(
reinterpret_cast<half_t const *>(ptr_A),
reinterpret_cast<half_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const *>(ptr_A),
reinterpret_cast<bfloat16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const *>(ptr_A),
reinterpret_cast<tfloat32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kF32:
return reference::device::BlockCompareEqual<float>(
reinterpret_cast<float const *>(ptr_A),
reinterpret_cast<float const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<cutlass::complex<float> >(
reinterpret_cast<complex<float> const *>(ptr_A),
reinterpret_cast<complex<float> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<complex<half_t>>(
reinterpret_cast<complex<half_t> const *>(ptr_A),
reinterpret_cast<complex<half_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCBF16:
return reference::device::BlockCompareEqual<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> const *>(ptr_A),
reinterpret_cast<complex<bfloat16_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCTF32:
return reference::device::BlockCompareEqual<complex<tfloat32_t>>(
reinterpret_cast<complex<tfloat32_t> const *>(ptr_A),
reinterpret_cast<complex<tfloat32_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kF64:
return reference::device::BlockCompareEqual<double>(
reinterpret_cast<double const *>(ptr_A),
reinterpret_cast<double const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<complex<double>>(
reinterpret_cast<complex<double> const *>(ptr_A),
reinterpret_cast<complex<double> const *>(ptr_B),
capacity);
case library::NumericTypeID::kS2:
return reference::device::BlockCompareEqual<int2b_t>(
reinterpret_cast<int2b_t const *>(ptr_A),
reinterpret_cast<int2b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS4:
return reference::device::BlockCompareEqual<int4b_t>(
reinterpret_cast<int4b_t const *>(ptr_A),
reinterpret_cast<int4b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS8:
return reference::device::BlockCompareEqual<int8_t>(
reinterpret_cast<int8_t const *>(ptr_A),
reinterpret_cast<int8_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS16:
return reference::device::BlockCompareEqual<int16_t>(
reinterpret_cast<int16_t const *>(ptr_A),
reinterpret_cast<int16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS32:
return reference::device::BlockCompareEqual<int32_t>(
reinterpret_cast<int32_t const *>(ptr_A),
reinterpret_cast<int32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS64:
return reference::device::BlockCompareEqual<int64_t>(
reinterpret_cast<int64_t const *>(ptr_A),
reinterpret_cast<int64_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kB1:
return reference::device::BlockCompareEqual<uint1b_t>(
reinterpret_cast<uint1b_t const *>(ptr_A),
reinterpret_cast<uint1b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU2:
return reference::device::BlockCompareEqual<uint2b_t>(
reinterpret_cast<uint2b_t const *>(ptr_A),
reinterpret_cast<uint2b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU4:
return reference::device::BlockCompareEqual<uint4b_t>(
reinterpret_cast<uint4b_t const *>(ptr_A),
reinterpret_cast<uint4b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU8:
return reference::device::BlockCompareEqual<uint8_t>(
reinterpret_cast<uint8_t const *>(ptr_A),
reinterpret_cast<uint8_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU16:
return reference::device::BlockCompareEqual<uint16_t>(
reinterpret_cast<uint16_t const *>(ptr_A),
reinterpret_cast<uint16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU32:
return reference::device::BlockCompareEqual<uint32_t>(
reinterpret_cast<uint32_t const *>(ptr_A),
reinterpret_cast<uint32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU64:
return reference::device::BlockCompareEqual<uint64_t>(
reinterpret_cast<uint64_t const *>(ptr_A),
reinterpret_cast<uint64_t const *>(ptr_B),
capacity);
default:
throw std::runtime_error("Unsupported numeric type");
}
}
/// Returns true if two blocks have approximately the same value
bool DeviceAllocation::block_compare_relatively_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity,
double epsilon,
double nonzero_floor) {
switch (numeric_type) {
case library::NumericTypeID::kFE4M3:
return reference::device::BlockCompareRelativelyEqual<float_e4m3_t>(
reinterpret_cast<float_e4m3_t const *>(ptr_A),
reinterpret_cast<float_e4m3_t const *>(ptr_B),
capacity,
static_cast<float_e4m3_t>(epsilon),
static_cast<float_e4m3_t>(nonzero_floor));
case library::NumericTypeID::kFE5M2:
return reference::device::BlockCompareRelativelyEqual<float_e5m2_t>(
reinterpret_cast<float_e5m2_t const *>(ptr_A),
reinterpret_cast<float_e5m2_t const *>(ptr_B),
capacity,
static_cast<float_e5m2_t>(epsilon),
static_cast<float_e5m2_t>(nonzero_floor));
case library::NumericTypeID::kF16:
return reference::device::BlockCompareRelativelyEqual<half_t>(
reinterpret_cast<half_t const *>(ptr_A),
reinterpret_cast<half_t const *>(ptr_B),
capacity,
static_cast<half_t>(epsilon),
static_cast<half_t>(nonzero_floor));
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareRelativelyEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const *>(ptr_A),
reinterpret_cast<bfloat16_t const *>(ptr_B),
capacity,
static_cast<bfloat16_t>(epsilon),
static_cast<bfloat16_t>(nonzero_floor));
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareRelativelyEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const *>(ptr_A),
reinterpret_cast<tfloat32_t const *>(ptr_B),
capacity,
static_cast<tfloat32_t>(epsilon),
static_cast<tfloat32_t>(nonzero_floor));
case library::NumericTypeID::kF32:
return reference::device::BlockCompareRelativelyEqual<float>(
reinterpret_cast<float const *>(ptr_A),
reinterpret_cast<float const *>(ptr_B),
capacity,
static_cast<float>(epsilon),
static_cast<float>(nonzero_floor));
case library::NumericTypeID::kF64:
return reference::device::BlockCompareRelativelyEqual<double>(
reinterpret_cast<double const *>(ptr_A),
reinterpret_cast<double const *>(ptr_B),
capacity,
static_cast<double>(epsilon),
static_cast<double>(nonzero_floor));
case library::NumericTypeID::kS2:
return reference::device::BlockCompareRelativelyEqual<int2b_t>(
reinterpret_cast<int2b_t const *>(ptr_A),
reinterpret_cast<int2b_t const *>(ptr_B),
capacity,
static_cast<int2b_t>(epsilon),
static_cast<int2b_t>(nonzero_floor));
case library::NumericTypeID::kS4:
return reference::device::BlockCompareRelativelyEqual<int4b_t>(
reinterpret_cast<int4b_t const *>(ptr_A),
reinterpret_cast<int4b_t const *>(ptr_B),
capacity,
static_cast<int4b_t>(epsilon),
static_cast<int4b_t>(nonzero_floor));
case library::NumericTypeID::kS8:
return reference::device::BlockCompareRelativelyEqual<int8_t>(
reinterpret_cast<int8_t const *>(ptr_A),
reinterpret_cast<int8_t const *>(ptr_B),
capacity,
static_cast<int8_t>(epsilon),
static_cast<int8_t>(nonzero_floor));
case library::NumericTypeID::kS16:
return reference::device::BlockCompareRelativelyEqual<int16_t>(
reinterpret_cast<int16_t const *>(ptr_A),
reinterpret_cast<int16_t const *>(ptr_B),
capacity,
static_cast<int16_t>(epsilon),
static_cast<int16_t>(nonzero_floor));
case library::NumericTypeID::kS32:
return reference::device::BlockCompareRelativelyEqual<int32_t>(
reinterpret_cast<int32_t const *>(ptr_A),
reinterpret_cast<int32_t const *>(ptr_B),
capacity,
static_cast<int32_t>(epsilon),
static_cast<int32_t>(nonzero_floor));
case library::NumericTypeID::kS64:
return reference::device::BlockCompareRelativelyEqual<int64_t>(
reinterpret_cast<int64_t const *>(ptr_A),
reinterpret_cast<int64_t const *>(ptr_B),
capacity,
static_cast<int64_t>(epsilon),
static_cast<int64_t>(nonzero_floor));
case library::NumericTypeID::kB1:
return reference::device::BlockCompareRelativelyEqual<uint1b_t>(
reinterpret_cast<uint1b_t const *>(ptr_A),
reinterpret_cast<uint1b_t const *>(ptr_B),
capacity,
static_cast<uint1b_t>(epsilon),
static_cast<uint1b_t>(nonzero_floor));
case library::NumericTypeID::kU2:
return reference::device::BlockCompareRelativelyEqual<uint2b_t>(
reinterpret_cast<uint2b_t const *>(ptr_A),
reinterpret_cast<uint2b_t const *>(ptr_B),
capacity,
static_cast<uint2b_t>(epsilon),
static_cast<uint2b_t>(nonzero_floor));
case library::NumericTypeID::kU4:
return reference::device::BlockCompareRelativelyEqual<uint4b_t>(
reinterpret_cast<uint4b_t const *>(ptr_A),
reinterpret_cast<uint4b_t const *>(ptr_B),
capacity,
static_cast<uint4b_t>(epsilon),
static_cast<uint4b_t>(nonzero_floor));
case library::NumericTypeID::kU8:
return reference::device::BlockCompareRelativelyEqual<uint8_t>(
reinterpret_cast<uint8_t const *>(ptr_A),
reinterpret_cast<uint8_t const *>(ptr_B),
capacity,
static_cast<uint8_t>(epsilon),
static_cast<uint8_t>(nonzero_floor));
case library::NumericTypeID::kU16:
return reference::device::BlockCompareRelativelyEqual<uint16_t>(
reinterpret_cast<uint16_t const *>(ptr_A),
reinterpret_cast<uint16_t const *>(ptr_B),
capacity,
static_cast<uint16_t>(epsilon),
static_cast<uint16_t>(nonzero_floor));
case library::NumericTypeID::kU32:
return reference::device::BlockCompareRelativelyEqual<uint32_t>(
reinterpret_cast<uint32_t const *>(ptr_A),
reinterpret_cast<uint32_t const *>(ptr_B),
capacity,
static_cast<uint32_t>(epsilon),
static_cast<uint32_t>(nonzero_floor));
case library::NumericTypeID::kU64:
return reference::device::BlockCompareRelativelyEqual<uint64_t>(
reinterpret_cast<uint64_t const *>(ptr_A),
reinterpret_cast<uint64_t const *>(ptr_B),
capacity,
static_cast<uint64_t>(epsilon),
static_cast<uint64_t>(nonzero_floor));
// No relatively equal comparison for complex numbers.
//
// As a simplification, we can require bitwise equality. This avoids false positives.
// (i.e. "pass" really means passing. "Fail" may not actually mean failure given appropriate epsilon.)
//
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<cutlass::complex<half_t> >(
reinterpret_cast<complex<half_t> const *>(ptr_A),
reinterpret_cast<complex<half_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<cutlass::complex<float> >(
reinterpret_cast<complex<float> const *>(ptr_A),
reinterpret_cast<complex<float> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<cutlass::complex<double> >(
reinterpret_cast<complex<double> const *>(ptr_A),
reinterpret_cast<complex<double> const *>(ptr_B),
capacity);
default:
{
throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(numeric_type));
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord, int Rank>
struct vector_to_coord {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[Rank - 1] = vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
coord[Rank - 1] = (int)vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 1> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[0] = vec.at(0);
}
vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
coord[0] = (int)vec.at(0);
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 0> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
}
};
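// Editor's note: vector_to_coord unrolls at compile time, assigning one rank per
// recursion step. For Rank == 3 the constructor chain is equivalent to:
//
//   coord[2] = vec.at(2);
//   coord[1] = vec.at(1);
//   coord[0] = vec.at(0);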
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Layout>
static void write_tensor_csv_static_tensor_view(
std::ostream &out,
DeviceAllocation &allocation) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank, typename Layout::Stride::Index> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank, typename Layout::Stride::Index>,
Layout::kStrideRank>(stride, allocation.stride());
Layout layout(stride);
HostTensor<Element, Layout> host_tensor(extent, layout, false);
if (host_tensor.capacity() != allocation.batch_stride()) {
throw std::runtime_error("Unexpected capacity to equal.");
}
host_tensor.copy_in_device_to_host(
static_cast<Element const *>(allocation.data()),
allocation.batch_stride());
TensorViewWrite(out, host_tensor.host_view());
out << "\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
static void write_tensor_csv_static_type(
std::ostream &out,
DeviceAllocation &allocation) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
write_tensor_csv_static_tensor_view<T, layout::RowMajor>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajor:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajor>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNHWC>(out, allocation);
break;
case library::LayoutTypeID::kTensorNDHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNDHWC>(out, allocation);
break;
case library::LayoutTypeID::kTensorNC32HW32:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<32>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNC64HW64:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorC32RSK32:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<32>>(out, allocation);
break;
case library::LayoutTypeID::kTensorC64RSK64:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<64>>(out, allocation);
break;
default:
throw std::runtime_error("Unhandled layout");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a tensor to csv
void DeviceAllocation::write_tensor_csv(
std::ostream &out) {
switch (this->type()) {
case library::NumericTypeID::kFE4M3:
write_tensor_csv_static_type<float_e4m3_t>(out, *this);
break;
case library::NumericTypeID::kFE5M2:
write_tensor_csv_static_type<float_e5m2_t>(out, *this);
break;
case library::NumericTypeID::kF16:
write_tensor_csv_static_type<half_t>(out, *this);
break;
case library::NumericTypeID::kBF16:
write_tensor_csv_static_type<bfloat16_t>(out, *this);
break;
case library::NumericTypeID::kTF32:
write_tensor_csv_static_type<tfloat32_t>(out, *this);
break;
case library::NumericTypeID::kF32:
write_tensor_csv_static_type<float>(out, *this);
break;
case library::NumericTypeID::kF64:
write_tensor_csv_static_type<double>(out, *this);
break;
case library::NumericTypeID::kS2:
write_tensor_csv_static_type<int2b_t>(out, *this);
break;
case library::NumericTypeID::kS4:
write_tensor_csv_static_type<int4b_t>(out, *this);
break;
case library::NumericTypeID::kS8:
write_tensor_csv_static_type<int8_t>(out, *this);
break;
case library::NumericTypeID::kS16:
write_tensor_csv_static_type<int16_t>(out, *this);
break;
case library::NumericTypeID::kS32:
write_tensor_csv_static_type<int32_t>(out, *this);
break;
case library::NumericTypeID::kS64:
write_tensor_csv_static_type<int64_t>(out, *this);
break;
case library::NumericTypeID::kB1:
write_tensor_csv_static_type<uint1b_t>(out, *this);
break;
case library::NumericTypeID::kU2:
write_tensor_csv_static_type<uint2b_t>(out, *this);
break;
case library::NumericTypeID::kU4:
write_tensor_csv_static_type<uint4b_t>(out, *this);
break;
case library::NumericTypeID::kU8:
write_tensor_csv_static_type<uint8_t>(out, *this);
break;
case library::NumericTypeID::kU16:
write_tensor_csv_static_type<uint16_t>(out, *this);
break;
case library::NumericTypeID::kU32:
write_tensor_csv_static_type<uint32_t>(out, *this);
break;
case library::NumericTypeID::kU64:
write_tensor_csv_static_type<uint64_t>(out, *this);
break;
case library::NumericTypeID::kCF16:
write_tensor_csv_static_type<cutlass::complex<half_t> >(out, *this);
break;
case library::NumericTypeID::kCF32:
write_tensor_csv_static_type<cutlass::complex<float> >(out, *this);
break;
case library::NumericTypeID::kCF64:
write_tensor_csv_static_type<cutlass::complex<double> >(out, *this);
break;
default:
throw std::runtime_error("Unsupported numeric type");
}
}
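// Usage sketch (illustrative only; `alloc` and the file name are hypothetical, and a
// real caller would also need <fstream>): once an operand has been initialized on the
// device, its contents can be dumped for inspection with
//
//   std::ofstream csv("operand_A.csv");
//   alloc.write_tensor_csv(csv);
//
// write_tensor_csv() dispatches first on the allocation's numeric type and then on its
// layout before copying the data back to the host and printing it.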
template <typename Element, typename Layout>
static void tensor_fill_tensor_view(DeviceAllocation &allocation, Element val = Element()) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank, typename Layout::LongIndex> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank, typename Layout::LongIndex>,
Layout::kStrideRank>(stride, allocation.stride());
TensorView<Element, Layout> view(
static_cast<Element *>(allocation.data()),
Layout(stride),
extent
);
cutlass::reference::device::TensorFill<Element, Layout>(
view,
val
);
}
template <typename Element>
static void tensor_fill(DeviceAllocation &allocation, Element val = Element()) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
tensor_fill_tensor_view<Element, layout::RowMajor>(allocation, val);
break;
case library::LayoutTypeID::kColumnMajor:
tensor_fill_tensor_view<Element, layout::ColumnMajor>(allocation, val);
break;
case library::LayoutTypeID::kTensorNHWC:
tensor_fill_tensor_view<Element, layout::TensorNHWC>(allocation, val);
break;
case library::LayoutTypeID::kTensorNDHWC:
tensor_fill_tensor_view<Element, layout::TensorNDHWC>(allocation, val);
break;
case library::LayoutTypeID::kTensorNC32HW32:
tensor_fill_tensor_view<Element, layout::TensorNCxHWx<32>>(allocation, val);
break;
case library::LayoutTypeID::kTensorNC64HW64:
tensor_fill_tensor_view<Element, layout::TensorNCxHWx<64>>(allocation, val);
break;
case library::LayoutTypeID::kTensorC32RSK32:
tensor_fill_tensor_view<Element, layout::TensorCxRSKx<32>>(allocation, val);
break;
case library::LayoutTypeID::kTensorC64RSK64:
tensor_fill_tensor_view<Element, layout::TensorCxRSKx<64>>(allocation, val);
break;
default:
throw std::runtime_error("Unsupported layout");
break;
}
}
/// Fills a tensor uniformly with a value (most frequently used to clear the tensor)
void DeviceAllocation::fill(double val = 0.0) {
switch (this->type()) {
case library::NumericTypeID::kFE4M3:
tensor_fill<float_e4m3_t>(*this, static_cast<float_e4m3_t>(val));
break;
case library::NumericTypeID::kFE5M2:
tensor_fill<float_e5m2_t>(*this, static_cast<float_e5m2_t>(val));
break;
case library::NumericTypeID::kF16:
tensor_fill<half_t>(*this, static_cast<half_t>(val));
break;
case library::NumericTypeID::kBF16:
tensor_fill<bfloat16_t>(*this, static_cast<bfloat16_t>(val));
break;
case library::NumericTypeID::kTF32:
tensor_fill<tfloat32_t>(*this, static_cast<tfloat32_t>(val));
break;
case library::NumericTypeID::kF32:
tensor_fill<float>(*this, static_cast<float>(val));
break;
case library::NumericTypeID::kF64:
tensor_fill<double>(*this, static_cast<double>(val));
break;
case library::NumericTypeID::kS2:
tensor_fill<int2b_t>(*this, static_cast<int2b_t>(val));
break;
case library::NumericTypeID::kS4:
tensor_fill<int4b_t>(*this, static_cast<int4b_t>(val));
break;
case library::NumericTypeID::kS8:
tensor_fill<int8_t>(*this, static_cast<int8_t>(val));
break;
case library::NumericTypeID::kS16:
tensor_fill<int16_t>(*this, static_cast<int16_t>(val));
break;
case library::NumericTypeID::kS32:
tensor_fill<int32_t>(*this, static_cast<int32_t>(val));
break;
case library::NumericTypeID::kS64:
tensor_fill<int64_t>(*this, static_cast<int64_t>(val));
break;
case library::NumericTypeID::kB1:
tensor_fill<uint1b_t>(*this, static_cast<uint1b_t>(val));
break;
case library::NumericTypeID::kU2:
tensor_fill<uint2b_t>(*this, static_cast<uint2b_t>(val));
break;
case library::NumericTypeID::kU4:
tensor_fill<uint4b_t>(*this, static_cast<uint4b_t>(val));
break;
case library::NumericTypeID::kU8:
tensor_fill<uint8_t>(*this, static_cast<uint8_t>(val));
break;
case library::NumericTypeID::kU16:
tensor_fill<uint16_t>(*this, static_cast<uint16_t>(val));
break;
case library::NumericTypeID::kU32:
tensor_fill<uint32_t>(*this, static_cast<uint32_t>(val));
break;
case library::NumericTypeID::kU64:
tensor_fill<uint64_t>(*this, static_cast<uint64_t>(val));
break;
case library::NumericTypeID::kCF16:
tensor_fill<cutlass::complex<half_t> >(*this, from_real<half_t>(val));
break;
case library::NumericTypeID::kCF32:
tensor_fill<cutlass::complex<float> >(*this, from_real<float>(val));
break;
case library::NumericTypeID::kCF64:
tensor_fill<cutlass::complex<double> >(*this, from_real<double>(val));
break;
default:
throw std::runtime_error("Unsupported numeric type");
}
}
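// For a concrete sense of the dispatch above: on an F32, row-major allocation,
// fill(0.0) resolves to tensor_fill<float>, which selects layout::RowMajor, builds a
// TensorView over the device pointer, and calls
// cutlass::reference::device::TensorFill<float, layout::RowMajor>(view, 0.0f).
// Every other numeric type / layout pair follows the same two-level dispatch.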
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
|
2dd9d67b829622e202ed50b74be370a3f28003e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CUDA + OpenGL bullet-hell demo: bullets are simulated, collision-tested, and
// compacted on the GPU while the player, enemies, and shots are updated on the host
// each frame.
// Header adapted from Ingemar Ragnemalm's CUDA "hello world" example, 2010:
// http://computer-graphics.se/hello-world-for-cuda.html
//Include GLEW
#include <GL/glew.h>
//Include GLFW
#include <GLFW/glfw3.h>
#include <stdio.h>
#define _USE_MATH_DEFINES
#include <math.h>
const int block_width = 1024;
const int block_height = 1;
const int MAX_BULLET_COUNT = 100000;
const int MAX_ENEMY_COUNT = 100;
const int MAX_SHOT_COUNT = 1000;
const int MAX_host_to_device_data_COUNT = 1000;
const int GAMEFIELD_SEMIWIDTH = 320;//6 * 32;
const int GAMEFIELD_SEMIHEIGHT = 240;//7 * 32;
const int PLAYER_TOLERANCE = -20;
const int BULLET_TOLERANCE = 50;
const int ENEMY_TOLERANCE = 100;
const int SHOT_TOLERANCE = 100;
const int PLAYER_FORM_TIME = 15;
const int PLAYER_ACTIVE_INVUL_TIME = 180;
const int PLAYER_FADE_TIME = 15;
const int PLAYER_DEAD_TIME = 30;
const int ENEMY_FORM_TIME = 0;
const int ENEMY_FADE_TIME = 15;
const int SHOT_FADE_TIME = 5;
enum entity_status { FORM, ACTIVE, FADE, DEAD };
enum bullet_type { BASIC };
//Define an error callback
static void error_callback(int error, const char* description);
struct point;
struct polar_point;
struct host_data;
struct point{
double x;
double y;
__host__ __device__ point();
__host__ __device__ point(const point& p);
__host__ __device__ point(double x, double y);
__host__ __device__ point(const polar_point& p);
};
struct polar_point{
double r;
double t;
__host__ __device__ polar_point();
__host__ __device__ polar_point(const polar_point& p);
__host__ __device__ polar_point(double r, double t);
__host__ __device__ polar_point(const point& p);
};
point::point() : x(0), y(0){}
point::point(const point& p) : x(p.x), y(p.y){}
point::point(double x, double y) : x(x), y(y){}
point::point(const polar_point& p) {
x = p.r*cos(p.t);
y = p.r*sin(p.t);
}
// Operands are taken by const reference so that temporaries
// (e.g. point(1, 1)*shot.semi_size) bind in standard C++.
__host__ __device__ point operator+(const point & a, const point & b){
	point return_value(a.x + b.x, a.y + b.y);
	return return_value;
}
__host__ __device__ point operator*(const point & a, const point & b){
	point return_value(a.x*b.x, a.y*b.y);
	return return_value;
}
__host__ __device__ point operator/(const point & a, const point & b){
	point return_value(a.x/b.x, a.y/b.y);
	return return_value;
}
__host__ __device__ point operator-(const point & a, const point & b){
	point return_value(a.x-b.x, a.y-b.y);
	return return_value;
}
polar_point::polar_point() : r(0), t(0){};
polar_point::polar_point(const polar_point& p) : r(p.r), t(p.t){}
polar_point::polar_point(double r, double t) : r(r), t(t){}
polar_point::polar_point(const point& p) {
r = hypot(p.x, p.y);
t = ((p.x != 0) || (p.y != 0)) ?
atan2(p.y, p.x) :
0;
}
struct shot{
point pos;
point vel;
point semi_size;
int damage;
entity_status status;
int age;
};
struct shot_container{
shot shot_list[MAX_SHOT_COUNT];
int shot_count;
};
typedef struct bullet_t{
point pos;
point vel;
point acc;
entity_status status;
bullet_type type;
int age;
double theta;
double w;
	__host__ __device__ bullet_t() : status(FORM), type(BASIC), age(0), theta(0), w(0){}
	__host__ __device__ bullet_t(const bullet_t& b) : pos(b.pos), vel(b.vel), acc(b.acc),
		status(b.status), type(b.type), age(b.age),
		theta(b.theta), w(b.w) {}
} bullet;
typedef struct pull_info_t{
bool need;
int delta;
} pull_info;
typedef struct bullet_slot_t{
bullet load;
pull_info pull;
} bullet_slot;
typedef struct bullet_draw_info_t{
point pos;
double theta;
entity_status status;
bullet_type type;
} bullet_draw_info;
typedef struct bullet_container_t{
bullet_slot bullet_slots[MAX_BULLET_COUNT];
int bullet_count;
bool collision_with_player;
} bullet_container;
typedef struct device_to_host_data_t{
bullet_draw_info draw_slots[MAX_BULLET_COUNT];
int bullet_count;
bool collision_with_player;
} device_to_host_data;
typedef struct host_to_device_data_t{
bullet bullets[MAX_host_to_device_data_COUNT];
int queue_count;
} host_to_device_data;
struct player{
point pos;
point vel;
static const int radius = 3;
entity_status status;
int age;
bool invul;
bool is_hit;
bool is_focus;
bool is_shooting;
	__host__ __device__ player() : pos(0, -GAMEFIELD_SEMIHEIGHT*.8), vel(0, 0), status(FORM), age(0),
		invul(false), is_hit(false), is_focus(false), is_shooting(false){}
};
struct enemy{
point pos;
point vel;
entity_status status;
int hp;
int age;
double radius;
void(*update) (enemy&, host_data&);
};
struct enemy_container{
enemy enemy_list[MAX_ENEMY_COUNT];
int enemy_count;
__host__ __device__ enemy_container(): enemy_count(0) {}
};
typedef struct draw_data_t{
bullet_draw_info* bullet_draw_infos;
int* bullet_count;
player* player_info;
} draw_data;
struct bullet_properties{
point semi_size;
};
struct host_data{
enemy_container enemies;
host_to_device_data htd_data;
player main_player;
device_to_host_data dth_data;
shot_container shots;
int age;
int deaths;
int enemies_killed;
};
__device__ __host__ point rotate_point(const point& pt,const double theta){
point return_value(cos(theta)*pt.x - sin(theta)*pt.y,
cos(theta)*pt.y + sin(theta)*pt.x);
return return_value;
}
__device__ __host__ point transpose_point(const point& pt){
point return_value(pt.y,pt.x);
return return_value;
}
__device__ __host__ bullet_properties get_bullet_properties(bullet& bullet){
bullet_properties return_value;
switch (bullet.type)
{
case BASIC:
return_value.semi_size = point(2, 2);
break;
default:
break;
}
return return_value;
}
__global__ void container_initialize_all_bullet(bullet_container* data,
size_t bullet_count)
{
data->bullet_count = bullet_count;
int bullet_index = threadIdx.x;
while (bullet_index < bullet_count){
polar_point rt_vector = { 0, 0 };
data->bullet_slots[bullet_index].load.pos = point(rt_vector);
rt_vector.r = bullet_index*.0001;
rt_vector.t = bullet_index;
data->bullet_slots[bullet_index].load.vel = point(rt_vector);
rt_vector.r = -.001;
rt_vector.t = bullet_index;
data->bullet_slots[bullet_index].load.acc = point(rt_vector);
data->bullet_slots[bullet_index].load.theta = rt_vector.t;
data->bullet_slots[bullet_index].load.w = -.001;
data->bullet_slots[bullet_index].load.status = ACTIVE;
bullet_index += blockDim.x;
}
}
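// Note on the kernel structure used throughout this file: the host launches a single
// thread block (dimGrid is (1, 1) in main), so every kernel walks its data with a
// block-stride loop in which thread t handles elements t, t + blockDim.x,
// t + 2*blockDim.x, and so on, rather than the more common grid-stride pattern.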
__global__ void container_add_new_bullets(bullet_container* data, host_to_device_data* new_bullets){
	int old_bullet_count = data->bullet_count;
	int add_bullet_count = new_bullets->queue_count;
	// Every thread must have read the old count before it is overwritten below.
	__syncthreads();
	int bullet_index = threadIdx.x;
	while (bullet_index < add_bullet_count){
		data->bullet_slots[old_bullet_count + bullet_index].load = new_bullets->bullets[bullet_index];
		bullet_index += blockDim.x;
	}
	if (threadIdx.x == 0){
		data->bullet_count = old_bullet_count + add_bullet_count;
	}
}
__host__ __device__ bool in_bounds(point& pos, double tolerance){
if (abs(pos.x) > GAMEFIELD_SEMIWIDTH + tolerance ||
abs(pos.y) > GAMEFIELD_SEMIHEIGHT + tolerance){
return false;
}
else {
return true;
}
}
__device__ void check_bounds_bullet(bullet* bullet){
if (!in_bounds(bullet->pos, BULLET_TOLERANCE)){
bullet->status = DEAD;
}
}
__device__ void update_bullet(bullet* bullet){
bullet->pos.x += bullet->vel.x;
bullet->pos.y += bullet->vel.y;
bullet->vel.x += bullet->acc.x;
bullet->vel.y += bullet->acc.y;
bullet->theta += bullet->w;
bullet->age += 1;
bullet->theta++;
check_bounds_bullet(bullet);
}
__global__ void update_all_bullet(bullet* bullets,
size_t bullet_count)
{
int bullet_index = threadIdx.x;
while (bullet_index < bullet_count){
update_bullet(bullets + bullet_index);
bullet_index += blockDim.x;
}
}
__global__ void container_update_all_bullet(bullet_container* data)
{
int bullet_index = threadIdx.x;
int count = data->bullet_count;
while (bullet_index < count){
update_bullet(&(data->bullet_slots[bullet_index].load));
bullet_index += blockDim.x;
}
}
__global__ void mark_bullet_pull(bullet_container* container){
	// Pass 1: each thread owns slot_range_width consecutive slots and records, per slot,
	// how many DEAD slots it has seen so far in its own range (pull.delta); pull.need
	// marks the slots that survive compaction.
	int slot_range_width = 1 + (container->bullet_count - 1) / ((int)blockDim.x);
	int offset = 0;
	for (int i = 0; i < slot_range_width; ++i){
		int index = i + slot_range_width*threadIdx.x;
		if (container->bullet_slots[index].load.status == DEAD){
			++offset;
			container->bullet_slots[index].pull.need = false;
		} else {
			container->bullet_slots[index].pull.need = true;
		}
		container->bullet_slots[index].pull.delta = offset;
	}
	// Pass 2: block-wide prefix sum over the per-range totals (the delta stored in the
	// last slot of each range), so that pull.delta ends up as the number of DEAD slots
	// in [0, index] over the whole container.
	for (int k = 1; k <= blockDim.x; k = k << 1){
		__syncthreads();
		int delta = 0;
		if ((k & threadIdx.x) && ((threadIdx.x / k)*slot_range_width*k >= 1)){
			delta = container->bullet_slots[(threadIdx.x / k)*slot_range_width*k - 1].pull.delta;
		}
		// Make sure every thread has read the value it needs before any range is updated.
		__syncthreads();
		for (int i = 0; i < slot_range_width; ++i){
			int index = i + slot_range_width*threadIdx.x;
			container->bullet_slots[index].pull.delta += delta;
		}
	}
}
__global__ void relocate_all_bullet(volatile bullet_container* container){
	// Compact the slot array in place: each surviving bullet is pulled forward by
	// pull.delta, the number of DEAD slots in front of it (set by mark_bullet_pull).
	int bullet_index = threadIdx.x;
	int count = container->bullet_count;
	int delta = 0; // stays 0 if this thread never enters the loop (e.g. bullet_count == 0)
while (bullet_index < count){
bullet load;
load = *((bullet*) &(container->bullet_slots[bullet_index].load));
__syncthreads();
delta = container->bullet_slots[bullet_index].pull.delta;
if (container->bullet_slots[bullet_index].pull.need && (bullet_index - delta >= 0)){
*((bullet*)&(container->bullet_slots[bullet_index-delta].load)) = load;
}
bullet_index += blockDim.x;
}
if (bullet_index == count - 1 + blockDim.x){
container->bullet_count = count - delta;
}
}
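// Host-side reference for the two kernels above, kept only as a sketch for clarity
// (nothing in this file calls it). After mark_bullet_pull, pull.delta for slot i equals
// the number of DEAD slots in [0, i], so relocate_all_bullet copies each surviving
// bullet from slot i to slot i - delta and shrinks bullet_count accordingly.
inline void compact_bullets_reference(bullet_container& c){
	int dead_so_far = 0;
	for (int i = 0; i < c.bullet_count; ++i){
		if (c.bullet_slots[i].load.status == DEAD){
			++dead_so_far; // this slot is dropped
		}
		else if (dead_so_far > 0){
			// pull the live bullet forward over the dead slots seen so far
			c.bullet_slots[i - dead_so_far].load = c.bullet_slots[i].load;
		}
	}
	c.bullet_count -= dead_so_far;
}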
__device__ void extract_bullet_draw_info(bullet* bullet,
bullet_draw_info* output){
output->pos = bullet->pos;
output->theta = bullet->theta;
output->status = bullet->status;
output->type = bullet->type;
}
__global__ void container_extract_all_bullet_draw_info(bullet_container* b_container,
device_to_host_data* d_container){
int bullet_index = threadIdx.x;
d_container->bullet_count = b_container->bullet_count;
while (bullet_index < b_container->bullet_count){
extract_bullet_draw_info(&(b_container->bullet_slots[bullet_index].load),
&(d_container->draw_slots[bullet_index]));
bullet_index += blockDim.x;
}
d_container->collision_with_player = b_container->collision_with_player;
}
__device__ bool collide_against_player(bullet& bullet, player& main_player){
bullet_properties prop = get_bullet_properties(bullet);
point dist_thresh = (prop.semi_size + point(main_player.radius, main_player.radius));
point dist = rotate_point(bullet.pos - main_player.pos, -bullet.theta)* transpose_point(dist_thresh);
if (dist.x*dist.x + dist.y*dist.y < dist_thresh.x*dist_thresh.x*dist_thresh.y*dist_thresh.y){
bullet.status = DEAD;
return true;
}
return false;
}
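// Geometry of the test above: dist_thresh = (tx, ty) is the bullet's semi-size inflated
// by the player radius. The bullet/player offset is rotated into the bullet's local
// frame, and the comparison
//   (p.x*ty)^2 + (p.y*tx)^2 < (tx*ty)^2
// is equivalent to (p.x/tx)^2 + (p.y/ty)^2 < 1, i.e. the offset lies inside an
// axis-aligned ellipse with semi-axes tx and ty. Inflating the semi-axes by the player
// radius is the code's cheap approximation of the circle-vs-ellipse overlap test.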
__global__ void container_collide_against_player(bullet_container* b_container,
	player main_player){
	int bullet_index = threadIdx.x;
	// Clear the flag from a single thread and barrier, so that a late warp cannot
	// overwrite another warp's "collision found" result with false.
	if (threadIdx.x == 0){
		b_container->collision_with_player = false;
	}
	__syncthreads();
	bool collided = false;
	while (bullet_index < b_container->bullet_count){
		collided |= collide_against_player(b_container->bullet_slots[bullet_index].load, main_player);
		bullet_index += blockDim.x;
	}
	if (collided){
		b_container->collision_with_player = true;
	}
}
void pxlVertex2f(double x, double y){
glVertex2f(x / GAMEFIELD_SEMIWIDTH, y / GAMEFIELD_SEMIHEIGHT);
}
void pxlVertexPos(const point& pt){
glVertex2f(pt.x / GAMEFIELD_SEMIWIDTH, pt.y / GAMEFIELD_SEMIHEIGHT);
}
void gl_setup(GLFWwindow** window){
//Set the error callback
glfwSetErrorCallback(error_callback);
//Initialize GLFW
if (!glfwInit())
{
exit(EXIT_FAILURE);
}
//Set the GLFW window creation hints - these are optional
//glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); //Request a specific OpenGL version
//glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); //Request a specific OpenGL version
glfwWindowHint(GLFW_SAMPLES, 4); //Request 4x antialiasing
//glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
//Declare a window object
//glEnable(GL_COLOR_MATERIAL);
//Create a window and create its OpenGL context
*window = glfwCreateWindow(2*GAMEFIELD_SEMIWIDTH, 2*GAMEFIELD_SEMIHEIGHT, "Test Window", NULL, NULL);
//If the window couldn't be created
if (!*window)
{
fprintf(stderr, "Failed to open GLFW window.\n");
glfwTerminate();
exit(EXIT_FAILURE);
}
//This function makes the context of the specified window current on the calling thread.
glfwMakeContextCurrent(*window);
//Initialize GLEW
GLenum err = glewInit();
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
//If GLEW hasn't initialized
if (err != GLEW_OK)
{
fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
}
}
void queue_bullet(bullet& new_bullet, host_to_device_data& queue){
if (queue.queue_count < MAX_host_to_device_data_COUNT){
queue.bullets[queue.queue_count] = new_bullet;
++(queue.queue_count);
}
}
void queue_enemy(const enemy& new_enemy, host_data& game_data_h){
game_data_h.enemies.enemy_list[game_data_h.enemies.enemy_count] = new_enemy;
++game_data_h.enemies.enemy_count;
}
void queue_shot(const shot& new_shot, host_data& game_data_h){
game_data_h.shots.shot_list[game_data_h.shots.shot_count] = new_shot;
++game_data_h.shots.shot_count;
}
void draw_bullet(bullet_draw_info* info){
//glClear(GL_COLOR_BUFFER_BIT);
glBegin(GL_POLYGON); // Each set of 4 vertices form a quad
if (info->status == DEAD){
glColor3f(1.0, 0.0, 0.0); // Red
}
else {
glColor4f(1.0, 0.0, 1.0, 1.0);
}
for (int i = 0; i < 8; ++i){
pxlVertexPos(info->pos + point(polar_point(5, i * 2 * M_PI / 8)));
}
glEnd();
glBegin(GL_POLYGON); // Each set of 4 vertices form a quad
if (info->status == DEAD){
glColor3f(1.0, 0.0, 0.0); // Red
}
else {
glColor4f(1.0, 1.0, 1.0, .8);
}
for (int i = 0; i < 8; ++i){
pxlVertexPos(info->pos + point(polar_point(4, i * 2 * M_PI / 8)));
}
glEnd();
}
void draw_player(player& main_player){
glBegin(GL_QUADS); // Each set of 4 vertices form a quad
double ratio;
switch (main_player.status){
case FORM:
ratio = ((double)main_player.age) / PLAYER_FORM_TIME;
glColor4f(1.0, 0.5, 0.5, ratio);
pxlVertex2f(main_player.pos.x + 25, main_player.pos.y + 25);
pxlVertex2f(main_player.pos.x - 25, main_player.pos.y + 25);
pxlVertex2f(main_player.pos.x - 25, main_player.pos.y - 25);
pxlVertex2f(main_player.pos.x + 25, main_player.pos.y - 25);
break;
case ACTIVE:
if (main_player.invul && (main_player.age & 4)){
glColor4f(1.0, .6, .6, 1.0);
}
else {
glColor4f(1.0, 0.5, 0.5, 1.0);
}
pxlVertex2f(main_player.pos.x + 25, main_player.pos.y + 25);
pxlVertex2f(main_player.pos.x - 25, main_player.pos.y + 25);
pxlVertex2f(main_player.pos.x - 25, main_player.pos.y - 25);
pxlVertex2f(main_player.pos.x + 25, main_player.pos.y - 25);
break;
case FADE:
ratio = ((double)main_player.age) / PLAYER_FADE_TIME;
glColor4f(1.0, 0.5, 0.5, 1.0 - ratio);
pxlVertex2f(main_player.pos.x + 25 * (1.0 + ratio), main_player.pos.y + 25 * (1.0 + ratio));
pxlVertex2f(main_player.pos.x - 25 * (1.0 + ratio), main_player.pos.y + 25 * (1.0 + ratio));
pxlVertex2f(main_player.pos.x - 25 * (1.0 + ratio), main_player.pos.y - 25 * (1.0 + ratio));
pxlVertex2f(main_player.pos.x + 25 * (1.0 + ratio), main_player.pos.y - 25 * (1.0 + ratio));
break;
case DEAD:
glColor4f(0.0, 0.0, 0.0, .0);
break;
default:
glColor3f(0.0, 0.0, 0.0);
break;
}
glEnd();
}
void draw_player_hitbox(player& main_player){
glBegin(GL_QUADS);
glColor3f(1.0, 1.0, 1.0);
pxlVertex2f(main_player.pos.x + 3, main_player.pos.y + 3);
pxlVertex2f(main_player.pos.x - 3, main_player.pos.y + 3);
pxlVertex2f(main_player.pos.x - 3, main_player.pos.y - 3);
pxlVertex2f(main_player.pos.x + 3, main_player.pos.y - 3);
glEnd();
}
void draw_enemy(enemy& enemy){
glBegin(GL_QUADS);
double mult = 1;
glColor3f(1.0, 0.5, 0.0);
if (enemy.status == FADE){
double ratio = (1.0*enemy.age) / (ENEMY_FADE_TIME);
mult = 1 + ratio;
glColor4f(1.0, 0.5, 0.0, 1-ratio);
}
pxlVertex2f(enemy.pos.x + mult*enemy.radius, enemy.pos.y + mult*enemy.radius);
pxlVertex2f(enemy.pos.x - mult*enemy.radius, enemy.pos.y + mult*enemy.radius);
pxlVertex2f(enemy.pos.x - mult*enemy.radius, enemy.pos.y - mult*enemy.radius);
pxlVertex2f(enemy.pos.x + mult*enemy.radius, enemy.pos.y - mult*enemy.radius);
glEnd();
}
void draw_shot(shot& shot){
glBegin(GL_QUADS);
double t = polar_point(shot.vel).t - M_PI_2;
point c1 = rotate_point(point(1, 1)*shot.semi_size, t);
point c2 = rotate_point(point(1, -1)*shot.semi_size, t);
point c3 = rotate_point(point(-1, -1)* shot.semi_size, t);
point c4 = rotate_point(point(-1, 1)*shot.semi_size, t);
if (shot.status == ACTIVE)
{
glColor4f(0.5, 0.5, 1.0, .5);
pxlVertexPos(shot.pos + c1);
pxlVertexPos(shot.pos + c2);
pxlVertexPos(shot.pos + c3);
pxlVertexPos(shot.pos + c4);
}
if (shot.status == FADE){
double ratio = (1.0*shot.age) / SHOT_FADE_TIME;
point ratio_pt(1.0 + ratio, 1.0 + ratio);
glColor4f(0.5, 0.5, 1.0, .5*(1-ratio));
pxlVertexPos(shot.pos + c1*ratio_pt);
pxlVertexPos(shot.pos + c2*ratio_pt);
pxlVertexPos(shot.pos + c3*ratio_pt);
pxlVertexPos(shot.pos + c4*ratio_pt);
}
glEnd();
}
void draw_screen(host_data* game_data){
glClear(GL_COLOR_BUFFER_BIT);
draw_player(game_data->main_player);
for (int i = 0; i < game_data->enemies.enemy_count; ++i){
draw_enemy((game_data->enemies.enemy_list[i]));
}
for (int i = 0; i < game_data->shots.shot_count; ++i){
draw_shot((game_data->shots.shot_list[i]));
}
for (int i = game_data->dth_data.bullet_count - 1; i >= 0; --i){
draw_bullet(&(game_data->dth_data.draw_slots[i]));
}
draw_player_hitbox(game_data->main_player);
}
__device__ __host__ point set_mag_point(point& pt, double mag){
polar_point polar_pt(pt);
polar_pt.r = mag;
point return_value(polar_pt);
return return_value;
}
void set_player_velocity(player& main_player, GLFWwindow *window){
if (glfwGetKey(window, GLFW_KEY_UP)){
main_player.vel.y = 1;
}
else if (glfwGetKey(window, GLFW_KEY_DOWN)){
main_player.vel.y = -1;
}
else {
main_player.vel.y = 0;
}
if (glfwGetKey(window, GLFW_KEY_RIGHT)){
main_player.vel.x = 1;
}
else if (glfwGetKey(window, GLFW_KEY_LEFT)){
main_player.vel.x = -1;
}
else {
main_player.vel.x = 0;
}
if (glfwGetKey(window, GLFW_KEY_LEFT_SHIFT)){
main_player.is_focus = true;
if (main_player.vel.x != 0 || main_player.vel.y != 0){
main_player.vel = set_mag_point(main_player.vel, 1.0);
}
}
else {
main_player.is_focus = false;
if (main_player.vel.x != 0 || main_player.vel.y != 0){
main_player.vel = set_mag_point(main_player.vel, 3.0);
}
}
if (glfwGetKey(window, GLFW_KEY_Z)){
main_player.is_shooting = true;
}
else {
main_player.is_shooting = false;
}
}
void move_player(player& main_player){
main_player.pos.x += main_player.vel.x;
main_player.pos.y += main_player.vel.y;
if (abs(main_player.pos.x) > GAMEFIELD_SEMIWIDTH + PLAYER_TOLERANCE){
main_player.pos.x = (GAMEFIELD_SEMIWIDTH + PLAYER_TOLERANCE) *
((main_player.pos.x > 0) - (main_player.pos.x < 0));
}
if (abs(main_player.pos.y) > GAMEFIELD_SEMIHEIGHT + PLAYER_TOLERANCE){
main_player.pos.y = (GAMEFIELD_SEMIHEIGHT + PLAYER_TOLERANCE) *
((main_player.pos.y > 0) - (main_player.pos.y < 0));
}
}
void update_player(player& main_player, host_data& game_data_h){
main_player.is_hit |= game_data_h.dth_data.collision_with_player;
switch (main_player.status){
case FORM:
main_player.invul = true;
if (main_player.age > PLAYER_FORM_TIME){
main_player.status = ACTIVE;
main_player.age = 0;
}
break;
case ACTIVE:
if (main_player.age == PLAYER_ACTIVE_INVUL_TIME){
main_player.invul = false;
}
move_player(main_player);
if (main_player.is_hit && !main_player.invul){
main_player.status = FADE;
main_player.age = 0;
}
if (main_player.age % 6 == 0 && main_player.is_shooting){
shot new_shot;
new_shot.damage = 8;
new_shot.semi_size = point(3, 12);
new_shot.status = FORM;
new_shot.age = 0;
double spread = main_player.is_focus ? 1.0 : 3.0;
new_shot.vel = point(0, 10);
new_shot.pos = main_player.pos + point(-15, -10);
queue_shot(new_shot, game_data_h);
new_shot.pos = main_player.pos + point(15, -10);
queue_shot(new_shot, game_data_h);
new_shot.vel = polar_point(8, M_PI_2 + .02*spread);
new_shot.pos = main_player.pos + point(-4 * spread, 5);
queue_shot(new_shot, game_data_h);
new_shot.vel = polar_point(8, M_PI_2 - .02*spread);
new_shot.pos = main_player.pos + point(4 * spread, 5);
queue_shot(new_shot, game_data_h);
new_shot.vel = polar_point(8, M_PI_2 - .06*spread);
new_shot.pos = main_player.pos + point(12 * spread, 5);
queue_shot(new_shot, game_data_h);
new_shot.vel = polar_point(8, M_PI_2 + .06*spread);
new_shot.pos = main_player.pos + point(-12 * spread, 5);
queue_shot(new_shot, game_data_h);
}
break;
case FADE:
if (main_player.age > PLAYER_FADE_TIME){
main_player.status = DEAD;
main_player.age = 0;
game_data_h.deaths++;
}
break;
case DEAD:
if (main_player.age > PLAYER_DEAD_TIME){
main_player.status = FORM;
main_player.age = 0;
}
break;
default:
break;
}
main_player.is_hit = false;
++main_player.age;
}
void generic_enemy_update(enemy& self, host_data& game_data_h){
point diff;
switch (self.status){
case FORM:
if (self.age > ENEMY_FORM_TIME){
self.status = ACTIVE;
self.age = 0;
}
break;
case ACTIVE:
self.pos = self.pos + self.vel;
diff = self.pos - game_data_h.main_player.pos;
if (abs(diff.x) < self.radius + game_data_h.main_player.radius &&
abs(diff.y) < self.radius + game_data_h.main_player.radius){
game_data_h.main_player.is_hit = true;
}
if (!in_bounds(self.pos, ENEMY_TOLERANCE) || self.hp <= 0){
self.status = FADE;
self.age = 0;
if (self.hp <= 0){
game_data_h.enemies_killed++;
}
}
break;
case FADE:
if (self.age > ENEMY_FADE_TIME){
self.status = DEAD;
self.age = 0;
}
break;
case DEAD:
break;
default:
break;
}
++self.age;
}
void update_function_1(enemy& self, host_data& game_data_h){
generic_enemy_update(self, game_data_h);
if (self.age % 60 == 30){
bullet sample;
sample.pos = self.pos;
polar_point diff = game_data_h.main_player.pos - self.pos;
for (int dir = -1; dir <= 1; dir += 2){
for (int mag = 0; mag < 4; ++mag){
polar_point new_vel((.5+.3*mag), diff.t + dir*.1);
sample.vel = new_vel;
queue_bullet(sample, game_data_h.htd_data);
}
}
}
}
void update_function_2(enemy& self, host_data& game_data_h){
generic_enemy_update(self, game_data_h);
if (self.age % 120 == 0 && self.pos.y > GAMEFIELD_SEMIHEIGHT*.3){
bullet sample;
polar_point diff = game_data_h.main_player.pos - self.pos;
for (int dir = 0; dir < 32; ++dir){
double t = diff.t + dir * 2 * M_PI / 32;
sample.pos = self.pos + point(polar_point(50,t));
for (int j = -1; j <= 1; j += 2){
for (int mag = 5; mag <= 11; mag += 1){
polar_point new_vel(mag*.1*j, t + M_PI_2);
sample.vel = new_vel;
queue_bullet(sample, game_data_h.htd_data);
}
}
}
}
}
void update_function_3(enemy& self, host_data& game_data_h){
generic_enemy_update(self, game_data_h);
if (self.age % 60 == 0 && self.pos.y > GAMEFIELD_SEMIHEIGHT*.3){
bullet sample;
polar_point diff = game_data_h.main_player.pos - self.pos;
for (int dir = -3; dir <= 3; ++dir){
double t = diff.t + dir * .2;
sample.pos = self.pos;
for (int mag = 3; mag <= 5; mag += 1){
polar_point new_vel(mag*.2, t);
sample.vel = new_vel;
queue_bullet(sample, game_data_h.htd_data);
}
}
}
}
void update_function_4(enemy& self, host_data& game_data_h){
generic_enemy_update(self, game_data_h);
if (self.age % 10 == 0 && self.pos.y > GAMEFIELD_SEMIHEIGHT*-.1){
bullet sample;
for (int dir = 0; dir < 8; ++dir){
double t = dir * 2 * M_PI / 8 + self.age;
sample.pos = self.pos + point(polar_point(30, t + M_PI_2));
polar_point new_acc(.001, t);
sample.acc = new_acc;
queue_bullet(sample, game_data_h.htd_data);
}
}
}
void update_function_5(enemy& self, host_data& game_data_h){
generic_enemy_update(self, game_data_h);
int arms = 7;
if (self.age % 2 == 0 && self.age > 300){
bullet sample;
self.vel = point(0, 0);
for (int dir = 0; dir < arms; ++dir){
double t = dir * 2 * M_PI / arms + 0.0002*self.age*self.age;
sample.pos = self.pos;
polar_point new_vel(1.0, t);
sample.vel = new_vel;
polar_point new_acc(-.005, t);
sample.acc = new_acc;
queue_bullet(sample, game_data_h.htd_data);
}
}
}
enemy enemy1(point pos, point vel){
enemy return_value;
return_value.pos = pos;
return_value.vel = vel;
return_value.status = FORM;
return_value.radius = 20;
return_value.update = update_function_2;
return_value.hp = 2000;
return_value.age = 0;
return return_value;
}
enemy set_enemy(point pos, point vel, int hp, void(*update_function)(enemy&, host_data&)){
enemy return_value;
return_value.pos = pos;
return_value.vel = vel;
return_value.status = FORM;
return_value.radius = 20;
return_value.update = update_function;
return_value.hp = hp;
return_value.age = 0;
return return_value;
}
void update_enemies(host_data& game_data_h){
for (int i = 0; i < game_data_h.enemies.enemy_count; ++i){
game_data_h.enemies.enemy_list[i].update(
game_data_h.enemies.enemy_list[i], game_data_h);
}
int j = 0;
for (int i = 0; i < game_data_h.enemies.enemy_count; ++i){
if (game_data_h.enemies.enemy_list[i].status != DEAD){
game_data_h.enemies.enemy_list[j] = game_data_h.enemies.enemy_list[i];
++j;
}
}
game_data_h.enemies.enemy_count = j;
}
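// The remove-the-dead loop above is a stable, in-place compaction. An equivalent
// formulation with the standard library (sketch only; it would also need
// #include <algorithm> at the top of the file) is:
//
//   auto first = game_data_h.enemies.enemy_list;
//   auto last  = first + game_data_h.enemies.enemy_count;
//   game_data_h.enemies.enemy_count = static_cast<int>(
//       std::remove_if(first, last,
//                      [](const enemy& e){ return e.status == DEAD; }) - first);
//
// update_shots below uses the same pattern for the shot list.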
void update_shot(shot& shot, host_data& game_data_h){
point diff;
polar_point polar_vel;
switch (shot.status){
case FORM:
shot.status = ACTIVE;
shot.age = 0;
break;
case ACTIVE:
shot.pos = shot.pos + shot.vel;
polar_vel = shot.vel;
for (int i = 0; i < game_data_h.enemies.enemy_count; ++i){
diff = rotate_point(game_data_h.enemies.enemy_list[i].pos - shot.pos, -polar_vel.t);
if (abs(diff.x) < shot.semi_size.x + game_data_h.enemies.enemy_list[i].radius &&
abs(diff.y) < shot.semi_size.y + game_data_h.enemies.enemy_list[i].radius){
game_data_h.enemies.enemy_list[i].hp -= shot.damage;
shot.status = FADE;
shot.age = 0;
}
}
if (!in_bounds(shot.pos, SHOT_TOLERANCE)){
shot.status = FADE;
shot.age = 0;
}
break;
case FADE:
shot.pos = shot.pos + shot.vel;
if (shot.age > SHOT_FADE_TIME){
shot.status = DEAD;
shot.age = 0;
}
break;
case DEAD:
break;
default:
break;
}
++shot.age;
}
void update_shots(host_data& game_data_h){
for (int i = 0; i < game_data_h.shots.shot_count; ++i){
update_shot(
game_data_h.shots.shot_list[i], game_data_h);
}
int j = 0;
for (int i = 0; i < game_data_h.shots.shot_count; ++i){
if (game_data_h.shots.shot_list[i].status != DEAD){
game_data_h.shots.shot_list[j] = game_data_h.shots.shot_list[i];
++j;
}
}
game_data_h.shots.shot_count = j;
}
void game_script(host_data& game_data_h){
if (game_data_h.age == 3000){
queue_enemy(enemy1(point(GAMEFIELD_SEMIWIDTH*-.3, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, 3 * M_PI_2)), game_data_h);
}
if (game_data_h.age == 2200){
queue_enemy(enemy1(point(GAMEFIELD_SEMIWIDTH*.3, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, 3 * M_PI_2)), game_data_h);
}
if (game_data_h.age == 1120 || game_data_h.age == 1120 + 60 || game_data_h.age == 1120 + 120){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.5, 7.5 * M_PI_4), 300, update_function_4), game_data_h);
}
if (game_data_h.age == 1480 || game_data_h.age == 1480 + 60 || game_data_h.age == 1480 + 120){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.5, 4.5 * M_PI_4), 300, update_function_4), game_data_h);
}
if (game_data_h.age == 120 || game_data_h.age == 120 + 60 || game_data_h.age == 120 + 120){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.5, 6.5 * M_PI_4), 300, update_function_3), game_data_h);
}
if (game_data_h.age == 480 || game_data_h.age == 480 + 60 || game_data_h.age == 480 + 120){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.5, 5.5 * M_PI_4), 300, update_function_3), game_data_h);
}
if (game_data_h.age >= 4000 && game_data_h.age <= 4500 && game_data_h.age%50==0){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*.1, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(2.5, 7 * M_PI_4), 100, update_function_1), game_data_h);
}
if (game_data_h.age == 5000){
for (int i = 0; i < 5; ++i){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.0, (6 + .5*i) * M_PI_4), 200, update_function_4), game_data_h);
}
}
if (game_data_h.age == 6000){
for (int i = 0; i < 5; ++i){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*+.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.0, (6 - .5*i) * M_PI_4), 200, update_function_4), game_data_h);
}
}
if (game_data_h.age == 7000){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*+.3, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, (6) * M_PI_4), 700, update_function_2), game_data_h);
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.0, (6 + .5*1) * M_PI_4), 100, update_function_4), game_data_h);
}
if (game_data_h.age == 8000){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.3, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, (6) * M_PI_4), 700, update_function_2), game_data_h);
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*+.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.0, (6 - .5 * 1) * M_PI_4), 100, update_function_4), game_data_h);
}
if (game_data_h.age == 9000){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.6, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, (6) * M_PI_4), 700, update_function_2), game_data_h);
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*+.6, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, (6) * M_PI_4), 700, update_function_2), game_data_h);
}
if (game_data_h.age == 9060){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH * 0, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, (6) * M_PI_4), 700, update_function_2), game_data_h);
}
if (game_data_h.age == 10000){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH * 0, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(0.5, (6) * M_PI_4), 10000, update_function_5), game_data_h);
}
++game_data_h.age;
}
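// Per-frame pipeline in main() below, roughly in order:
// 1. the bullets queued by the host (htd_data) are copied host-to-device on stream4;
// 2. shots, enemies, and the level script are updated on the host;
// 3. draw info for the previous simulation step is extracted on stream2 while the
//    frame is rendered from the last device-to-host snapshot (dth_data);
// 4. bullets are integrated, collided against the player, and the new bullets are
//    appended on stream1, while keyboard input and the player are updated on the host;
// 5. dead bullets are compacted out (mark_bullet_pull + relocate_all_bullet) and the
//    draw snapshot is copied device-to-host on stream3 for the next frame.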
int main()
{
bullet_container* data_d;
device_to_host_data* draw_d;
//device_to_host_data* draw_h;
host_to_device_data* new_host_to_device_data_d;
//host_to_device_data* new_host_to_device_data_h;
host_data* game_data_h;
GLFWwindow* window;
gl_setup(&window);
const int bullets_count = 0;
const int bullets_size = MAX_BULLET_COUNT*sizeof(bullet);
const int bullet_draw_infos_size = MAX_BULLET_COUNT*sizeof(bullet_draw_info);
dim3 dimBlock(block_width, block_height);
dim3 dimGrid(1, 1);
hipMalloc((void**)&data_d, sizeof(bullet_container));
	hipMalloc((void**)&draw_d, sizeof(device_to_host_data));
hipMalloc((void**)&new_host_to_device_data_d, sizeof(host_to_device_data));
/*hipHostMalloc((void**)&draw_h, sizeof(device_to_host_data));
hipHostMalloc((void**)&new_host_to_device_data_h, sizeof(host_to_device_data));*/
hipHostMalloc((void**)&game_data_h, sizeof(host_data));
/*draw_data_pointers.bullet_count = &(draw_h->bullet_count);
draw_data_pointers.bullet_draw_infos = (draw_h->draw_slots);
draw_data_pointers.player_info = &(main_player);*/
game_data_h->age = 0;
game_data_h->main_player = player();
hipStream_t stream1, stream2, stream3, stream4;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
hipStreamCreate(&stream4);
	hipLaunchKernelGGL(container_initialize_all_bullet, dimGrid, dimBlock, 0, stream4,
		data_d,
		bullets_count);
hipStreamSynchronize(stream4);
//Set a background color
glClearColor(0.0f, 0.0f, .5f, 0.0f);
double time = glfwGetTime();
const double FRAME_PERIOD = 1.0l / 60.0l;
game_data_h->htd_data.queue_count = 0;
//Main Loop
do
{
if (glfwGetTime() - time >= FRAME_PERIOD){
printf("Frame Rate: %f\n Bullets: %d\n Deaths: %d\n Enemies Killed: %d\n",
1.0 / (glfwGetTime() - time),
game_data_h->dth_data.bullet_count,
game_data_h->deaths,
game_data_h->enemies_killed
);
time = glfwGetTime();
//test_queue_bullet(game_data_h->htd_data);
// move bullets to queue
if (hipSuccess != hipMemcpyAsync(new_host_to_device_data_d,
&(game_data_h->htd_data),
sizeof(host_to_device_data), hipMemcpyHostToDevice, stream4)){
printf("failure memcpy htd\n");
return 1;
}
update_shots(*game_data_h);
update_enemies(*game_data_h);
game_script(*game_data_h);
// reset queue
hipDeviceSynchronize();
game_data_h->htd_data.queue_count = 0;
			hipLaunchKernelGGL(container_extract_all_bullet_draw_info, dimGrid, dimBlock, 0, stream2,
				data_d, draw_d);
glClear(GL_COLOR_BUFFER_BIT);
draw_screen(game_data_h);
hipDeviceSynchronize();
			hipLaunchKernelGGL(container_update_all_bullet, dimGrid, dimBlock, 0, stream1, data_d);
			hipLaunchKernelGGL(container_collide_against_player, dimGrid, dimBlock, 0, stream1, data_d, game_data_h->main_player);
			hipLaunchKernelGGL(container_add_new_bullets, dimGrid, dimBlock, 0, stream1, data_d, new_host_to_device_data_d);
set_player_velocity(game_data_h->main_player, window);
update_player(game_data_h->main_player, *game_data_h);
hipDeviceSynchronize();
			hipLaunchKernelGGL(mark_bullet_pull, dimGrid, dimBlock, 0, stream1, data_d);
			hipLaunchKernelGGL(relocate_all_bullet, dimGrid, dimBlock, 0, stream1, data_d);
if (hipSuccess != hipMemcpyAsync(&(game_data_h->dth_data), draw_d,
sizeof(device_to_host_data), hipMemcpyDeviceToHost, stream3)){
printf("failure memcpy dth\n");
return 1;
}
//Swap buffers
glfwSwapBuffers(window);
//Get and organize events, like keyboard and mouse input, window resizing, etc...
glfwPollEvents();
}
} //Check if the ESC key had been pressed or if the window had been closed
while (!glfwWindowShouldClose(window));
hipFree(data_d);
hipFree(draw_d);
hipFree(new_host_to_device_data_d);
hipHostFree(game_data_h);
//Close OpenGL window and terminate GLFW
glfwDestroyWindow(window);
//Finalize and clean up GLFW
glfwTerminate();
exit(EXIT_SUCCESS);
}
//Define an error callback
static void error_callback(int error, const char* description)
{
fputs(description, stderr);
_fgetchar();
}
|
2dd9d67b829622e202ed50b74be370a3f28003e1.cu
|
// CUDA + OpenGL bullet-hell demo: bullets are simulated, collision-tested, and
// compacted on the GPU while the player, enemies, and shots are updated on the host
// each frame.
// Header adapted from Ingemar Ragnemalm's CUDA "hello world" example, 2010:
// http://computer-graphics.se/hello-world-for-cuda.html
//Include GLEW
#include <GL/glew.h>
//Include GLFW
#include <GLFW/glfw3.h>
#include <stdio.h>
#define _USE_MATH_DEFINES
#include <math.h>
const int block_width = 1024;
const int block_height = 1;
const int MAX_BULLET_COUNT = 100000;
const int MAX_ENEMY_COUNT = 100;
const int MAX_SHOT_COUNT = 1000;
const int MAX_host_to_device_data_COUNT = 1000;
const int GAMEFIELD_SEMIWIDTH = 320;//6 * 32;
const int GAMEFIELD_SEMIHEIGHT = 240;//7 * 32;
const int PLAYER_TOLERANCE = -20;
const int BULLET_TOLERANCE = 50;
const int ENEMY_TOLERANCE = 100;
const int SHOT_TOLERANCE = 100;
const int PLAYER_FORM_TIME = 15;
const int PLAYER_ACTIVE_INVUL_TIME = 180;
const int PLAYER_FADE_TIME = 15;
const int PLAYER_DEAD_TIME = 30;
const int ENEMY_FORM_TIME = 0;
const int ENEMY_FADE_TIME = 15;
const int SHOT_FADE_TIME = 5;
enum entity_status { FORM, ACTIVE, FADE, DEAD };
enum bullet_type { BASIC };
//Define an error callback
static void error_callback(int error, const char* description);
struct point;
struct polar_point;
struct host_data;
struct point{
double x;
double y;
__host__ __device__ point();
__host__ __device__ point(const point& p);
__host__ __device__ point(double x, double y);
__host__ __device__ point(const polar_point& p);
};
struct polar_point{
double r;
double t;
__host__ __device__ polar_point();
__host__ __device__ polar_point(const polar_point& p);
__host__ __device__ polar_point(double r, double t);
__host__ __device__ polar_point(const point& p);
};
point::point() : x(0), y(0){}
point::point(const point& p) : x(p.x), y(p.y){}
point::point(double x, double y) : x(x), y(y){}
point::point(const polar_point& p) {
x = p.r*cos(p.t);
y = p.r*sin(p.t);
}
// Operands are taken by const reference so that temporaries
// (e.g. point(1, 1)*shot.semi_size) bind in standard C++.
__host__ __device__ point operator+(const point & a, const point & b){
	point return_value(a.x + b.x, a.y + b.y);
	return return_value;
}
__host__ __device__ point operator*(const point & a, const point & b){
	point return_value(a.x*b.x, a.y*b.y);
	return return_value;
}
__host__ __device__ point operator/(const point & a, const point & b){
	point return_value(a.x/b.x, a.y/b.y);
	return return_value;
}
__host__ __device__ point operator-(const point & a, const point & b){
	point return_value(a.x-b.x, a.y-b.y);
	return return_value;
}
polar_point::polar_point() : r(0), t(0){};
polar_point::polar_point(const polar_point& p) : r(p.r), t(p.t){}
polar_point::polar_point(double r, double t) : r(r), t(t){}
polar_point::polar_point(const point& p) {
r = hypot(p.x, p.y);
t = ((p.x != 0) || (p.y != 0)) ?
atan2(p.y, p.x) :
0;
}
struct shot{
point pos;
point vel;
point semi_size;
int damage;
entity_status status;
int age;
};
struct shot_container{
shot shot_list[MAX_SHOT_COUNT];
int shot_count;
};
typedef struct bullet_t{
point pos;
point vel;
point acc;
entity_status status;
bullet_type type;
int age;
double theta;
double w;
	__host__ __device__ bullet_t() : status(FORM), type(BASIC), age(0), theta(0), w(0){}
	__host__ __device__ bullet_t(const bullet_t& b) : pos(b.pos), vel(b.vel), acc(b.acc),
		status(b.status), type(b.type), age(b.age),
		theta(b.theta), w(b.w) {}
} bullet;
typedef struct pull_info_t{
bool need;
int delta;
} pull_info;
typedef struct bullet_slot_t{
bullet load;
pull_info pull;
} bullet_slot;
typedef struct bullet_draw_info_t{
point pos;
double theta;
entity_status status;
bullet_type type;
} bullet_draw_info;
typedef struct bullet_container_t{
bullet_slot bullet_slots[MAX_BULLET_COUNT];
int bullet_count;
bool collision_with_player;
} bullet_container;
typedef struct device_to_host_data_t{
bullet_draw_info draw_slots[MAX_BULLET_COUNT];
int bullet_count;
bool collision_with_player;
} device_to_host_data;
typedef struct host_to_device_data_t{
bullet bullets[MAX_host_to_device_data_COUNT];
int queue_count;
} host_to_device_data;
struct player{
point pos;
point vel;
static const int radius = 3;
entity_status status;
int age;
bool invul;
bool is_hit;
bool is_focus;
bool is_shooting;
	__host__ __device__ player() : pos(0, -GAMEFIELD_SEMIHEIGHT*.8), vel(0, 0), status(FORM), age(0),
		invul(false), is_hit(false), is_focus(false), is_shooting(false){}
};
struct enemy{
point pos;
point vel;
entity_status status;
int hp;
int age;
double radius;
void(*update) (enemy&, host_data&);
};
struct enemy_container{
enemy enemy_list[MAX_ENEMY_COUNT];
int enemy_count;
__host__ __device__ enemy_container(): enemy_count(0) {}
};
typedef struct draw_data_t{
bullet_draw_info* bullet_draw_infos;
int* bullet_count;
player* player_info;
} draw_data;
struct bullet_properties{
point semi_size;
};
struct host_data{
enemy_container enemies;
host_to_device_data htd_data;
player main_player;
device_to_host_data dth_data;
shot_container shots;
int age;
int deaths;
int enemies_killed;
};
__device__ __host__ point rotate_point(const point& pt,const double theta){
point return_value(cos(theta)*pt.x - sin(theta)*pt.y,
cos(theta)*pt.y + sin(theta)*pt.x);
return return_value;
}
__device__ __host__ point transpose_point(const point& pt){
point return_value(pt.y,pt.x);
return return_value;
}
__device__ __host__ bullet_properties get_bullet_properties(bullet& bullet){
bullet_properties return_value;
switch (bullet.type)
{
case BASIC:
return_value.semi_size = point(2, 2);
break;
default:
break;
}
return return_value;
}
__global__ void container_initialize_all_bullet(bullet_container* data,
size_t bullet_count)
{
data->bullet_count = bullet_count;
int bullet_index = threadIdx.x;
while (bullet_index < bullet_count){
polar_point rt_vector = { 0, 0 };
data->bullet_slots[bullet_index].load.pos = point(rt_vector);
rt_vector.r = bullet_index*.0001;
rt_vector.t = bullet_index;
data->bullet_slots[bullet_index].load.vel = point(rt_vector);
rt_vector.r = -.001;
rt_vector.t = bullet_index;
data->bullet_slots[bullet_index].load.acc = point(rt_vector);
data->bullet_slots[bullet_index].load.theta = rt_vector.t;
data->bullet_slots[bullet_index].load.w = -.001;
data->bullet_slots[bullet_index].load.status = ACTIVE;
bullet_index += blockDim.x;
}
}
__global__ void container_add_new_bullets(bullet_container* data, host_to_device_data* new_bullets){
	int old_bullet_count = data->bullet_count;
	int add_bullet_count = new_bullets->queue_count;
	// Every thread must have read the old count before it is overwritten below.
	__syncthreads();
	int bullet_index = threadIdx.x;
	while (bullet_index < add_bullet_count){
		data->bullet_slots[old_bullet_count + bullet_index].load = new_bullets->bullets[bullet_index];
		bullet_index += blockDim.x;
	}
	if (threadIdx.x == 0){
		data->bullet_count = old_bullet_count + add_bullet_count;
	}
}
__host__ __device__ bool in_bounds(point& pos, double tolerance){
if (abs(pos.x) > GAMEFIELD_SEMIWIDTH + tolerance ||
abs(pos.y) > GAMEFIELD_SEMIHEIGHT + tolerance){
return false;
}
else {
return true;
}
}
__device__ void check_bounds_bullet(bullet* bullet){
if (!in_bounds(bullet->pos, BULLET_TOLERANCE)){
bullet->status = DEAD;
}
}
__device__ void update_bullet(bullet* bullet){
bullet->pos.x += bullet->vel.x;
bullet->pos.y += bullet->vel.y;
bullet->vel.x += bullet->acc.x;
bullet->vel.y += bullet->acc.y;
bullet->theta += bullet->w;
bullet->age += 1;
bullet->theta++;
check_bounds_bullet(bullet);
}
__global__ void update_all_bullet(bullet* bullets,
size_t bullet_count)
{
int bullet_index = threadIdx.x;
while (bullet_index < bullet_count){
update_bullet(bullets + bullet_index);
bullet_index += blockDim.x;
}
}
__global__ void container_update_all_bullet(bullet_container* data)
{
int bullet_index = threadIdx.x;
int count = data->bullet_count;
while (bullet_index < count){
update_bullet(&(data->bullet_slots[bullet_index].load));
bullet_index += blockDim.x;
}
}
__global__ void mark_bullet_pull(bullet_container* container){
	// Pass 1: each thread owns slot_range_width consecutive slots and records, per slot,
	// how many DEAD slots it has seen so far in its own range (pull.delta); pull.need
	// marks the slots that survive compaction.
	int slot_range_width = 1 + (container->bullet_count - 1) / ((int)blockDim.x);
	int offset = 0;
	for (int i = 0; i < slot_range_width; ++i){
		int index = i + slot_range_width*threadIdx.x;
		if (container->bullet_slots[index].load.status == DEAD){
			++offset;
			container->bullet_slots[index].pull.need = false;
		} else {
			container->bullet_slots[index].pull.need = true;
		}
		container->bullet_slots[index].pull.delta = offset;
	}
	// Pass 2: block-wide prefix sum over the per-range totals (the delta stored in the
	// last slot of each range), so that pull.delta ends up as the number of DEAD slots
	// in [0, index] over the whole container.
	for (int k = 1; k <= blockDim.x; k = k << 1){
		__syncthreads();
		int delta = 0;
		if ((k & threadIdx.x) && ((threadIdx.x / k)*slot_range_width*k >= 1)){
			delta = container->bullet_slots[(threadIdx.x / k)*slot_range_width*k - 1].pull.delta;
		}
		// Make sure every thread has read the value it needs before any range is updated.
		__syncthreads();
		for (int i = 0; i < slot_range_width; ++i){
			int index = i + slot_range_width*threadIdx.x;
			container->bullet_slots[index].pull.delta += delta;
		}
	}
}
__global__ void relocate_all_bullet(volatile bullet_container* container){
	// Compact the slot array in place: each surviving bullet is pulled forward by
	// pull.delta, the number of DEAD slots in front of it (set by mark_bullet_pull).
	int bullet_index = threadIdx.x;
	int count = container->bullet_count;
	int delta = 0; // stays 0 if this thread never enters the loop (e.g. bullet_count == 0)
while (bullet_index < count){
bullet load;
load = *((bullet*) &(container->bullet_slots[bullet_index].load));
__syncthreads();
delta = container->bullet_slots[bullet_index].pull.delta;
if (container->bullet_slots[bullet_index].pull.need && (bullet_index - delta >= 0)){
*((bullet*)&(container->bullet_slots[bullet_index-delta].load)) = load;
}
bullet_index += blockDim.x;
}
if (bullet_index == count - 1 + blockDim.x){
container->bullet_count = count - delta;
}
}
__device__ void extract_bullet_draw_info(bullet* bullet,
bullet_draw_info* output){
output->pos = bullet->pos;
output->theta = bullet->theta;
output->status = bullet->status;
output->type = bullet->type;
}
__global__ void container_extract_all_bullet_draw_info(bullet_container* b_container,
device_to_host_data* d_container){
int bullet_index = threadIdx.x;
d_container->bullet_count = b_container->bullet_count;
while (bullet_index < b_container->bullet_count){
extract_bullet_draw_info(&(b_container->bullet_slots[bullet_index].load),
&(d_container->draw_slots[bullet_index]));
bullet_index += blockDim.x;
}
d_container->collision_with_player = b_container->collision_with_player;
}
__device__ bool collide_against_player(bullet& bullet, player& main_player){
bullet_properties prop = get_bullet_properties(bullet);
point dist_thresh = (prop.semi_size + point(main_player.radius, main_player.radius));
point dist = rotate_point(bullet.pos - main_player.pos, -bullet.theta)* transpose_point(dist_thresh);
if (dist.x*dist.x + dist.y*dist.y < dist_thresh.x*dist_thresh.x*dist_thresh.y*dist_thresh.y){
bullet.status = DEAD;
return true;
}
return false;
}
__global__ void container_collide_against_player(bullet_container* b_container,
	player main_player){
	int bullet_index = threadIdx.x;
	// Clear the flag from a single thread and barrier, so that a late warp cannot
	// overwrite another warp's "collision found" result with false.
	if (threadIdx.x == 0){
		b_container->collision_with_player = false;
	}
	__syncthreads();
	bool collided = false;
	while (bullet_index < b_container->bullet_count){
		collided |= collide_against_player(b_container->bullet_slots[bullet_index].load, main_player);
		bullet_index += blockDim.x;
	}
	if (collided){
		b_container->collision_with_player = true;
	}
}
void pxlVertex2f(double x, double y){
glVertex2f(x / GAMEFIELD_SEMIWIDTH, y / GAMEFIELD_SEMIHEIGHT);
}
void pxlVertexPos(const point& pt){
glVertex2f(pt.x / GAMEFIELD_SEMIWIDTH, pt.y / GAMEFIELD_SEMIHEIGHT);
}
void gl_setup(GLFWwindow** window){
//Set the error callback
glfwSetErrorCallback(error_callback);
//Initialize GLFW
if (!glfwInit())
{
exit(EXIT_FAILURE);
}
//Set the GLFW window creation hints - these are optional
//glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); //Request a specific OpenGL version
//glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); //Request a specific OpenGL version
glfwWindowHint(GLFW_SAMPLES, 4); //Request 4x antialiasing
//glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
//Declare a window object
//glEnable(GL_COLOR_MATERIAL);
//Create a window and create its OpenGL context
*window = glfwCreateWindow(2*GAMEFIELD_SEMIWIDTH, 2*GAMEFIELD_SEMIHEIGHT, "Test Window", NULL, NULL);
//If the window couldn't be created
if (!*window)
{
fprintf(stderr, "Failed to open GLFW window.\n");
glfwTerminate();
exit(EXIT_FAILURE);
}
//This function makes the context of the specified window current on the calling thread.
glfwMakeContextCurrent(*window);
//Initialize GLEW
GLenum err = glewInit();
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
//If GLEW hasn't initialized
if (err != GLEW_OK)
{
fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
}
}
void queue_bullet(bullet& new_bullet, host_to_device_data& queue){
if (queue.queue_count < MAX_host_to_device_data_COUNT){
queue.bullets[queue.queue_count] = new_bullet;
++(queue.queue_count);
}
}
void queue_enemy(const enemy& new_enemy, host_data& game_data_h){
game_data_h.enemies.enemy_list[game_data_h.enemies.enemy_count] = new_enemy;
++game_data_h.enemies.enemy_count;
}
void queue_shot(const shot& new_shot, host_data& game_data_h){
game_data_h.shots.shot_list[game_data_h.shots.shot_count] = new_shot;
++game_data_h.shots.shot_count;
}
void draw_bullet(bullet_draw_info* info){
//glClear(GL_COLOR_BUFFER_BIT);
glBegin(GL_POLYGON); // Each set of 4 vertices form a quad
if (info->status == DEAD){
glColor3f(1.0, 0.0, 0.0); // Red
}
else {
glColor4f(1.0, 0.0, 1.0, 1.0);
}
for (int i = 0; i < 8; ++i){
pxlVertexPos(info->pos + point(polar_point(5, i * 2 * M_PI / 8)));
}
glEnd();
glBegin(GL_POLYGON); // Each set of 4 vertices form a quad
if (info->status == DEAD){
glColor3f(1.0, 0.0, 0.0); // Red
}
else {
glColor4f(1.0, 1.0, 1.0, .8);
}
for (int i = 0; i < 8; ++i){
pxlVertexPos(info->pos + point(polar_point(4, i * 2 * M_PI / 8)));
}
glEnd();
}
void draw_player(player& main_player){
glBegin(GL_QUADS); // Each set of 4 vertices form a quad
double ratio;
switch (main_player.status){
case FORM:
ratio = ((double)main_player.age) / PLAYER_FORM_TIME;
glColor4f(1.0, 0.5, 0.5, ratio);
pxlVertex2f(main_player.pos.x + 25, main_player.pos.y + 25);
pxlVertex2f(main_player.pos.x - 25, main_player.pos.y + 25);
pxlVertex2f(main_player.pos.x - 25, main_player.pos.y - 25);
pxlVertex2f(main_player.pos.x + 25, main_player.pos.y - 25);
break;
case ACTIVE:
if (main_player.invul && (main_player.age & 4)){
glColor4f(1.0, .6, .6, 1.0);
}
else {
glColor4f(1.0, 0.5, 0.5, 1.0);
}
pxlVertex2f(main_player.pos.x + 25, main_player.pos.y + 25);
pxlVertex2f(main_player.pos.x - 25, main_player.pos.y + 25);
pxlVertex2f(main_player.pos.x - 25, main_player.pos.y - 25);
pxlVertex2f(main_player.pos.x + 25, main_player.pos.y - 25);
break;
case FADE:
ratio = ((double)main_player.age) / PLAYER_FADE_TIME;
glColor4f(1.0, 0.5, 0.5, 1.0 - ratio);
pxlVertex2f(main_player.pos.x + 25 * (1.0 + ratio), main_player.pos.y + 25 * (1.0 + ratio));
pxlVertex2f(main_player.pos.x - 25 * (1.0 + ratio), main_player.pos.y + 25 * (1.0 + ratio));
pxlVertex2f(main_player.pos.x - 25 * (1.0 + ratio), main_player.pos.y - 25 * (1.0 + ratio));
pxlVertex2f(main_player.pos.x + 25 * (1.0 + ratio), main_player.pos.y - 25 * (1.0 + ratio));
break;
case DEAD:
glColor4f(0.0, 0.0, 0.0, .0);
break;
default:
glColor3f(0.0, 0.0, 0.0);
break;
}
glEnd();
}
void draw_player_hitbox(player& main_player){
glBegin(GL_QUADS);
glColor3f(1.0, 1.0, 1.0);
pxlVertex2f(main_player.pos.x + 3, main_player.pos.y + 3);
pxlVertex2f(main_player.pos.x - 3, main_player.pos.y + 3);
pxlVertex2f(main_player.pos.x - 3, main_player.pos.y - 3);
pxlVertex2f(main_player.pos.x + 3, main_player.pos.y - 3);
glEnd();
}
void draw_enemy(enemy& enemy){
glBegin(GL_QUADS);
double mult = 1;
glColor3f(1.0, 0.5, 0.0);
if (enemy.status == FADE){
double ratio = (1.0*enemy.age) / (ENEMY_FADE_TIME);
mult = 1 + ratio;
glColor4f(1.0, 0.5, 0.0, 1-ratio);
}
pxlVertex2f(enemy.pos.x + mult*enemy.radius, enemy.pos.y + mult*enemy.radius);
pxlVertex2f(enemy.pos.x - mult*enemy.radius, enemy.pos.y + mult*enemy.radius);
pxlVertex2f(enemy.pos.x - mult*enemy.radius, enemy.pos.y - mult*enemy.radius);
pxlVertex2f(enemy.pos.x + mult*enemy.radius, enemy.pos.y - mult*enemy.radius);
glEnd();
}
void draw_shot(shot& shot){
glBegin(GL_QUADS);
double t = polar_point(shot.vel).t - M_PI_2;
point c1 = rotate_point(point(1, 1)*shot.semi_size, t);
point c2 = rotate_point(point(1, -1)*shot.semi_size, t);
point c3 = rotate_point(point(-1, -1)* shot.semi_size, t);
point c4 = rotate_point(point(-1, 1)*shot.semi_size, t);
if (shot.status == ACTIVE)
{
glColor4f(0.5, 0.5, 1.0, .5);
pxlVertexPos(shot.pos + c1);
pxlVertexPos(shot.pos + c2);
pxlVertexPos(shot.pos + c3);
pxlVertexPos(shot.pos + c4);
}
if (shot.status == FADE){
double ratio = (1.0*shot.age) / SHOT_FADE_TIME;
point ratio_pt(1.0 + ratio, 1.0 + ratio);
glColor4f(0.5, 0.5, 1.0, .5*(1-ratio));
pxlVertexPos(shot.pos + c1*ratio_pt);
pxlVertexPos(shot.pos + c2*ratio_pt);
pxlVertexPos(shot.pos + c3*ratio_pt);
pxlVertexPos(shot.pos + c4*ratio_pt);
}
glEnd();
}
void draw_screen(host_data* game_data){
glClear(GL_COLOR_BUFFER_BIT);
draw_player(game_data->main_player);
for (int i = 0; i < game_data->enemies.enemy_count; ++i){
draw_enemy((game_data->enemies.enemy_list[i]));
}
for (int i = 0; i < game_data->shots.shot_count; ++i){
draw_shot((game_data->shots.shot_list[i]));
}
for (int i = game_data->dth_data.bullet_count - 1; i >= 0; --i){
draw_bullet(&(game_data->dth_data.draw_slots[i]));
}
draw_player_hitbox(game_data->main_player);
}
__device__ __host__ point set_mag_point(point& pt, double mag){
polar_point polar_pt(pt);
polar_pt.r = mag;
point return_value(polar_pt);
return return_value;
}
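// Usage note (illustrative): set_mag_point keeps the direction of pt and rescales its length,
// e.g. set_mag_point(point(3, 4), 1.0) yields a unit vector along (3, 4), assuming point and
// polar_point convert between Cartesian and polar form as they do elsewhere in this file.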
void set_player_velocity(player& main_player, GLFWwindow *window){
if (glfwGetKey(window, GLFW_KEY_UP)){
main_player.vel.y = 1;
}
else if (glfwGetKey(window, GLFW_KEY_DOWN)){
main_player.vel.y = -1;
}
else {
main_player.vel.y = 0;
}
if (glfwGetKey(window, GLFW_KEY_RIGHT)){
main_player.vel.x = 1;
}
else if (glfwGetKey(window, GLFW_KEY_LEFT)){
main_player.vel.x = -1;
}
else {
main_player.vel.x = 0;
}
if (glfwGetKey(window, GLFW_KEY_LEFT_SHIFT)){
main_player.is_focus = true;
if (main_player.vel.x != 0 || main_player.vel.y != 0){
main_player.vel = set_mag_point(main_player.vel, 1.0);
}
}
else {
main_player.is_focus = false;
if (main_player.vel.x != 0 || main_player.vel.y != 0){
main_player.vel = set_mag_point(main_player.vel, 3.0);
}
}
if (glfwGetKey(window, GLFW_KEY_Z)){
main_player.is_shooting = true;
}
else {
main_player.is_shooting = false;
}
}
void move_player(player& main_player){
main_player.pos.x += main_player.vel.x;
main_player.pos.y += main_player.vel.y;
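// (x > 0) - (x < 0) below is the branchless sign() idiom: it clamps the player to the field
// edge while preserving which side (left/right, top/bottom) they ran off.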
if (abs(main_player.pos.x) > GAMEFIELD_SEMIWIDTH + PLAYER_TOLERANCE){
main_player.pos.x = (GAMEFIELD_SEMIWIDTH + PLAYER_TOLERANCE) *
((main_player.pos.x > 0) - (main_player.pos.x < 0));
}
if (abs(main_player.pos.y) > GAMEFIELD_SEMIHEIGHT + PLAYER_TOLERANCE){
main_player.pos.y = (GAMEFIELD_SEMIHEIGHT + PLAYER_TOLERANCE) *
((main_player.pos.y > 0) - (main_player.pos.y < 0));
}
}
void update_player(player& main_player, host_data& game_data_h){
main_player.is_hit |= game_data_h.dth_data.collision_with_player;
switch (main_player.status){
case FORM:
main_player.invul = true;
if (main_player.age > PLAYER_FORM_TIME){
main_player.status = ACTIVE;
main_player.age = 0;
}
break;
case ACTIVE:
if (main_player.age == PLAYER_ACTIVE_INVUL_TIME){
main_player.invul = false;
}
move_player(main_player);
if (main_player.is_hit && !main_player.invul){
main_player.status = FADE;
main_player.age = 0;
}
if (main_player.age % 6 == 0 && main_player.is_shooting){
shot new_shot;
new_shot.damage = 8;
new_shot.semi_size = point(3, 12);
new_shot.status = FORM;
new_shot.age = 0;
double spread = main_player.is_focus ? 1.0 : 3.0;
new_shot.vel = point(0, 10);
new_shot.pos = main_player.pos + point(-15, -10);
queue_shot(new_shot, game_data_h);
new_shot.pos = main_player.pos + point(15, -10);
queue_shot(new_shot, game_data_h);
new_shot.vel = polar_point(8, M_PI_2 + .02*spread);
new_shot.pos = main_player.pos + point(-4 * spread, 5);
queue_shot(new_shot, game_data_h);
new_shot.vel = polar_point(8, M_PI_2 - .02*spread);
new_shot.pos = main_player.pos + point(4 * spread, 5);
queue_shot(new_shot, game_data_h);
new_shot.vel = polar_point(8, M_PI_2 - .06*spread);
new_shot.pos = main_player.pos + point(12 * spread, 5);
queue_shot(new_shot, game_data_h);
new_shot.vel = polar_point(8, M_PI_2 + .06*spread);
new_shot.pos = main_player.pos + point(-12 * spread, 5);
queue_shot(new_shot, game_data_h);
}
break;
case FADE:
if (main_player.age > PLAYER_FADE_TIME){
main_player.status = DEAD;
main_player.age = 0;
game_data_h.deaths++;
}
break;
case DEAD:
if (main_player.age > PLAYER_DEAD_TIME){
main_player.status = FORM;
main_player.age = 0;
}
break;
default:
break;
}
main_player.is_hit = false;
++main_player.age;
}
void generic_enemy_update(enemy& self, host_data& game_data_h){
point diff;
switch (self.status){
case FORM:
if (self.age > ENEMY_FORM_TIME){
self.status = ACTIVE;
self.age = 0;
}
break;
case ACTIVE:
self.pos = self.pos + self.vel;
diff = self.pos - game_data_h.main_player.pos;
if (abs(diff.x) < self.radius + game_data_h.main_player.radius &&
abs(diff.y) < self.radius + game_data_h.main_player.radius){
game_data_h.main_player.is_hit = true;
}
if (!in_bounds(self.pos, ENEMY_TOLERANCE) || self.hp <= 0){
self.status = FADE;
self.age = 0;
if (self.hp <= 0){
game_data_h.enemies_killed++;
}
}
break;
case FADE:
if (self.age > ENEMY_FADE_TIME){
self.status = DEAD;
self.age = 0;
}
break;
case DEAD:
break;
default:
break;
}
++self.age;
}
void update_function_1(enemy& self, host_data& game_data_h){
generic_enemy_update(self, game_data_h);
if (self.age % 60 == 30){
bullet sample;
sample.pos = self.pos;
polar_point diff = game_data_h.main_player.pos - self.pos;
for (int dir = -1; dir <= 1; dir += 2){
for (int mag = 0; mag < 4; ++mag){
polar_point new_vel((.5+.3*mag), diff.t + dir*.1);
sample.vel = new_vel;
queue_bullet(sample, game_data_h.htd_data);
}
}
}
}
void update_function_2(enemy& self, host_data& game_data_h){
generic_enemy_update(self, game_data_h);
if (self.age % 120 == 0 && self.pos.y > GAMEFIELD_SEMIHEIGHT*.3){
bullet sample;
polar_point diff = game_data_h.main_player.pos - self.pos;
for (int dir = 0; dir < 32; ++dir){
double t = diff.t + dir * 2 * M_PI / 32;
sample.pos = self.pos + point(polar_point(50,t));
for (int j = -1; j <= 1; j += 2){
for (int mag = 5; mag <= 11; mag += 1){
polar_point new_vel(mag*.1*j, t + M_PI_2);
sample.vel = new_vel;
queue_bullet(sample, game_data_h.htd_data);
}
}
}
}
}
void update_function_3(enemy& self, host_data& game_data_h){
generic_enemy_update(self, game_data_h);
if (self.age % 60 == 0 && self.pos.y > GAMEFIELD_SEMIHEIGHT*.3){
bullet sample;
polar_point diff = game_data_h.main_player.pos - self.pos;
for (int dir = -3; dir <= 3; ++dir){
double t = diff.t + dir * .2;
sample.pos = self.pos;
for (int mag = 3; mag <= 5; mag += 1){
polar_point new_vel(mag*.2, t);
sample.vel = new_vel;
queue_bullet(sample, game_data_h.htd_data);
}
}
}
}
void update_function_4(enemy& self, host_data& game_data_h){
generic_enemy_update(self, game_data_h);
if (self.age % 10 == 0 && self.pos.y > GAMEFIELD_SEMIHEIGHT*-.1){
bullet sample;
for (int dir = 0; dir < 8; ++dir){
double t = dir * 2 * M_PI / 8 + self.age;
sample.pos = self.pos + point(polar_point(30, t + M_PI_2));
polar_point new_acc(.001, t);
sample.acc = new_acc;
queue_bullet(sample, game_data_h.htd_data);
}
}
}
void update_function_5(enemy& self, host_data& game_data_h){
generic_enemy_update(self, game_data_h);
int arms = 7;
if (self.age % 2 == 0 && self.age > 300){
bullet sample;
self.vel = point(0, 0);
for (int dir = 0; dir < arms; ++dir){
double t = dir * 2 * M_PI / arms + 0.0002*self.age*self.age;
sample.pos = self.pos;
polar_point new_vel(1.0, t);
sample.vel = new_vel;
polar_point new_acc(-.005, t);
sample.acc = new_acc;
queue_bullet(sample, game_data_h.htd_data);
}
}
}
enemy enemy1(point pos, point vel){
enemy return_value;
return_value.pos = pos;
return_value.vel = vel;
return_value.status = FORM;
return_value.radius = 20;
return_value.update = update_function_2;
return_value.hp = 2000;
return_value.age = 0;
return return_value;
}
enemy set_enemy(point pos, point vel, int hp, void(*update_function)(enemy&, host_data&)){
enemy return_value;
return_value.pos = pos;
return_value.vel = vel;
return_value.status = FORM;
return_value.radius = 20;
return_value.update = update_function;
return_value.hp = hp;
return_value.age = 0;
return return_value;
}
void update_enemies(host_data& game_data_h){
for (int i = 0; i < game_data_h.enemies.enemy_count; ++i){
game_data_h.enemies.enemy_list[i].update(
game_data_h.enemies.enemy_list[i], game_data_h);
}
int j = 0;
for (int i = 0; i < game_data_h.enemies.enemy_count; ++i){
if (game_data_h.enemies.enemy_list[i].status != DEAD){
game_data_h.enemies.enemy_list[j] = game_data_h.enemies.enemy_list[i];
++j;
}
}
game_data_h.enemies.enemy_count = j;
}
void update_shot(shot& shot, host_data& game_data_h){
point diff;
polar_point polar_vel;
switch (shot.status){
case FORM:
shot.status = ACTIVE;
shot.age = 0;
break;
case ACTIVE:
shot.pos = shot.pos + shot.vel;
polar_vel = shot.vel;
for (int i = 0; i < game_data_h.enemies.enemy_count; ++i){
diff = rotate_point(game_data_h.enemies.enemy_list[i].pos - shot.pos, -polar_vel.t);
if (abs(diff.x) < shot.semi_size.x + game_data_h.enemies.enemy_list[i].radius &&
abs(diff.y) < shot.semi_size.y + game_data_h.enemies.enemy_list[i].radius){
game_data_h.enemies.enemy_list[i].hp -= shot.damage;
shot.status = FADE;
shot.age = 0;
}
}
if (!in_bounds(shot.pos, SHOT_TOLERANCE)){
shot.status = FADE;
shot.age = 0;
}
break;
case FADE:
shot.pos = shot.pos + shot.vel;
if (shot.age > SHOT_FADE_TIME){
shot.status = DEAD;
shot.age = 0;
}
break;
case DEAD:
break;
default:
break;
}
++shot.age;
}
void update_shots(host_data& game_data_h){
for (int i = 0; i < game_data_h.shots.shot_count; ++i){
update_shot(
game_data_h.shots.shot_list[i], game_data_h);
}
int j = 0;
for (int i = 0; i < game_data_h.shots.shot_count; ++i){
if (game_data_h.shots.shot_list[i].status != DEAD){
game_data_h.shots.shot_list[j] = game_data_h.shots.shot_list[i];
++j;
}
}
game_data_h.shots.shot_count = j;
}
void game_script(host_data& game_data_h){
if (game_data_h.age == 3000){
queue_enemy(enemy1(point(GAMEFIELD_SEMIWIDTH*-.3, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, 3 * M_PI_2)), game_data_h);
}
if (game_data_h.age == 2200){
queue_enemy(enemy1(point(GAMEFIELD_SEMIWIDTH*.3, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, 3 * M_PI_2)), game_data_h);
}
if (game_data_h.age == 1120 || game_data_h.age == 1120 + 60 || game_data_h.age == 1120 + 120){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.5, 7.5 * M_PI_4), 300, update_function_4), game_data_h);
}
if (game_data_h.age == 1480 || game_data_h.age == 1480 + 60 || game_data_h.age == 1480 + 120){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.5, 4.5 * M_PI_4), 300, update_function_4), game_data_h);
}
if (game_data_h.age == 120 || game_data_h.age == 120 + 60 || game_data_h.age == 120 + 120){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.5, 6.5 * M_PI_4), 300, update_function_3), game_data_h);
}
if (game_data_h.age == 480 || game_data_h.age == 480 + 60 || game_data_h.age == 480 + 120){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.5, 5.5 * M_PI_4), 300, update_function_3), game_data_h);
}
if (game_data_h.age >= 4000 && game_data_h.age <= 4500 && game_data_h.age%50==0){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*.1, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(2.5, 7 * M_PI_4), 100, update_function_1), game_data_h);
}
if (game_data_h.age == 5000){
for (int i = 0; i < 5; ++i){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.0, (6 + .5*i) * M_PI_4), 200, update_function_4), game_data_h);
}
}
if (game_data_h.age == 6000){
for (int i = 0; i < 5; ++i){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*+.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.0, (6 - .5*i) * M_PI_4), 200, update_function_4), game_data_h);
}
}
if (game_data_h.age == 7000){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*+.3, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, (6) * M_PI_4), 700, update_function_2), game_data_h);
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.0, (6 + .5*1) * M_PI_4), 100, update_function_4), game_data_h);
}
if (game_data_h.age == 8000){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.3, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, (6) * M_PI_4), 700, update_function_2), game_data_h);
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*+.7, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(1.0, (6 - .5 * 1) * M_PI_4), 100, update_function_4), game_data_h);
}
if (game_data_h.age == 9000){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*-.6, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, (6) * M_PI_4), 700, update_function_2), game_data_h);
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH*+.6, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, (6) * M_PI_4), 700, update_function_2), game_data_h);
}
if (game_data_h.age == 9060){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH * 0, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(.3, (6) * M_PI_4), 700, update_function_2), game_data_h);
}
if (game_data_h.age == 10000){
queue_enemy(set_enemy(point(GAMEFIELD_SEMIWIDTH * 0, GAMEFIELD_SEMIHEIGHT*1.1),
polar_point(0.5, (6) * M_PI_4), 10000, update_function_5), game_data_h);
}
++game_data_h.age;
}
int main()
{
bullet_container* data_d;
device_to_host_data* draw_d;
//device_to_host_data* draw_h;
host_to_device_data* new_host_to_device_data_d;
//host_to_device_data* new_host_to_device_data_h;
host_data* game_data_h;
GLFWwindow* window;
gl_setup(&window);
const int bullets_count = 0;
const int bullets_size = MAX_BULLET_COUNT*sizeof(bullet);
const int bullet_draw_infos_size = MAX_BULLET_COUNT*sizeof(bullet_draw_info);
dim3 dimBlock(block_width, block_height);
dim3 dimGrid(1, 1);
cudaMalloc((void**)&data_d, sizeof(bullet_container));
cudaMalloc((void**)&draw_d, sizeof(device_to_host_data)); // size must match the device_to_host_data the extract kernel fills and the later memcpy copies
cudaMalloc((void**)&new_host_to_device_data_d, sizeof(host_to_device_data));
/*cudaMallocHost((void**)&draw_h, sizeof(device_to_host_data));
cudaMallocHost((void**)&new_host_to_device_data_h, sizeof(host_to_device_data));*/
cudaMallocHost((void**)&game_data_h, sizeof(host_data));
/*draw_data_pointers.bullet_count = &(draw_h->bullet_count);
draw_data_pointers.bullet_draw_infos = (draw_h->draw_slots);
draw_data_pointers.player_info = &(main_player);*/
game_data_h->age = 0;
game_data_h->main_player = player();
cudaStream_t stream1, stream2, stream3, stream4;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
cudaStreamCreate(&stream4);
container_initialize_all_bullet <<<dimGrid, dimBlock, 0, stream4 >>>(
data_d,
bullets_count);
cudaStreamSynchronize(stream4);
//Set a background color
glClearColor(0.0f, 0.0f, .5f, 0.0f);
double time = glfwGetTime();
const double FRAME_PERIOD = 1.0 / 60.0;
game_data_h->htd_data.queue_count = 0;
//Main Loop
do
{
if (glfwGetTime() - time >= FRAME_PERIOD){
printf("Frame Rate: %f\n Bullets: %d\n Deaths: %d\n Enemies Killed: %d\n",
1.0 / (glfwGetTime() - time),
game_data_h->dth_data.bullet_count,
game_data_h->deaths,
game_data_h->enemies_killed
);
time = glfwGetTime();
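// Per-frame flow: copy the host bullet queue to the device, run host-side game logic (shots,
// enemies, script) while the device streams update, collide and compact bullets, then copy the
// draw data back so the next frame can render it.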
//test_queue_bullet(game_data_h->htd_data);
// copy the newly queued bullets from the host to the device
if (cudaSuccess != cudaMemcpyAsync(new_host_to_device_data_d,
&(game_data_h->htd_data),
sizeof(host_to_device_data), cudaMemcpyHostToDevice, stream4)){
printf("failure memcpy htd\n");
return 1;
}
update_shots(*game_data_h);
update_enemies(*game_data_h);
game_script(*game_data_h);
// reset queue
cudaDeviceSynchronize();
game_data_h->htd_data.queue_count = 0;
container_extract_all_bullet_draw_info << <dimGrid, dimBlock, 0, stream2 >> >(
data_d, draw_d);
glClear(GL_COLOR_BUFFER_BIT);
draw_screen(game_data_h);
cudaDeviceSynchronize();
container_update_all_bullet << <dimGrid, dimBlock, 0, stream1 >> >(data_d);
container_collide_against_player << <dimGrid, dimBlock, 0, stream1 >> >(data_d, game_data_h->main_player);
container_add_new_bullets << <dimGrid, dimBlock, 0, stream1 >> >(data_d, new_host_to_device_data_d);
set_player_velocity(game_data_h->main_player, window);
update_player(game_data_h->main_player, *game_data_h);
cudaDeviceSynchronize();
mark_bullet_pull << <dimGrid, dimBlock, 0, stream1 >> >(data_d);
relocate_all_bullet << <dimGrid, dimBlock, 0, stream1 >> >(data_d);
if (cudaSuccess != cudaMemcpyAsync(&(game_data_h->dth_data), draw_d,
sizeof(device_to_host_data), cudaMemcpyDeviceToHost, stream3)){
printf("failure memcpy dth\n");
return 1;
}
//Swap buffers
glfwSwapBuffers(window);
//Get and organize events, like keyboard and mouse input, window resizing, etc...
glfwPollEvents();
}
} //Check if the ESC key has been pressed or if the window has been closed
while (!glfwWindowShouldClose(window));
cudaFree(data_d);
cudaFree(draw_d);
cudaFree(new_host_to_device_data_d);
cudaFreeHost(game_data_h);
//Close OpenGL window and terminate GLFW
glfwDestroyWindow(window);
//Finalize and clean up GLFW
glfwTerminate();
exit(EXIT_SUCCESS);
}
//Define an error callback
static void error_callback(int error, const char* description)
{
fputs(description, stderr);
_fgetchar();
}
|
631367943de12e4b8ec701a8314aab1a3ad913aa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2011
@author Azzam Haidar
@author Tingxing Dong
@generated from zgeqr2_kernels.cu normal z -> s, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "batched_kernel_param.h"
static float neg_one = MAGMA_S_NEG_ONE;
static float one = MAGMA_S_ONE;
static float zero = MAGMA_S_ZERO;
__global__ void
sgeqrf_copy_upper_kernel_batched(
int n, int nb,
float **dV_array, int ldv,
float **dR_array, int ldr)
{
float *dV = dV_array[blockIdx.x];
float *dR = dR_array[blockIdx.x];
int tid = threadIdx.x;
int column = (tid / nb + 1) * nb;
if( tid < n && column < n)
{
for(int i=column; i<n; i++)
{
dR[tid + i * ldr] = dV[tid + i * ldv];
}
}
}
void sgeqrf_copy_upper_batched(
magma_int_t n, magma_int_t nb,
float **dV_array, magma_int_t ldv,
float **dR_array, magma_int_t ldr,
magma_int_t batchCount, magma_queue_t queue)
{
/*
copy the block upper-triangular part of dV into dR
*/
if( nb >= n) return ;
hipLaunchKernelGGL(( sgeqrf_copy_upper_kernel_batched), dim3(batchCount), dim3(n), 0, queue, n, nb, dV_array, ldv, dR_array, ldr);
}
extern "C" magma_int_t
magma_slarfb_sgemm_batched(
hipblasHandle_t myhandle,
magma_int_t m, magma_int_t n, magma_int_t k,
float **dV_array, magma_int_t ldv,
float **dT_array, magma_int_t ldt,
float **dA_array, magma_int_t lda,
float **W_array, magma_int_t ldw,
float **W2_array, magma_int_t ldw2,
magma_int_t batchCount, magma_queue_t queue)
{
// W is workspace; its size is nb * n
// W = V^H * A. V is stored in A(i:m, i:ib)
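// Taken together, the three batched GEMMs below apply the blocked Householder update
//   A := (I - V T^H V^H) A   via   W = V^H A,  W2 = T^H W,  A = A - V W2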
if( m <=0 || n <= 0 || k <=0 ) return 1;
#if 1 // CUBLAS is faster than MAGMABLAS by ~17 GFLOP/s at size 512, batchCount = 2000
hipblasSgemmBatched(myhandle, HIPBLAS_OP_C, HIPBLAS_OP_N, k, n, m,
&one, (const float**) dV_array, ldv,
(const float**) dA_array, lda,
&zero, W_array, ldw, batchCount );
// W2 = T^H * W
hipblasSgemmBatched(myhandle, HIPBLAS_OP_C, HIPBLAS_OP_N, k, n, k,
&one, (const float**) dT_array, ldt,
(const float**) W_array, ldw,
&zero, W2_array, ldw2, batchCount );
// A = A - V * W2
hipblasSgemmBatched(myhandle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k,
&neg_one, (const float**) dV_array, ldv,
(const float**) W2_array, ldw2,
&one, dA_array, lda, batchCount );
#else
magmablas_sgemm_batched(MagmaConjTrans, MagmaNoTrans, k, n, m,
one, (const float**) dV_array, ldv,
(const float**) dA_array, lda,
zero, W_array, ldw, batchCount );
// W2 = T^H * W
magmablas_sgemm_batched(MagmaConjTrans, MagmaNoTrans, k, n, k,
one, (const float**) dT_array, ldt,
(const float**) W_array, ldw,
zero, W2_array, ldw2, batchCount );
// A = A - V * W2
magmablas_sgemm_batched(MagmaNoTrans, MagmaNoTrans, m, n, k,
neg_one, (const float**) dV_array, ldv,
(const float**) W2_array, ldw2,
one, dA_array, lda, batchCount );
#endif
return 0;
}
|
631367943de12e4b8ec701a8314aab1a3ad913aa.cu
|
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2011
@author Azzam Haidar
@author Tingxing Dong
@generated from zgeqr2_kernels.cu normal z -> s, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "batched_kernel_param.h"
static float neg_one = MAGMA_S_NEG_ONE;
static float one = MAGMA_S_ONE;
static float zero = MAGMA_S_ZERO;
__global__ void
sgeqrf_copy_upper_kernel_batched(
int n, int nb,
float **dV_array, int ldv,
float **dR_array, int ldr)
{
float *dV = dV_array[blockIdx.x];
float *dR = dR_array[blockIdx.x];
int tid = threadIdx.x;
int column = (tid / nb + 1) * nb;
if( tid < n && column < n)
{
for(int i=column; i<n; i++)
{
dR[tid + i * ldr] = dV[tid + i * ldv];
}
}
}
void sgeqrf_copy_upper_batched(
magma_int_t n, magma_int_t nb,
float **dV_array, magma_int_t ldv,
float **dR_array, magma_int_t ldr,
magma_int_t batchCount, magma_queue_t queue)
{
/*
copy the block upper-triangular part of dV into dR
*/
if( nb >= n) return ;
sgeqrf_copy_upper_kernel_batched<<<batchCount, n, 0, queue>>>(n, nb, dV_array, ldv, dR_array, ldr);
}
extern "C" magma_int_t
magma_slarfb_sgemm_batched(
cublasHandle_t myhandle,
magma_int_t m, magma_int_t n, magma_int_t k,
float **dV_array, magma_int_t ldv,
float **dT_array, magma_int_t ldt,
float **dA_array, magma_int_t lda,
float **W_array, magma_int_t ldw,
float **W2_array, magma_int_t ldw2,
magma_int_t batchCount, magma_queue_t queue)
{
// W is workspace; its size is nb * n
// W = V^H * A. V is stored in A(i:m, i:ib)
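// Taken together, the three batched GEMMs below apply the blocked Householder update
//   A := (I - V T^H V^H) A   via   W = V^H A,  W2 = T^H W,  A = A - V W2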
if( m <=0 || n <= 0 || k <=0 ) return 1;
#if 1 // CUBLAS is faster than MAGMABLAS by ~17 GFLOP/s at size 512, batchCount = 2000
cublasSgemmBatched(myhandle, CUBLAS_OP_C, CUBLAS_OP_N, k, n, m,
&one, (const float**) dV_array, ldv,
(const float**) dA_array, lda,
&zero, W_array, ldw, batchCount );
// W2 = T^H * W
cublasSgemmBatched(myhandle, CUBLAS_OP_C, CUBLAS_OP_N, k, n, k,
&one, (const float**) dT_array, ldt,
(const float**) W_array, ldw,
&zero, W2_array, ldw2, batchCount );
// A = A - V * W2
cublasSgemmBatched(myhandle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
&neg_one, (const float**) dV_array, ldv,
(const float**) W2_array, ldw2,
&one, dA_array, lda, batchCount );
#else
magmablas_sgemm_batched(MagmaConjTrans, MagmaNoTrans, k, n, m,
one, (const float**) dV_array, ldv,
(const float**) dA_array, lda,
zero, W_array, ldw, batchCount );
// W2 = T^H * W
magmablas_sgemm_batched(MagmaConjTrans, MagmaNoTrans, k, n, k,
one, (const float**) dT_array, ldt,
(const float**) W_array, ldw,
zero, W2_array, ldw2, batchCount );
// A = A - V * W2
magmablas_sgemm_batched(MagmaNoTrans, MagmaNoTrans, m, n, k,
neg_one, (const float**) dV_array, ldv,
(const float**) W2_array, ldw2,
one, dA_array, lda, batchCount );
#endif
return 0;
}
|
13a2d21a2c4d82a2cd77e5f8d457b53b31095332.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Kernel.cuh"
#include <cmath>
#include <sstream>
namespace axomae {
__device__
const static bool isbigEndian = SDL_BIG_ENDIAN == SDL_BYTEORDER;
__device__
static uint32_t max_int_rgb = 0;
__device__
static uint32_t min_int_rgb = UINT32_MAX;
struct Triplet {
int x;
int y;
int z;
};
struct RGB {
public:
uint8_t r;
uint8_t g;
uint8_t b;
uint8_t a;
__device__
void operator=(RGB rgb) {
this->r = rgb.r;
this->g = rgb.g;
this->b = rgb.b;
this->a = rgb.a;
}
__host__ std::string string() {
return std::to_string(r) + " " + std::to_string(g) + " " + std::to_string(b) + "\n";
}
__device__
RGB operator+(RGB rgb) {
return { uint8_t(r + rgb.r) ,uint8_t(g + rgb.g) ,uint8_t(b + rgb.b) ,uint8_t(a + rgb.a) };
}
template<typename T> __device__
RGB operator+(T rgb) {
return { r + rgb , g + rgb , b + rgb , a + rgb };
}
__device__
RGB operator*(RGB rgb) {
return { uint8_t(r * rgb.r) ,uint8_t(g * rgb.g) ,uint8_t(b * rgb.b) ,uint8_t(a * rgb.a) };
}
template<typename T> __device__
RGB operator*(T value) {
return { uint8_t(r * value) ,uint8_t(g * value) ,uint8_t(b * value) ,uint8_t(a * value) };
}
__device__
RGB normalize_rgb(RGB max , RGB min) {
uint8_t n_red = normalize(max.r ,min.r , r);
uint8_t n_green = normalize(max.g, min.g, g);
uint8_t n_blue = normalize(max.b, min.b, b);
return { n_red , n_green , n_blue , 0 };
}
/*compute the magnitude between two RGB values*/
__device__
RGB magnitude_rgb(RGB horizontal , RGB vertical) {
RGB rgb;
rgb.r = (uint8_t)magnitude(vertical.r, horizontal.r);
rgb.g = (uint8_t)magnitude(vertical.g, horizontal.g);
rgb.b = (uint8_t)magnitude(vertical.b, horizontal.b);
rgb.a = (uint8_t)magnitude(vertical.a, horizontal.a);
return rgb;
}
__device__
void print() {
printf("%i %i %i\n", r, g, b);
}
};
class SDLSurfParam {
public:
unsigned int width;
unsigned int height;
int bpp;
int pitch;
void* data;
SDLSurfParam(SDL_Surface* im) {
width = im->w;
height = im->h;
bpp = im->format->BytesPerPixel;
pitch = im->pitch;
data = im->pixels;
}
SDLSurfParam() {
}
size_t getByteSize() {
return height * pitch;
}
};
template<typename T>
struct custom_convolution_kernel {
T* array;
uint8_t size_w;
uint8_t size_h;
};
struct gpu_threads {
dim3 threads;
dim3 blocks;
};
/*device*/
/*********************************************************************************************************************************************/
__host__ __device__
uint32_t rgb_to_int(RGB val) {
uint32_t value = (isbigEndian) ? val.a | (val.b << 8) | (val.g << 16) | (val.r << 24) : val.r | (val.g << 8) | (val.b << 16) | (val.a << 24);
return value;
}
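// Illustrative example (not from the original): on a little-endian host (isbigEndian == false),
// rgb_to_int({0x11, 0x22, 0x33, 0x44}) packs to 0x44332211, the same byte order that
// set_pixel_color() writes back for bpp == 4.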
__device__
void initialize_2D_array(uint32_t *array, int size_w, int size_h) {
int i = blockIdx.x;
int j = threadIdx.x;
array[i*size_w + j] = 0;
}
__device__
RGB compute_greyscale(RGB rgb, const bool luminance) {
RGB ret;
if (luminance) {
ret.r = rgb.r* 0.3 + rgb.g*0.59 + rgb.b*0.11;
ret.g = ret.r;
ret.b = ret.r;
}
else
{
ret.r = (int)((rgb.r + rgb.b + rgb.g) / 3);
ret.g = ret.r;
ret.b = ret.r;
}
return ret;
}
__device__
RGB int_to_rgb(uint8_t* pixel_value, const int bpp) {
RGB rgb = { 0 , 0 , 0 , 0 };
if (bpp == 4) {
// read the whole 32-bit pixel before unpacking; shifting a single byte by 24 would always give 0
uint32_t v = *(uint32_t*)pixel_value;
if (isbigEndian) {
rgb.r = v >> 24 & 0xFF;
rgb.g = v >> 16 & 0xFF;
rgb.b = v >> 8 & 0xFF;
rgb.a = v & 0xFF;
}
else {
rgb.a = v >> 24 & 0xFF;
rgb.b = v >> 16 & 0xFF;
rgb.g = v >> 8 & 0xFF;
rgb.r = v & 0xFF;
}
}
else if (bpp == 3) {
if (isbigEndian) {
rgb.r = pixel_value[0];
rgb.g = pixel_value[1];
rgb.b = pixel_value[2];
rgb.a = 0;
}
else {
rgb.b = pixel_value[0];
rgb.g = pixel_value[1];
rgb.r = pixel_value[2];
rgb.a = 0;
}
}
else if (bpp == 2) {
// read the whole 16-bit pixel before unpacking
uint16_t v = *(uint16_t*)pixel_value;
if (isbigEndian) {
rgb.r = v >> 12 & 0xF;
rgb.g = v >> 8 & 0XF;
rgb.b = v >> 4 & 0XF;
rgb.a = v & 0XF;
}
else {
rgb.a = v >> 12 & 0xF;
rgb.b = v >> 8 & 0XF;
rgb.g = v >> 4 & 0XF;
rgb.r = v & 0XF;
}
}
else if (bpp == 1) {
if (isbigEndian) {
rgb.r = *pixel_value >> 5 & 0X7;
rgb.g = *pixel_value >> 2 & 0X7;
rgb.b = *pixel_value & 0X3;
rgb.a = 0;
}
else {
rgb.b = *pixel_value >> 5 & 0X7;
rgb.g = *pixel_value >> 2 & 0X7;
rgb.r = *pixel_value & 0X3;
rgb.a = 0;
}
}
return rgb;
}
__device__
void set_pixel_color(uint8_t* pixel_value, RGB rgb, const int bpp) {
uint32_t toInt = rgb_to_int(rgb);
if (bpp == 4)
{
*(uint32_t*)(pixel_value) = toInt;
}
else if (bpp == 3) {
if (isbigEndian) {
((uint8_t*)pixel_value)[0] = toInt >> 16 & 0xFF;
((uint8_t*)pixel_value)[1] = toInt >> 8 & 0xFF;
((uint8_t*)pixel_value)[2] = toInt & 0xFF;
}
else {
((uint8_t*)pixel_value)[0] = toInt & 0xFF;
((uint8_t*)pixel_value)[1] = toInt >> 8 & 0xFF;
((uint8_t*)pixel_value)[2] = toInt >> 16 & 0xFF;
}
}
else if (bpp == 2) {
*((uint16_t*)pixel_value) = toInt;
}
else
{
*pixel_value = toInt;
}
}
__device__
RGB get_pixel_value_at(uint8_t *pixel, int i, int j, const int bpp, int pitch) {
uint8_t* p = (uint8_t*)(pixel) + i*bpp + j*pitch;
RGB A = int_to_rgb(p, bpp);
// printf("%i %i %i %i %i %i\n", p[0], p[1], p[2] , A.r , A.g , A.b);
return A;
}
struct convolution_directions {
RGB vertical;
RGB horizontal;
};
//TODO : case kernel < 0
__device__
convolution_directions compute_convolution(uint8_t * pixel, const int bpp, int pitch, const int h_kernel[KERNEL_SIZE][KERNEL_SIZE], const int v_kernel[KERNEL_SIZE][KERNEL_SIZE], uint8_t border_flag) {
RGB center = get_pixel_value_at(pixel, 0, 0, bpp, pitch);
RGB west = get_pixel_value_at(pixel, 0, -1, bpp, pitch); // note: reads out of bounds for border pixels; callers only invoke this on interior pixels
RGB north_west = get_pixel_value_at(pixel, -1, -1, bpp, pitch);
RGB north = get_pixel_value_at(pixel, -1, 0, bpp, pitch);
RGB north_east = get_pixel_value_at(pixel, -1, 1, bpp, pitch);
RGB east = get_pixel_value_at(pixel, 0, 1, bpp, pitch);
RGB south_east = get_pixel_value_at(pixel, 1, 1, bpp, pitch);
RGB south = get_pixel_value_at(pixel, 1, 0, bpp, pitch);
RGB south_west = get_pixel_value_at(pixel, 1, -1, bpp, pitch);
double verticalx = north_west.r * v_kernel[0][0] + north.r*v_kernel[0][1] + north_east.r*v_kernel[0][2] +
west.r * v_kernel[1][0] + center.r*v_kernel[1][1] + east.r*v_kernel[1][2] +
south_west.r * v_kernel[2][0] + south.r*v_kernel[2][1] + south_east.r*v_kernel[2][2];
double verticaly = north_west.g * v_kernel[0][0] + north.g*v_kernel[0][1] + north_east.g*v_kernel[0][2] +
west.g * v_kernel[1][0] + center.g*v_kernel[1][1] + east.g*v_kernel[1][2] +
south_west.g * v_kernel[2][0] + south.g*v_kernel[2][1] + south_east.g*v_kernel[2][2];
double verticalz = north_west.b * v_kernel[0][0] + north.b*v_kernel[0][1] + north_east.b*v_kernel[0][2] +
west.b * v_kernel[1][0] + center.b*v_kernel[1][1] + east.b*v_kernel[1][2] +
south_west.b * v_kernel[2][0] + south.b*v_kernel[2][1] + south_east.b*v_kernel[2][2];
double horizontalx = north_west.r * h_kernel[0][0] + north.r*h_kernel[0][1] + north_east.r*h_kernel[0][2] +
west.r * h_kernel[1][0] + center.r*h_kernel[1][1] + east.r*h_kernel[1][2] +
south_west.r * h_kernel[2][0] + south.r*h_kernel[2][1] + south_east.r*h_kernel[2][2];
double horizontaly = north_west.g * h_kernel[0][0] + north.g*h_kernel[0][1] + north_east.g*h_kernel[0][2] +
west.g * h_kernel[1][0] + center.g*h_kernel[1][1] + east.g*h_kernel[1][2] +
south_west.g * h_kernel[2][0] + south.g*h_kernel[2][1] + south_east.g*h_kernel[2][2];
double horizontalz = north_west.b * h_kernel[0][0] + north.b*h_kernel[0][1] + north_east.b*h_kernel[0][2] +
west.b * h_kernel[1][0] + center.b*h_kernel[1][1] + east.b*h_kernel[1][2] +
south_west.b * h_kernel[2][0] + south.b*h_kernel[2][1] + south_east.b*h_kernel[2][2];
convolution_directions dir;
RGB minn = { 0, 0, 0, 0 };
RGB maxx = { 255 , 255 , 255 , 255 };
auto rh = normalize(maxx.r, minn.r, horizontalx);
auto rv = normalize(maxx.r, minn.r, verticalx);
auto gh = normalize(maxx.r, minn.r, horizontaly);
auto gv = normalize(maxx.r, minn.r, verticaly);
auto bh = normalize(maxx.r, minn.r, horizontalz);
auto bv = normalize(maxx.r, minn.r, verticalz);
dir.vertical = { rv , gv , bv , 0 };
dir.horizontal = { rh , gh , bh , 0};
return dir;
}
/* pos 0 = vertical convolution kernel
pos 1 = horizontal convolution kernel */
__device__
RGB get_convolution_values(uint8_t* pixel, const int bpp, int pitch, uint8_t convolution, uint8_t border) {
int custom_kernel = 0;
convolution_directions convoluted ;
if (custom_kernel == 0) {
if (convolution == AXOMAE_USE_SOBEL) {
convoluted = compute_convolution(pixel, bpp, pitch, sobel_mask_horizontal, sobel_mask_vertical, border);
}
else if (convolution == AXOMAE_USE_PREWITT) {
convoluted = compute_convolution(pixel, bpp, pitch, prewitt_mask_horizontal, prewitt_mask_vertical, border);
}
else {
convoluted = compute_convolution(pixel, bpp, pitch, scharr_mask_horizontal, scharr_mask_vertical, border);
}
RGB var = convoluted.vertical.magnitude_rgb(convoluted.vertical , convoluted.horizontal);
return var;
}
else {
//TODO : add custom kernels processing
return { 0 , 0 , 0 , 0 };
}
}
__device__ RGB compute_normal(uint8_t* pixel, int bpp, int pitch, double factor) {
RGB center = get_pixel_value_at(pixel, 0, 0, bpp, pitch);
RGB west = get_pixel_value_at(pixel, 0, -1, bpp, pitch);
RGB north_west = get_pixel_value_at(pixel, -1, -1, bpp, pitch);
RGB north = get_pixel_value_at(pixel, -1, 0, bpp, pitch);
RGB north_east = get_pixel_value_at(pixel, -1, 1, bpp, pitch);
RGB east = get_pixel_value_at(pixel, 0, 1, bpp, pitch);
RGB south_east = get_pixel_value_at(pixel, 1, 1, bpp, pitch);
RGB south = get_pixel_value_at(pixel, 1, 0, bpp, pitch);
RGB south_west = get_pixel_value_at(pixel, 1, -1, bpp, pitch);
float dx = factor * (east.g - west.g) / 255;
float dy = factor * (north.g - south.g) / 255;
float ddx = factor * (north_east.g - south_west.g) / 255;
float ddy = factor * (north_west.g - south_east.g) / 255;
auto Nx = normalize(-1, 1, lerp(dy, ddy, 0.5));
auto Ny = normalize(-1, 1, lerp(dx, ddx, 0.5));
auto Nz = 255;
if (Nx >= 255)
Nx = 255;
else if (Nx <= 0)
Nx = 0;
if (Ny >= 255)
Ny = 255;
else if (Ny <= 0)
Ny = 0;
return { (int)floor(Nx) , (int)floor(Ny) , Nz , 0 };
}
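// Note: despite their names, replace_min below keeps the component-wise maximum seen so far in
// max_int_rgb, while replace_max keeps the component-wise minimum in min_int_rgb.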
__device__ static void replace_min(RGB rgb) {
uint32_t* max = &max_int_rgb;
uint8_t* pixel = (uint8_t*)max;
RGB maxx = int_to_rgb(pixel, 4);
maxx.r = maxx.r >= rgb.r ? maxx.r : rgb.r;
maxx.g = maxx.g >= rgb.g ? maxx.g : rgb.g;
maxx.b = maxx.b >= rgb.b ? maxx.b : rgb.b;
*max = rgb_to_int(maxx);
}
__device__ static void replace_max(RGB rgb) {
uint32_t* min = &min_int_rgb;
uint8_t* pixel = (uint8_t*)min;
RGB minn = int_to_rgb(pixel, 4);
minn.r = minn.r < rgb.r ? minn.r : rgb.r;
minn.g = minn.g < rgb.g ? minn.g : rgb.g;
minn.b = minn.b < rgb.b ? minn.b : rgb.b;
*min = rgb_to_int(minn);
}
/* kernels */
/*********************************************************************************************************************************************/
__global__
void GPU_compute_greyscale(void *array, int size_w, int size_h, const int bpp, int pitch, const bool luminance) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if (i < size_w && j < size_h ) {
RGB rgb = { 0 , 0 , 0 , 0 };
uint8_t* pixel_value = (uint8_t*)(array)+i*bpp + j*pitch;
rgb = compute_greyscale(int_to_rgb(pixel_value, bpp), luminance);
set_pixel_color(pixel_value, rgb, bpp);
}
}
__global__
void GPU_compute_edges(void* image, void* save, unsigned int width, unsigned int height, int bpp, int pitch, uint8_t convolution, uint8_t border) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < width - 1 && j < height - 1 && i > 0 && j > 0) {
uint8_t* pixel = (uint8_t*)(image)+i*bpp + j*pitch;
uint8_t* p = (uint8_t*)(save)+i * bpp + j*pitch;
RGB rgb = get_convolution_values(pixel, bpp, pitch, convolution, border);
set_pixel_color(p, rgb, bpp);
}
else {
uint8_t* pixel = (uint8_t*)(image)+i*bpp + j*pitch;
if (border == AXOMAE_REPEAT) {
}
else if (border == AXOMAE_CLAMP)
{
}
else {
}
}
}
__global__
void GPU_compute_normals(void* image, void* save, unsigned int width, unsigned int height, int bpp, int pitch, double factor, uint8_t border) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
uint8_t* pixel = (uint8_t*)(image)+i * bpp + j * pitch;
uint8_t* write = (uint8_t*)(save)+i*bpp + j*pitch;
if (i < width - 1 && j < height - 1 && i > 1 && j > 1) {
RGB rgb = compute_normal(pixel, bpp, pitch, factor);
set_pixel_color(write, rgb, bpp);
}
else {
if (border == AXOMAE_REPEAT) {
}
else if (border == AXOMAE_CLAMP)
{
}
else {
}
}
}
/*host functions*/
/*********************************************************************************************************************************************/
gpu_threads get_optimal_thread_distribution(int width, int height, int pitch, int bpp) {
gpu_threads value;
int flat_array_size = width*height;
/*needs compute capability >= 2.0 for 24x24 = 576 threads per block*/
dim3 threads = dim3(24, 24);
value.threads = threads;
if (flat_array_size <= threads.y * threads.x) {
dim3 blocks = dim3(1);
value.blocks = blocks;
}
else {
float divx = (float)width / threads.x;
float divy = (float)height / threads.y;
int blockx = (::floor(divx) == divx) ? static_cast<int>(divx) : ::floor(divx) + 1;
int blocky = (::floor(divy) == divy) ? static_cast<int>(divy) : ::floor(divy) + 1;
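// Equivalent integer-only ceil division (illustrative): blockx = (width + threads.x - 1) / threads.x;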
dim3 blocks(blockx, blocky);
value.blocks = blocks;
}
std::cout << "launching kernel with : " << value.blocks.x << " " << value.blocks.y << "\n";
return value;
}
static void check_error() {
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
std::cout << hipGetErrorString(err) << "\n";
}
}
void GPU_compute_greyscale(SDL_Surface* image, const bool luminance) {
int width = image->w;
int height = image->h;
int pitch = image->pitch;
int bpp = image->format->BytesPerPixel;
void* D_image;
size_t size = pitch * height;
hipMalloc((void**)&D_image, size);
hipMemcpy(D_image, image->pixels, size, hipMemcpyHostToDevice);
gpu_threads D = get_optimal_thread_distribution(width, height, pitch, bpp);
GPU_compute_greyscale << <D.blocks, D.threads >> > (D_image, width, height, bpp, pitch, luminance);
check_error();
SDL_LockSurface(image);
hipMemcpy(image->pixels, D_image, size, hipMemcpyDeviceToHost);
SDL_UnlockSurface(image);
hipFree(D_image);
}
void GPU_compute_height(SDL_Surface* greyscale, uint8_t convolution, uint8_t border) {
SDLSurfParam param(greyscale);
void* D_image , *R_image;
size_t size = param.getByteSize();
hipMalloc((void**)&D_image, size);
hipMalloc((void**)&R_image, size);
hipMemcpy(D_image, param.data, size, hipMemcpyHostToDevice);
gpu_threads D = get_optimal_thread_distribution(param.width, param.height, param.pitch, param.bpp);
D.blocks.x++; // border management
D.blocks.y++; //
GPU_compute_edges << < D.blocks, D.threads >> > (D_image,R_image, param.width, param.height, param.bpp, param.pitch, convolution, border);
check_error();
SDL_LockSurface(greyscale);
hipMemcpy(greyscale->pixels, R_image, size, hipMemcpyDeviceToHost);
SDL_UnlockSurface(greyscale);
hipFree(D_image);
hipFree(R_image);
}
void GPU_compute_normal(SDL_Surface* height, double factor, uint8_t border) {
SDLSurfParam param(height);
void * D_image, *D_save;
hipMalloc((void**)&D_image, param.getByteSize());
hipMalloc((void**)&D_save, param.getByteSize());
hipMemcpy(D_image, param.data, param.getByteSize(), hipMemcpyHostToDevice);
gpu_threads blocks = get_optimal_thread_distribution(param.width, param.height, param.pitch, param.bpp);
blocks.blocks.x++;
blocks.blocks.y++;
GPU_compute_normals << < blocks.blocks, blocks.threads >> > (D_image, D_save, param.width, param.height, param.bpp, param.pitch, factor, border);
check_error();
SDL_LockSurface(height);
hipMemcpy(height->pixels, D_save, param.getByteSize(), hipMemcpyDeviceToHost);
SDL_UnlockSurface(height);
hipFree(D_image);
hipFree(D_save);
}
};
|
13a2d21a2c4d82a2cd77e5f8d457b53b31095332.cu
|
#include "Kernel.cuh"
#include <cmath>
#include <sstream>
namespace axomae {
__device__
const static bool isbigEndian = SDL_BIG_ENDIAN == SDL_BYTEORDER;
__device__
static uint32_t max_int_rgb = 0;
__device__
static uint32_t min_int_rgb = UINT32_MAX;
struct Triplet {
int x;
int y;
int z;
};
struct RGB {
public:
uint8_t r;
uint8_t g;
uint8_t b;
uint8_t a;
__device__
void operator=(RGB rgb) {
this->r = rgb.r;
this->g = rgb.g;
this->b = rgb.b;
this->a = rgb.a;
}
__host__ std::string string() {
return std::to_string(r) + " " + std::to_string(g) + " " + std::to_string(b) + "\n";
}
__device__
RGB operator+(RGB rgb) {
return { uint8_t(r + rgb.r) ,uint8_t(g + rgb.g) ,uint8_t(b + rgb.b) ,uint8_t(a + rgb.a) };
}
template<typename T> __device__
RGB operator+(T rgb) {
return { r + rgb , g + rgb , b + rgb , a + rgb };
}
__device__
RGB operator*(RGB rgb) {
return { uint8_t(r * rgb.r) ,uint8_t(g * rgb.g) ,uint8_t(b * rgb.b) ,uint8_t(a * rgb.a) };
}
template<typename T> __device__
RGB operator*(T value) {
return { uint8_t(r * value) ,uint8_t(g * value) ,uint8_t(b * value) ,uint8_t(a * value) };
}
__device__
RGB normalize_rgb(RGB max , RGB min) {
uint8_t n_red = normalize(max.r ,min.r , r);
uint8_t n_green = normalize(max.g, min.g, g);
uint8_t n_blue = normalize(max.b, min.b, b);
return { n_red , n_green , n_blue , 0 };
}
/*compute the magnitude between two RGB values*/
__device__
RGB magnitude_rgb(RGB horizontal , RGB vertical) {
RGB rgb;
rgb.r = (uint8_t)magnitude(vertical.r, horizontal.r);
rgb.g = (uint8_t)magnitude(vertical.g, horizontal.g);
rgb.b = (uint8_t)magnitude(vertical.b, horizontal.b);
rgb.a = (uint8_t)magnitude(vertical.a, horizontal.a);
return rgb;
}
__device__
void print() {
printf("%i %i %i\n", r, g, b);
}
};
class SDLSurfParam {
public:
unsigned int width;
unsigned int height;
int bpp;
int pitch;
void* data;
SDLSurfParam(SDL_Surface* im) {
width = im->w;
height = im->h;
bpp = im->format->BytesPerPixel;
pitch = im->pitch;
data = im->pixels;
}
SDLSurfParam() {
}
size_t getByteSize() {
return height * pitch;
}
};
template<typename T>
struct custom_convolution_kernel {
T* array;
uint8_t size_w;
uint8_t size_h;
};
struct gpu_threads {
dim3 threads;
dim3 blocks;
};
/*device*/
/*********************************************************************************************************************************************/
__host__ __device__
uint32_t rgb_to_int(RGB val) {
uint32_t value = (isbigEndian) ? val.a | (val.b << 8) | (val.g << 16) | (val.r << 24) : val.r | (val.g << 8) | (val.b << 16) | (val.a << 24);
return value;
}
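// Illustrative example (not from the original): on a little-endian host (isbigEndian == false),
// rgb_to_int({0x11, 0x22, 0x33, 0x44}) packs to 0x44332211, the same byte order that
// set_pixel_color() writes back for bpp == 4.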
__device__
void initialize_2D_array(uint32_t *array, int size_w, int size_h) {
int i = blockIdx.x;
int j = threadIdx.x;
array[i*size_w + j] = 0;
}
__device__
RGB compute_greyscale(RGB rgb, const bool luminance) {
RGB ret;
if (luminance) {
ret.r = rgb.r* 0.3 + rgb.g*0.59 + rgb.b*0.11;
ret.g = ret.r;
ret.b = ret.r;
}
else
{
ret.r = (int)((rgb.r + rgb.b + rgb.g) / 3);
ret.g = ret.r;
ret.b = ret.r;
}
return ret;
}
__device__
RGB int_to_rgb(uint8_t* pixel_value, const int bpp) {
RGB rgb = { 0 , 0 , 0 , 0 };
if (bpp == 4) {
// read the whole 32-bit pixel before unpacking; shifting a single byte by 24 would always give 0
uint32_t v = *(uint32_t*)pixel_value;
if (isbigEndian) {
rgb.r = v >> 24 & 0xFF;
rgb.g = v >> 16 & 0xFF;
rgb.b = v >> 8 & 0xFF;
rgb.a = v & 0xFF;
}
else {
rgb.a = v >> 24 & 0xFF;
rgb.b = v >> 16 & 0xFF;
rgb.g = v >> 8 & 0xFF;
rgb.r = v & 0xFF;
}
}
else if (bpp == 3) {
if (isbigEndian) {
rgb.r = pixel_value[0];
rgb.g = pixel_value[1];
rgb.b = pixel_value[2];
rgb.a = 0;
}
else {
rgb.b = pixel_value[0];
rgb.g = pixel_value[1];
rgb.r = pixel_value[2];
rgb.a = 0;
}
}
else if (bpp == 2) {
// read the whole 16-bit pixel before unpacking
uint16_t v = *(uint16_t*)pixel_value;
if (isbigEndian) {
rgb.r = v >> 12 & 0xF;
rgb.g = v >> 8 & 0XF;
rgb.b = v >> 4 & 0XF;
rgb.a = v & 0XF;
}
else {
rgb.a = v >> 12 & 0xF;
rgb.b = v >> 8 & 0XF;
rgb.g = v >> 4 & 0XF;
rgb.r = v & 0XF;
}
}
else if (bpp == 1) {
if (isbigEndian) {
rgb.r = *pixel_value >> 5 & 0X7;
rgb.g = *pixel_value >> 2 & 0X7;
rgb.b = *pixel_value & 0X3;
rgb.a = 0;
}
else {
rgb.b = *pixel_value >> 5 & 0X7;
rgb.g = *pixel_value >> 2 & 0X7;
rgb.r = *pixel_value & 0X3;
rgb.a = 0;
}
}
return rgb;
}
__device__
void set_pixel_color(uint8_t* pixel_value, RGB rgb, const int bpp) {
uint32_t toInt = rgb_to_int(rgb);
if (bpp == 4)
{
*(uint32_t*)(pixel_value) = toInt;
}
else if (bpp == 3) {
if (isbigEndian) {
((uint8_t*)pixel_value)[0] = toInt >> 16 & 0xFF;
((uint8_t*)pixel_value)[1] = toInt >> 8 & 0xFF;
((uint8_t*)pixel_value)[2] = toInt & 0xFF;
}
else {
((uint8_t*)pixel_value)[0] = toInt & 0xFF;
((uint8_t*)pixel_value)[1] = toInt >> 8 & 0xFF;
((uint8_t*)pixel_value)[2] = toInt >> 16 & 0xFF;
}
}
else if (bpp == 2) {
*((uint16_t*)pixel_value) = toInt;
}
else
{
*pixel_value = toInt;
}
}
__device__
RGB get_pixel_value_at(uint8_t *pixel, int i, int j, const int bpp, int pitch) {
uint8_t* p = (uint8_t*)(pixel) + i*bpp + j*pitch;
RGB A = int_to_rgb(p, bpp);
// printf("%i %i %i %i %i %i\n", p[0], p[1], p[2] , A.r , A.g , A.b);
return A;
}
struct convolution_directions {
RGB vertical;
RGB horizontal;
};
//TODO : case kernel < 0
__device__
convolution_directions compute_convolution(uint8_t * pixel, const int bpp, int pitch, const int h_kernel[KERNEL_SIZE][KERNEL_SIZE], const int v_kernel[KERNEL_SIZE][KERNEL_SIZE], uint8_t border_flag) {
RGB center = get_pixel_value_at(pixel, 0, 0, bpp, pitch);
RGB west = get_pixel_value_at(pixel, 0, -1, bpp, pitch); // note: reads out of bounds for border pixels; callers only invoke this on interior pixels
RGB north_west = get_pixel_value_at(pixel, -1, -1, bpp, pitch);
RGB north = get_pixel_value_at(pixel, -1, 0, bpp, pitch);
RGB north_east = get_pixel_value_at(pixel, -1, 1, bpp, pitch);
RGB east = get_pixel_value_at(pixel, 0, 1, bpp, pitch);
RGB south_east = get_pixel_value_at(pixel, 1, 1, bpp, pitch);
RGB south = get_pixel_value_at(pixel, 1, 0, bpp, pitch);
RGB south_west = get_pixel_value_at(pixel, 1, -1, bpp, pitch);
double verticalx = north_west.r * v_kernel[0][0] + north.r*v_kernel[0][1] + north_east.r*v_kernel[0][2] +
west.r * v_kernel[1][0] + center.r*v_kernel[1][1] + east.r*v_kernel[1][2] +
south_west.r * v_kernel[2][0] + south.r*v_kernel[2][1] + south_east.r*v_kernel[2][2];
double verticaly = north_west.g * v_kernel[0][0] + north.g*v_kernel[0][1] + north_east.g*v_kernel[0][2] +
west.g * v_kernel[1][0] + center.g*v_kernel[1][1] + east.g*v_kernel[1][2] +
south_west.g * v_kernel[2][0] + south.g*v_kernel[2][1] + south_east.g*v_kernel[2][2];
double verticalz = north_west.b * v_kernel[0][0] + north.b*v_kernel[0][1] + north_east.b*v_kernel[0][2] +
west.b * v_kernel[1][0] + center.b*v_kernel[1][1] + east.b*v_kernel[1][2] +
south_west.b * v_kernel[2][0] + south.b*v_kernel[2][1] + south_east.b*v_kernel[2][2];
double horizontalx = north_west.r * h_kernel[0][0] + north.r*h_kernel[0][1] + north_east.r*h_kernel[0][2] +
west.r * h_kernel[1][0] + center.r*h_kernel[1][1] + east.r*h_kernel[1][2] +
south_west.r * h_kernel[2][0] + south.r*h_kernel[2][1] + south_east.r*h_kernel[2][2];
double horizontaly = north_west.g * h_kernel[0][0] + north.g*h_kernel[0][1] + north_east.g*h_kernel[0][2] +
west.g * h_kernel[1][0] + center.g*h_kernel[1][1] + east.g*h_kernel[1][2] +
south_west.g * h_kernel[2][0] + south.g*h_kernel[2][1] + south_east.g*h_kernel[2][2];
double horizontalz = north_west.b * h_kernel[0][0] + north.b*h_kernel[0][1] + north_east.b*h_kernel[0][2] +
west.b * h_kernel[1][0] + center.b*h_kernel[1][1] + east.b*h_kernel[1][2] +
south_west.b * h_kernel[2][0] + south.b*h_kernel[2][1] + south_east.b*h_kernel[2][2];
convolution_directions dir;
RGB minn = { 0, 0, 0, 0 };
RGB maxx = { 255 , 255 , 255 , 255 };
auto rh = normalize(maxx.r, minn.r, horizontalx);
auto rv = normalize(maxx.r, minn.r, verticalx);
auto gh = normalize(maxx.r, minn.r, horizontaly);
auto gv = normalize(maxx.r, minn.r, verticaly);
auto bh = normalize(maxx.r, minn.r, horizontalz);
auto bv = normalize(maxx.r, minn.r, verticalz);
dir.vertical = { rv , gv , bv , 0 };
dir.horizontal = { rh , gh , bh , 0};
return dir;
}
/* pos 0 = vertical convolution kernel
pos 1 = horizontal convolution kernel */
__device__
RGB get_convolution_values(uint8_t* pixel, const int bpp, int pitch, uint8_t convolution, uint8_t border) {
int custom_kernel = 0;
convolution_directions convoluted ;
if (custom_kernel == 0) {
if (convolution == AXOMAE_USE_SOBEL) {
convoluted = compute_convolution(pixel, bpp, pitch, sobel_mask_horizontal, sobel_mask_vertical, border);
}
else if (convolution == AXOMAE_USE_PREWITT) {
convoluted = compute_convolution(pixel, bpp, pitch, prewitt_mask_horizontal, prewitt_mask_vertical, border);
}
else {
convoluted = compute_convolution(pixel, bpp, pitch, scharr_mask_horizontal, scharr_mask_vertical, border);
}
RGB var = convoluted.vertical.magnitude_rgb(convoluted.vertical , convoluted.horizontal);
return var;
}
else {
//TODO : add custom kernels processing
return { 0 , 0 , 0 , 0 };
}
}
__device__ RGB compute_normal(uint8_t* pixel, int bpp, int pitch, double factor) {
RGB center = get_pixel_value_at(pixel, 0, 0, bpp, pitch);
RGB west = get_pixel_value_at(pixel, 0, -1, bpp, pitch);
RGB north_west = get_pixel_value_at(pixel, -1, -1, bpp, pitch);
RGB north = get_pixel_value_at(pixel, -1, 0, bpp, pitch);
RGB north_east = get_pixel_value_at(pixel, -1, 1, bpp, pitch);
RGB east = get_pixel_value_at(pixel, 0, 1, bpp, pitch);
RGB south_east = get_pixel_value_at(pixel, 1, 1, bpp, pitch);
RGB south = get_pixel_value_at(pixel, 1, 0, bpp, pitch);
RGB south_west = get_pixel_value_at(pixel, 1, -1, bpp, pitch);
float dx = factor * (east.g - west.g) / 255;
float dy = factor * (north.g - south.g) / 255;
float ddx = factor * (north_east.g - south_west.g) / 255;
float ddy = factor * (north_west.g - south_east.g) / 255;
auto Nx = normalize(-1, 1, lerp(dy, ddy, 0.5));
auto Ny = normalize(-1, 1, lerp(dx, ddx, 0.5));
auto Nz = 255;
if (Nx >= 255)
Nx = 255;
else if (Nx <= 0)
Nx = 0;
if (Ny >= 255)
Ny = 255;
else if (Ny <= 0)
Ny = 0;
return { (int)floor(Nx) , (int)floor(Ny) , Nz , 0 };
}
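// Note: despite their names, replace_min below keeps the component-wise maximum seen so far in
// max_int_rgb, while replace_max keeps the component-wise minimum in min_int_rgb.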
__device__ static void replace_min(RGB rgb) {
uint32_t* max = &max_int_rgb;
uint8_t* pixel = (uint8_t*)max;
RGB maxx = int_to_rgb(pixel, 4);
maxx.r = maxx.r >= rgb.r ? maxx.r : rgb.r;
maxx.g = maxx.g >= rgb.g ? maxx.g : rgb.g;
maxx.b = maxx.b >= rgb.b ? maxx.b : rgb.b;
*max = rgb_to_int(maxx);
}
__device__ static void replace_max(RGB rgb) {
uint32_t* min = &min_int_rgb;
uint8_t* pixel = (uint8_t*)min;
RGB minn = int_to_rgb(pixel, 4);
minn.r = minn.r < rgb.r ? minn.r : rgb.r;
minn.g = minn.g < rgb.g ? minn.g : rgb.g;
minn.b = minn.b < rgb.b ? minn.b : rgb.b;
*min = rgb_to_int(minn);
}
/* kernels */
/*********************************************************************************************************************************************/
__global__
void GPU_compute_greyscale(void *array, int size_w, int size_h, const int bpp, int pitch, const bool luminance) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if (i < size_w && j < size_h ) {
RGB rgb = { 0 , 0 , 0 , 0 };
uint8_t* pixel_value = (uint8_t*)(array)+i*bpp + j*pitch;
rgb = compute_greyscale(int_to_rgb(pixel_value, bpp), luminance);
set_pixel_color(pixel_value, rgb, bpp);
}
}
__global__
void GPU_compute_edges(void* image, void* save, unsigned int width, unsigned int height, int bpp, int pitch, uint8_t convolution, uint8_t border) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < width - 1 && j < height - 1 && i > 0 && j > 0) {
uint8_t* pixel = (uint8_t*)(image)+i*bpp + j*pitch;
uint8_t* p = (uint8_t*)(save)+i * bpp + j*pitch;
RGB rgb = get_convolution_values(pixel, bpp, pitch, convolution, border);
set_pixel_color(p, rgb, bpp);
}
else {
uint8_t* pixel = (uint8_t*)(image)+i*bpp + j*pitch;
if (border == AXOMAE_REPEAT) {
}
else if (border == AXOMAE_CLAMP)
{
}
else {
}
}
}
__global__
void GPU_compute_normals(void* image, void* save, unsigned int width, unsigned int height, int bpp, int pitch, double factor, uint8_t border) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
uint8_t* pixel = (uint8_t*)(image)+i * bpp + j * pitch;
uint8_t* write = (uint8_t*)(save)+i*bpp + j*pitch;
if (i < width - 1 && j < height - 1 && i > 1 && j > 1) {
RGB rgb = compute_normal(pixel, bpp, pitch, factor);
set_pixel_color(write, rgb, bpp);
}
else {
if (border == AXOMAE_REPEAT) {
}
else if (border == AXOMAE_CLAMP)
{
}
else {
}
}
}
/*host functions*/
/*********************************************************************************************************************************************/
gpu_threads get_optimal_thread_distribution(int width, int height, int pitch, int bpp) {
gpu_threads value;
int flat_array_size = width*height;
/*needs compute capability >= 2.0 for 24x24 = 576 threads per block*/
dim3 threads = dim3(24, 24);
value.threads = threads;
if (flat_array_size <= threads.y * threads.x) {
dim3 blocks = dim3(1);
value.blocks = blocks;
}
else {
float divx = (float)width / threads.x;
float divy = (float)height / threads.y;
int blockx = (std::floor(divx) == divx) ? static_cast<int>(divx) : std::floor(divx) + 1;
int blocky = (std::floor(divy) == divy) ? static_cast<int>(divy) : std::floor(divy) + 1;
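// Equivalent integer-only ceil division (illustrative): blockx = (width + threads.x - 1) / threads.x;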
dim3 blocks(blockx, blocky);
value.blocks = blocks;
}
std::cout << "launching kernel with : " << value.blocks.x << " " << value.blocks.y << "\n";
return value;
}
static void check_error() {
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
std::cout << cudaGetErrorString(err) << "\n";
}
}
void GPU_compute_greyscale(SDL_Surface* image, const bool luminance) {
int width = image->w;
int height = image->h;
int pitch = image->pitch;
int bpp = image->format->BytesPerPixel;
void* D_image;
size_t size = pitch * height;
cudaMalloc((void**)&D_image, size);
cudaMemcpy(D_image, image->pixels, size, cudaMemcpyHostToDevice);
gpu_threads D = get_optimal_thread_distribution(width, height, pitch, bpp);
GPU_compute_greyscale << <D.blocks, D.threads >> > (D_image, width, height, bpp, pitch, luminance);
check_error();
SDL_LockSurface(image);
cudaMemcpy(image->pixels, D_image, size, cudaMemcpyDeviceToHost);
SDL_UnlockSurface(image);
cudaFree(D_image);
}
void GPU_compute_height(SDL_Surface* greyscale, uint8_t convolution, uint8_t border) {
SDLSurfParam param(greyscale);
void* D_image , *R_image;
size_t size = param.getByteSize();
cudaMalloc((void**)&D_image, size);
cudaMalloc((void**)&R_image, size);
cudaMemcpy(D_image, param.data, size, cudaMemcpyHostToDevice);
gpu_threads D = get_optimal_thread_distribution(param.width, param.height, param.pitch, param.bpp);
D.blocks.x++; // border management
D.blocks.y++; //
GPU_compute_edges << < D.blocks, D.threads >> > (D_image,R_image, param.width, param.height, param.bpp, param.pitch, convolution, border);
check_error();
SDL_LockSurface(greyscale);
cudaMemcpy(greyscale->pixels, R_image, size, cudaMemcpyDeviceToHost);
SDL_UnlockSurface(greyscale);
cudaFree(D_image);
cudaFree(R_image);
}
void GPU_compute_normal(SDL_Surface* height, double factor, uint8_t border) {
SDLSurfParam param(height);
void * D_image, *D_save;
cudaMalloc((void**)&D_image, param.getByteSize());
cudaMalloc((void**)&D_save, param.getByteSize());
cudaMemcpy(D_image, param.data, param.getByteSize(), cudaMemcpyHostToDevice);
gpu_threads blocks = get_optimal_thread_distribution(param.width, param.height, param.pitch, param.bpp);
blocks.blocks.x++;
blocks.blocks.y++;
GPU_compute_normals << < blocks.blocks, blocks.threads >> > (D_image, D_save, param.width, param.height, param.bpp, param.pitch, factor, border);
check_error();
SDL_LockSurface(height);
cudaMemcpy(height->pixels, D_save, param.getByteSize(), cudaMemcpyDeviceToHost);
SDL_UnlockSurface(height);
cudaFree(D_image);
cudaFree(D_save);
}
};
|
dc61a42781089d7826aaf0eaeb36af34307aff8a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <qudaQKXTM.h>
#include <errno.h>
#include <mpi.h>
#include <cuPrintf.cu>
#include <arlib.h>
#include <lanczos.h>
#include <limits>
#define THREADS_PER_BLOCK 32
#define PI 3.141592653589793
using namespace quda;
// $$ Section 1: Texture References $$
/* texture block */
texture<int4, 1> gaugeTexPlaq; // use this texture reference only for plaquette
texture<int4, 1> gaugeTexAPE; // use this for APE smearing; this texture will be bound and unbound
texture<int4, 1> vectorTexGauss; // this texture is needed for Gaussian smearing
texture<int4, 1> propagatorTexAPE; // APE smearing needs a propagator structure
texture<int4, 1> gaugeTexNorm2;
texture<int4, 1> vectorTexNorm2; // to find the norm
texture<int4, 1> propagatorTexNorm2;
texture<int4, 1> propagatorTexOne; // for contractions
texture<int4, 1> propagatorTexTwo;
texture<int4, 1> correlationTex;
texture<int4, 1> propagator3DTex1;
texture<int4, 1> propagator3DTex2;
texture<int4, 1> seqPropagatorTex;
texture<int4, 1> fwdPropagatorTex;
texture<int4, 1> gaugeDerivativeTex;
texture<int4, 1> phiVectorStochTex;
texture<int4, 1> propStochTex;
texture<int4, 1> insLineFourierTex;
texture<int4, 1> uprop3DStochTex;
texture<int4, 1> dprop3DStochTex;
texture<int4, 1> sprop3DStochTex;
texture<int4, 1> insLineMomTex;
texture<int4, 1> xiVector3DStochTex;
texture<int4, 1> gaugePath; // bind standard texture for wilson path
texture<int4, 1>gaugeTexHYP;
texture<int4, 1>propagatorTexHYP;
texture<int4, 1>fieldTex;
texture<int2, 1>matrixTex;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// $$ Section 2: Constant Refeneces $$
/* block for device constants */
__constant__ bool c_dimBreak[4];
__constant__ int c_nColor;
__constant__ int c_nDim;
__constant__ int c_localL[4];
__constant__ int c_plusGhost[4];
__constant__ int c_minusGhost[4];
__constant__ int c_stride;
__constant__ int c_surface[4];
__constant__ int c_nSpin;
__constant__ double c_alphaAPE;
__constant__ double c_alphaGauss;
__constant__ int c_threads;
__constant__ int c_eps[6][3];
__constant__ int c_sgn_eps[6];
__constant__ int c_procPosition[4];
__constant__ int c_sourcePosition[4];
__constant__ int c_totalL[4];
__constant__ double c_matrixQ[50*50];
__constant__ double c_tolArnoldi;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// $$ Section 3: Device /*Inline*/ Functions $$
/* Block for device kernels */
#if (__COMPUTE_CAPABILITY__ >= 130)
__inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i)
{
int4 v = tex1Dfetch(t,i);
return make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z));
}
__inline__ __device__ double fetch_double(texture<int2, 1> t, int i)
{
int2 v = tex1Dfetch(t,i);
return __hiloint2double(v.y,v.x);
}
#else
__inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i)
{
// do nothing
return make_double2(0.0, 0.0);
}
#endif
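// Doubles are fetched through int4/int2 textures because texture units cannot fetch 64-bit types
// directly; __hiloint2double reassembles each double from its high and low 32-bit halves.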
__device__ inline double2 operator*(const double a , const double2 b){
double2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
__device__ inline double2 operator*(const int a , const double2 b){
double2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
__device__ inline double2 operator*(const double2 a, const double2 b){
double2 res;
res.x = a.x*b.x - a.y*b.y;
res.y = a.x*b.y + a.y*b.x;
return res;
}
__device__ inline double2 operator+(const double2 a, const double2 b){
double2 res;
res.x = a.x + b.x;
res.y = a.y + b.y;
return res;
}
__device__ inline double2 operator-(const double2 a, const double2 b){
double2 res;
res.x = a.x - b.x;
res.y = a.y - b.y;
return res;
}
__device__ inline double2 conj(const double2 a){
double2 res;
res.x = a.x;
res.y = -a.y;
return res;
}
__device__ inline double norm(const double2 a){
double res;
res = sqrt(a.x*a.x + a.y*a.y);
return res;
}
__device__ inline double norm2(const double2 a){
double res;
res = a.x*a.x + a.y*a.y;
return res;
}
__device__ inline double2 cpow(const double2 x , const double a){
double2 res;
res.x = pow(norm(x),a) * cos( atan2(x.y,x.x) * a);
res.y = pow(norm(x),a) * sin( atan2(x.y,x.x) * a);
return res;
}
__device__ inline double2 operator/(const double2 x, const double2 y){
double2 res;
res.x = (x.x * y.x + x.y * y.y) / (y.x * y.x + y.y * y.y);
res.y = (x.y * y.x - x.x * y.y) / (y.x * y.x + y.y * y.y);
return res;
}
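// Minimal sketch of the complex double2 helpers above (values illustrative):
//   double2 a = make_double2(1.0, 2.0), b = make_double2(3.0, -1.0);
//   double2 c = a*b + conj(a)/b;   // complex multiply, conjugate and divide
//   double  r = norm2(c);          // squared modulus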
// $$ Section 4: Device Kernels $$
#include <core_def.h>
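// Each __global__ kernel below is only a thin wrapper: its body is textually
// included from the corresponding *_core.h header, so the indexing and
// constant-memory boilerplate (expected to come from core_def.h above) is
// shared by all kernels rather than repeated.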
__global__ void calculatePlaq_kernel(double *partial_plaq){
#include <plaquette_core.h>
}
__global__ void APE_kernel_1(double2 *prp , double2 *out){
#include <APE_core_1.h>
}
#ifdef _PROPAGATOR_APE_TEX
__global__ void APE_kernel_2(double2 *out){
#include <APE_core_2.h>
}
#else
__global__ void APE_kernel_2(double2 *prop,double2 *out){
#include <APE_core_2.h>
}
#endif
__global__ void Gauss_kernel(double2 *out){
#include <Gauss_core.h>
}
__global__ void norm2Gauge_kernel(double *cache){
#include <norm2Gauge_core.h>
}
__global__ void norm2Vector_kernel(double *cache){
#include <norm2Vector_core.h>
}
__global__ void norm2Propagator_kernel(double *cache){
#include <norm2Propagator_core.h>
}
__global__ void uploadToCuda_kernel(double2 *in, double2 *outEven, double2 *outOdd){
#include <uploadToCuda_core.h>
}
__global__ void downloadFromCuda_kernel(double2 *out, double2 *inEven, double2 *inOdd){
#include <downloadFromCuda_core.h>
}
__global__ void scaleVector_kernel(double2 *inOut,double a){
#include <scaleVector_core.h>
}
__global__ void rotateToPhysicalBase_kernel(double2 *inOut, int sign){
#include <rotateToPhysicalBase_core.h>
}
__global__ void contract_Type1_kernel(double2 *out){
#include <contract_Type1_core.h>
}
__global__ void fourierCorr_kernel(double2 *block ,int it ,int nx , int ny , int nz){
#include <fourierCorr_core.h>
}
__global__ void fourierCorr_kernel2(double2 *block ,int it ,int nx , int ny , int nz){
#include <fourierCorr_core2.h>
}
__global__ void fourierCorr_kernel3(double2 *block ,int it , int nx , int ny , int nz){
#include <fourierCorr_core3.h>
}
__global__ void fourierCorr_kernel4(double2 *block , int nx , int ny , int nz){
#include <fourierCorr_core4.h>
}
__global__ void seqSourceFixSinkPart1_kernel( double2 *out, int timeslice ,int c_nu, int c_c2, whatProjector typeProj, whatParticle testParticle ){
#include <seqSourceFixSinkPart1_core.h>
}
__global__ void seqSourceFixSinkPart2_kernel( double2 *out, int timeslice ,int c_nu, int c_c2, whatProjector typeProj, whatParticle testParticle ){
#include <seqSourceFixSinkPart2_core.h>
}
__global__ void conjugate_vector_kernel( double2 *inOut ){
#include <conjugate_vector_core.h>
}
__global__ void conjugate_propagator_kernel( double2 *inOut ){
#include <conjugate_propagator_core.h>
}
__global__ void apply_gamma5_vector_kernel( double2 *inOut ){
#include <apply_gamma5_vector_core.h>
}
__global__ void apply_gamma_transf_vector_kernel( double2 *inOut ){
#include <apply_gamma_transf_vector_core.h>
}
__global__ void apply_gamma5_propagator_kernel( double2 *inOut ){
#include <apply_gamma5_propagator_core.h>
}
__global__ void fixSinkContractions_local_kernel( double2 *out, int flag, whatParticle testParticle, int partFlag){ // partFlag must be 1 or 2
#include <fixSinkContractions_local_core.h>
}
__global__ void fixSinkContractions_oneD_kernel( double2 *out ,int flag,int dir ,whatParticle testParticle,int partFlag){
#include <fixSinkContractions_oneD_core.h>
}
__global__ void fixSinkContractions_noether_kernel( double2 *out ,int dir ,whatParticle testParticle,int partFlag){
#include <fixSinkContractions_noether_core.h>
}
__global__ void fixSinkContractions_nonLocal_kernel(double2 *out, double2 *deviceWilsonLinks, int dl, whatParticle testParticle, int partFlag,int direction){
#include <fixSinkContractions_nonLocal_core.h>
}
__global__ void fixSinkContractions_nonLocalBwd_kernel(double2 *out, double2 *deviceWilsonLinks, int dl, whatParticle testParticle, int partFlag,int direction){
#include <fixSinkContractions_nonLocalBwd_core.h>
}
__global__ void insLine_local_kernel( double2 *out , int iflag , int partFlag ){
#include <insLine_local_core.h>
}
__global__ void insLine_oneD_kernel(double2 *out, int iflag , int dir){
#include <insLine_oneD_core.h>
}
__global__ void insLine_oneD_kernel_new(double2 *out, int dir){
#include <insLine_oneD_core_new.h>
}
__global__ void insLine_noether_kernel(double2 *out, int dir){
#include <insLine_noether_core.h>
}
__global__ void contract3pf_Type1_1_kernel( double2 *out, int iflag, int index1 , int index2){
#include <contract3pf_Type1_1_core.h>
}
__global__ void contract3pf_Type1_2_kernel( double2 *out, int iflag, int index1){
#include <contract3pf_Type1_2_core.h>
}
__global__ void partial_lvl1_Contract3pf_Type1_1_kernel(double2 *out, int index1, int index2){
#include <partial_lvl1_Contract3pf_Type1_1_core.h>
}
__global__ void partial_lvl3_Contract3pf_Type1_1_kernel(double2 *out,int gamma,int gamma1, int index1, int index2){
#include <partial_lvl3_Contract3pf_Type1_1_core.h>
}
__global__ void partial_lvl3_Contract3pf_Type1_2_kernel(double2 *out,int gamma,int gamma1, int index1){
#include <partial_lvl3_Contract3pf_Type1_2_core.h>
}
///////////// NEW
__global__ void partial_Contract3pf_pion_kernel(double2 *out, int index){
#include <partial_Contract3pf_pion_core.h>
}
__global__ void insLine_Nonlocal_pion_kernel(double2 *out, double2 *deviceWilsonLinks, int dl, int dir,int index){
#include <insLine_Nonlocal_pion_core.h>
}
__global__ void contract_twop_pion_kernel(double2 *out){
#include <contract_twop_pion_core.h>
}
__global__ void fourierPion_kernel(double2 *block ,int it ,int nx , int ny , int nz){
#include <fourierPion_core.h>
}
__global__ void getVectorProp3D_kernel( double2 *out, int timeslice ,int nu,int c2){
#include <getVectorProp3D_core.h>
}
////////////////
__global__ void createWilsonPath_kernel(double2 *deviceWilsonPath,int direction){
#include <createWilsonPath_core.h>
}
__global__ void createWilsonPathBwd_kernel(double2 *deviceWilsonPath,int direction){
#include <createWilsonPathBwd_core.h>
}
__global__ void createWilsonPath_kernel_all(double2 *deviceWilsonPath){
#include <createWilsonPath_allDirections_core.h>
}
__global__ void insLine_Nonlocal_kernel(double2 *out, double2 *deviceWilsonLinks, int dl, int dir){
#include <insLine_Nonlocal_core.h>
}
__global__ void HYP3D_kernel_1(double2 *prp1){
#include <HYP3D_core_1.h>
}
__global__ void HYP3D_kernel_2(double2 *prp2){
#include <HYP3D_core_2.h>
}
__global__ void HYP3D_kernel_3(double2 *prp2, double omega2){
#include <HYP3D_core_3.h>
}
__global__ void HYP3D_kernel_4(double2 *prp1,double2 *out){
#include <HYP3D_core_4.h>
}
__global__ void HYP3D_kernel_5(double2 *out, double omega1){
#include <HYP3D_core_5.h>
}
__global__ void apply_momentum_kernel(double2 *vector, int nx , int ny , int nz){
#include <apply_momentum_core.h>
}
/*
__global__ void matrixNxMmatrixMxL_kernel(double2 *mNxM, int NL, int M, int L, bool transpose){
#include <matrixNxMmatrixMxL_core.h>
}
*/
__global__ void matrixNxMmatrixMxLReal_kernel(double2 *mNxM, int NL, int M, int L, bool transpose){
#include <matrixNxMmatrixMxLReal_core.h>
}
__global__ void matrixNxMmatrixMxLRealTexture_kernel(double2 *mNxM, int NL, int M, int L, bool transpose){
#include <matrixNxMmatrixMxLRealTexture_core.h>
}
__global__ void noiseCleaner_kernel(double2 *A){
#include <noiseCleaner_core.h>
}
__global__ void makeTridiagonal_kernel(double *A){
#include <makeTridiagonal_core.h>
}
__global__ void UxMomentumPhase_kernel(double2 *inOut, int px, int py, int pz, double zeta){
#include <UxMomentumPhase_core.h>
}
///////////////////////////////////////////////////////////////////////////////
// $$ Section 5: Static Global Variables $$
///////////////////////////////////////////////////
/* Block for static global variables */
float G_deviceMemory = 0.;
int G_nColor;
int G_nSpin;
int G_nDim;
int G_strideFull;
double G_alphaAPE;
double G_alphaGauss;
int G_localVolume;
int G_totalVolume;
int G_nsmearAPE;
int G_nsmearGauss;
bool G_dimBreak[QUDAQKXTM_DIM];
int G_localL[QUDAQKXTM_DIM];
int G_totalL[QUDAQKXTM_DIM];
int G_nProc[QUDAQKXTM_DIM];
int G_plusGhost[QUDAQKXTM_DIM];
int G_minusGhost[QUDAQKXTM_DIM];
int G_surface3D[QUDAQKXTM_DIM];
bool G_init_qudaQKXTM_flag = false;
int G_nsmearHYP;
double G_omega1HYP;
double G_omega2HYP;
// for mpi use global variables
MPI_Group G_fullGroup , G_spaceGroup , G_timeGroup;
MPI_Comm G_spaceComm , G_timeComm;
int G_localRank;
int G_localSize;
int G_timeRank;
int G_timeSize;
//////////////////////////////////////////////////
// $$ Section 6: Initialize qudaQKXTM $$
// initialization function for qudaQKXTM lib
void quda::init_qudaQKXTM(qudaQKXTMinfo *info){
if(G_init_qudaQKXTM_flag == false){
G_nColor = 3;
G_nSpin = 4;
G_nDim = QUDAQKXTM_DIM;
G_alphaAPE = info->alphaAPE;
G_alphaGauss = info->alphaGauss;
G_nsmearAPE = info->nsmearAPE;
G_nsmearGauss = info->nsmearGauss;
G_nsmearHYP = info->nsmearHYP;
G_omega1HYP = info->omega1HYP;
G_omega2HYP = info->omega2HYP;
// from here on the setup depends on the lattice size and on how the dimensions are split across processes
for(int i = 0 ; i < G_nDim ; i++)
G_nProc[i] = comm_dim(i);
for(int i = 0 ; i < G_nDim ; i++){ // take local and total lattice
G_localL[i] = info->lL[i];
G_totalL[i] = G_nProc[i] * G_localL[i];
}
G_localVolume = 1;
G_totalVolume = 1;
for(int i = 0 ; i < G_nDim ; i++){
G_localVolume *= G_localL[i];
G_totalVolume *= G_totalL[i];
}
G_strideFull = G_localVolume;
for (int i=0; i<G_nDim; i++) {
G_surface3D[i] = 1;
for (int j=0; j<G_nDim; j++) {
if (i==j) continue;
G_surface3D[i] *= G_localL[j];
}
}
for(int i = 0 ; i < G_nDim ; i++)
if( G_localL[i] == G_totalL[i] )
G_surface3D[i] = 0;
for(int i = 0 ; i < G_nDim ; i++){
G_plusGhost[i] =0;
G_minusGhost[i] = 0;
}
#ifdef MULTI_GPU
int lastIndex = G_localVolume;
for(int i = 0 ; i < G_nDim ; i++)
if( G_localL[i] < G_totalL[i] ){
G_plusGhost[i] = lastIndex ;
G_minusGhost[i] = lastIndex + G_surface3D[i];
lastIndex += 2*G_surface3D[i];
}
#endif
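// Worked example (illustrative): if only the t direction is split across
// processes, then G_surface3D[3] = G_localL[0]*G_localL[1]*G_localL[2] and the
// ghost zones are appended directly after the local volume:
//   G_plusGhost[3]  = G_localVolume
//   G_minusGhost[3] = G_localVolume + G_surface3D[3]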
for(int i = 0 ; i < G_nDim ; i++){
if( G_localL[i] < G_totalL[i])
G_dimBreak[i] = true;
else
G_dimBreak[i] = false;
}
const int eps[6][3]=
{
{0,1,2},
{2,0,1},
{1,2,0},
{2,1,0},
{0,2,1},
{1,0,2}
};
const int sgn_eps[6]=
{
+1,+1,+1,-1,-1,-1
};
int procPosition[4];
for(int i= 0 ; i < 4 ; i++)
procPosition[i] = comm_coords(i);
int sourcePosition[4];
// source position is taken from the input parameters
for(int i = 0 ; i < 4 ; i++)
sourcePosition[i] = info->sourcePosition[i];
// initialization also defines the device constants
hipMemcpyToSymbol(c_nColor, &G_nColor, sizeof(int) );
hipMemcpyToSymbol(c_nSpin, &G_nSpin, sizeof(int) );
hipMemcpyToSymbol(c_nDim, &G_nDim, sizeof(int) );
hipMemcpyToSymbol(c_stride, &G_strideFull, sizeof(int) );
hipMemcpyToSymbol(c_alphaAPE, &G_alphaAPE , sizeof(double) );
hipMemcpyToSymbol(c_alphaGauss, &G_alphaGauss , sizeof(double) );
hipMemcpyToSymbol(c_threads , &G_localVolume , sizeof(int) ); // c_threads is an int, so copy sizeof(int); value may change
hipMemcpyToSymbol(c_dimBreak , G_dimBreak , QUDAQKXTM_DIM*sizeof(bool) );
hipMemcpyToSymbol(c_localL , G_localL , QUDAQKXTM_DIM*sizeof(int) );
hipMemcpyToSymbol(c_totalL , G_totalL , QUDAQKXTM_DIM*sizeof(int) );
hipMemcpyToSymbol(c_plusGhost , G_plusGhost , QUDAQKXTM_DIM*sizeof(int) );
hipMemcpyToSymbol(c_minusGhost , G_minusGhost , QUDAQKXTM_DIM*sizeof(int) );
hipMemcpyToSymbol(c_surface , G_surface3D , QUDAQKXTM_DIM*sizeof(int) );
hipMemcpyToSymbol(c_eps, &(eps[0][0]) , 6*3*sizeof(int) );
hipMemcpyToSymbol(c_sgn_eps, sgn_eps , 6*sizeof(int) );
hipMemcpyToSymbol(c_procPosition, procPosition, QUDAQKXTM_DIM*sizeof(int));
hipMemcpyToSymbol(c_sourcePosition, sourcePosition, QUDAQKXTM_DIM*sizeof(int));
// double machineEpsilon = std::numeric_limits<double>::epsilon();
// hipMemcpyToSymbol(c_machineEpsilon , &machineEpsilon , sizeof(double));
checkCudaError();
// create groups of process to use mpi reduce only on spatial points
MPI_Comm_group(MPI_COMM_WORLD, &G_fullGroup);
int space3D_proc;
space3D_proc = G_nProc[0] * G_nProc[1] * G_nProc[2];
int *ranks = (int*) malloc(space3D_proc*sizeof(int));
for(int i= 0 ; i < space3D_proc ; i++)
ranks[i] = comm_coords(3) * space3D_proc + i;
printf("%d (%d,%d,%d,%d)\n",comm_rank(),comm_coords(0),comm_coords(1),comm_coords(2),comm_coords(3));
for(int i= 0 ; i < space3D_proc ; i++)
printf("%d %d\n",comm_rank(), ranks[i]);
MPI_Group_incl(G_fullGroup,space3D_proc,ranks,&G_spaceGroup);
MPI_Group_rank(G_spaceGroup,&G_localRank);
MPI_Group_size(G_spaceGroup,&G_localSize);
MPI_Comm_create(MPI_COMM_WORLD, G_spaceGroup , &G_spaceComm);
// create group of process to use mpi gather
int *ranksTime = (int*) malloc(G_nProc[3]*sizeof(int));
for(int i=0 ; i < G_nProc[3] ; i++)
ranksTime[i] = i*space3D_proc;
MPI_Group_incl(G_fullGroup,G_nProc[3], ranksTime, &G_timeGroup);
MPI_Group_rank(G_timeGroup, &G_timeRank);
MPI_Group_size(G_timeGroup, &G_timeSize);
MPI_Comm_create(MPI_COMM_WORLD, G_timeGroup, &G_timeComm);
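// Example (assuming the rank ordering implied by comm_coords, with t slowest):
// for a 2x2x2x4 process grid, space3D_proc = 8; the spatial group of a rank
// with t-coordinate 3 contains world ranks 24..31, while the time group
// gathers ranks {0, 8, 16, 24}.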
//////////////////////////////////////////////////////////////////////////////
free(ranks);
free(ranksTime);
G_init_qudaQKXTM_flag = true;
printfQuda("qudaQKXTM has been initialized\n");
}
else
return;
}
int quda::comm_localRank(){
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
return G_localRank;
}
void quda::printf_qudaQKXTM(){
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
printfQuda("Number of colors is %d\n",G_nColor);
printfQuda("Number of spins is %d\n",G_nSpin);
printfQuda("Number of dimensions is %d\n",G_nDim);
printfQuda("Number of process in each direction is (x,y,z,t) %d x %d x %d x %d\n",G_nProc[0],G_nProc[1],G_nProc[2],G_nProc[3]);
printfQuda("Total lattice is (x,y,z,t) %d x %d x %d x %d\n",G_totalL[0],G_totalL[1],G_totalL[2],G_totalL[3]);
printfQuda("Local lattice is (x,y,z,t) %d x %d x %d x %d\n",G_localL[0],G_localL[1],G_localL[2],G_localL[3]);
printfQuda("Total volume is %d\n",G_totalVolume);
printfQuda("Local volume is %d\n",G_localVolume);
printfQuda("Surface is (x,y,z,t) ( %d , %d , %d , %d)\n",G_surface3D[0],G_surface3D[1],G_surface3D[2],G_surface3D[3]);
printfQuda("The plus Ghost points in directions (x,y,z,t) ( %d , %d , %d , %d )\n",G_plusGhost[0],G_plusGhost[1],G_plusGhost[2],G_plusGhost[3]);
printfQuda("The Minus Ghost points in directixons (x,y,z,t) ( %d , %d , %d , %d )\n",G_minusGhost[0],G_minusGhost[1],G_minusGhost[2],G_minusGhost[3]);
printfQuda("For APE smearing we use nsmear = %d , alpha = %lf\n",G_nsmearAPE,G_alphaAPE);
printfQuda("For Gauss smearing we use nsmear = %d , alpha = %lf\n",G_nsmearGauss,G_alphaGauss);
printfQuda("For HYP smearing we use nsmear = %d , omega1 = %lf , omega2 = %lf\n",G_nsmearHYP,G_omega1HYP,G_omega2HYP);
}
/////////////////// METHODS //////////////////////////////
//////////////////////// class QKXTM_Field /////////////////////////////
// $$ Section 7: Class QKXTM_Field $$
QKXTM_Field::QKXTM_Field():
h_elem(NULL) , d_elem(NULL) , h_ext_ghost(NULL) , d_ext_ghost(NULL), field_binded(false)
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
field_length = 1;
ghost_length = 0;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
// create_all();
}
QKXTM_Field::~QKXTM_Field(){
// destroy_all();
}
void QKXTM_Field::create_host(){
h_elem = (double*) malloc(bytes_total_length);
if(h_elem == NULL) errorQuda("Error allocating host memory");
}
void QKXTM_Field::create_host_ghost(){
#ifdef MULTI_GPU
if( comm_size() > 1){
h_ext_ghost = (double*) malloc(bytes_ghost_length);
if(h_ext_ghost == NULL) errorQuda("Error allocating host memory");
}
#endif
}
void QKXTM_Field::create_device(){
hipMalloc((void**)&d_elem,bytes_total_length);
checkCudaError();
G_deviceMemory += bytes_total_length/(1024.*1024.); // device memory in MB
printfQuda("Device memory in used is %f MB A \n",G_deviceMemory);
}
void QKXTM_Field::create_device_ghost(){
#ifdef MULTI_GPU
if( comm_size() > 1){
hipMalloc((void**)&d_ext_ghost,bytes_ghost_length);
checkCudaError();
G_deviceMemory += bytes_ghost_length/(1024.*1024.);
printfQuda("Device memory in used is %f MB A \n",G_deviceMemory);
}
#endif
}
void QKXTM_Field::destroy_host(){
free(h_elem);
h_elem = NULL;
}
void QKXTM_Field::destroy_device(){
if(d_elem != NULL){
hipFree(d_elem);
checkCudaError();
d_elem = NULL;
G_deviceMemory -= bytes_total_length/(1024.*1024.);
printfQuda("Device memory in used is %f MB D \n",G_deviceMemory);
}
}
void QKXTM_Field::destroy_host_ghost(){
#ifdef MULTI_GPU
if( (comm_size() > 1) ){
free(h_ext_ghost);
h_ext_ghost = NULL;
}
#endif
}
void QKXTM_Field::destroy_device_ghost(){
#ifdef MULTI_GPU
if( comm_size() > 1 ){
if(d_ext_ghost != NULL){
hipFree(d_ext_ghost);
d_ext_ghost = NULL;
checkCudaError();
G_deviceMemory -= bytes_ghost_length/(1024.*1024.);
printfQuda("Device memory in used is %f MB D \n",G_deviceMemory);
}
}
#endif
}
void QKXTM_Field::create_all(){
create_host();
create_host_ghost();
create_device();
// create_device_ghost(); // not needed when using hipMemcpy2D
zero();
}
void QKXTM_Field::destroy_all(){
destroy_host();
destroy_host_ghost();
destroy_device();
// destroy_device_ghost();
}
void QKXTM_Field::printInfo(){
printfQuda("GPU memory needed is %f MB \n",bytes_total_length/(1024.0 * 1024.0));
}
void QKXTM_Field::zero(){
memset(h_elem,0,bytes_total_length);
hipMemset(d_elem,0,bytes_total_length);
checkCudaError();
}
void QKXTM_Field::fourierCorr(double *corr, double *corrMom, int Nmom , int momElem[][3]){
// corrMom must be allocated with at least G_localL[3]*Nmom*2 doubles
// slowest index is time, then momentum, then the real/imaginary part
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
hipBindTexture(0, fieldTex, corr, Bytes() );
double *h_partial_block = NULL;
double *d_partial_block = NULL;
h_partial_block = (double*) malloc(gridDim.x*2 * sizeof(double) ); // for complex *2
if(h_partial_block == NULL) errorQuda("Error allocating memory for host partial block");
hipMalloc((void**)&d_partial_block, gridDim.x*2 * sizeof(double) );
double reduction[2];
double globalReduction[2];
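// Reduction pattern for the loops below: the kernel writes one partial
// complex sum per thread block for the current 3D timeslice, the host adds
// the per-block results, and MPI_Reduce over the spatial communicator
// completes the momentum projection, so only the spatial root holds the
// final value.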
for(int it = 0 ; it < G_localL[3] ; it++){
for(int imom = 0 ; imom < Nmom ; imom++){
// hipLaunchKernelGGL(( fourierCorr_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block ,it ,momElem[imom][0] , momElem[imom][1] , momElem[imom][2] ); // source position and proc position is in constant memory
hipLaunchKernelGGL(( fourierPion_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block ,it ,momElem[imom][0] , momElem[imom][1] , momElem[imom][2] );
hipDeviceSynchronize();
hipMemcpy(h_partial_block , d_partial_block , gridDim.x*2 * sizeof(double) , hipMemcpyDeviceToHost);
memset(reduction , 0 , 2 * sizeof(double) );
for(int i =0 ; i < gridDim.x ; i++){
reduction[0] += h_partial_block[ i*2 + 0];
reduction[1] += h_partial_block[ i*2 + 1];
}
MPI_Reduce(&(reduction[0]) , &(globalReduction[0]) , 2 , MPI_DOUBLE , MPI_SUM , 0 , G_spaceComm); // only local root has the right value
if(G_localRank == 0){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
corrMom[it*Nmom*2 + imom*2 + 0] = globalReduction[0];
corrMom[it*Nmom*2 + imom*2 + 1] = globalReduction[1];
}
}
} // for all momenta
} // for all local timeslice
hipUnbindTexture(fieldTex);
free(h_partial_block);
hipFree(d_partial_block);
checkCudaError();
h_partial_block = NULL;
d_partial_block = NULL;
}
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////// Class QKXTM_Gauge ////////////////////////////////////////
// $$ Section 8: Class QKXTM_Gauge $$
QKXTM_Gauge::QKXTM_Gauge():
gauge_binded_plaq(false) , packGauge_flag(false) , loadGauge_flag(false) , gauge_binded_ape(false), h_elem_backup(NULL)
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
ghost_length = 0;
field_length = G_nDim * G_nColor * G_nColor;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_all();
}
QKXTM_Gauge::QKXTM_Gauge(NgaugeHost ngaugeHost):
gauge_binded_plaq(false) , packGauge_flag(false) , loadGauge_flag(false) , gauge_binded_ape(false), h_elem_backup(NULL)
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
ghost_length = 0;
field_length = G_nDim * G_nColor * G_nColor;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
if(ngaugeHost == QKXTM_N1){
create_all();}
else{
create_all();
h_elem_backup =(double*) malloc(bytes_total_length);
if(h_elem_backup == NULL) errorQuda("Error allocating host memory for backup");
}
}
QKXTM_Gauge::~QKXTM_Gauge(){
destroy_all();
if(h_elem_backup != NULL)free(h_elem_backup);
if(gauge_binded_plaq == true) unbindGaugePlaq();
if(gauge_binded_ape == true) unbindGaugeAPE();
gauge_binded_plaq = false;
gauge_binded_ape = false;
}
void QKXTM_Gauge::bindGaugePlaq(){
if( gauge_binded_plaq == false ){
hipBindTexture(0,gaugeTexPlaq,d_elem,bytes_total_length);
checkCudaError();
}
gauge_binded_plaq = true;
}
void QKXTM_Gauge::unbindGaugePlaq(){
if(gauge_binded_plaq == true){
hipUnbindTexture(gaugeTexPlaq);
checkCudaError();
}
gauge_binded_plaq = false;
}
void QKXTM_Gauge::bindGaugeAPE(){
if( gauge_binded_ape == false ){
hipBindTexture(0,gaugeTexAPE,d_elem,bytes_total_length);
checkCudaError();
}
gauge_binded_ape = true;
}
void QKXTM_Gauge::unbindGaugeAPE(){
if(gauge_binded_ape == true){
hipUnbindTexture(gaugeTexAPE);
checkCudaError();
}
gauge_binded_ape = false;
}
void QKXTM_Gauge::rebindGaugeAPE(){
hipUnbindTexture(gaugeTexAPE);
hipBindTexture(0,gaugeTexAPE,d_elem,bytes_total_length);
checkCudaError();
}
void QKXTM_Gauge::packGauge(void **gauge){
// if(packGauge_flag == false){
double **p_gauge = (double**) gauge;
for(int dir = 0 ; dir < G_nDim ; dir++)
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++)
for(int part = 0 ; part < 2 ; part++){
h_elem[dir*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + iv*2 + part] = p_gauge[dir][iv*G_nColor*G_nColor*2 + c1*G_nColor*2 + c2*2 + part];
}
printfQuda("Gauge qkxTM packed on gpu form\n");
// packGauge_flag = true;
// }
}
void QKXTM_Gauge::loadGauge(){
//if((packGauge_flag == true) && (loadGauge_flag == false)){
hipMemcpy(d_elem,h_elem,(bytes_total_length - bytes_ghost_length), hipMemcpyHostToDevice );
checkCudaError();
// loadGauge_flag = true;
printfQuda("Gauge qkxTM loaded on gpu\n");
// }
}
void QKXTM_Gauge::justDownloadGauge(){
//if((packGauge_flag == true) && (loadGauge_flag == false)){
hipMemcpy(h_elem,d_elem,(bytes_total_length - bytes_ghost_length), hipMemcpyDeviceToHost );
checkCudaError();
// loadGauge_flag = true;
printfQuda("GaugeApe just downloaded\n");
// }
}
void QKXTM_Gauge::packGaugeToBackup(void **gauge){
double **p_gauge = (double**) gauge;
for(int dir = 0 ; dir < G_nDim ; dir++)
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++)
for(int part = 0 ; part < 2 ; part++){
h_elem_backup[dir*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + iv*2 + part] = p_gauge[dir][iv*G_nColor*G_nColor*2 + c1*G_nColor*2 + c2*2 + part];
}
printfQuda("Gauge qkxTM packed on gpu form on backupHost\n");
}
void QKXTM_Gauge::loadGaugeFromBackup(){
hipMemcpy(d_elem,h_elem_backup,(bytes_total_length - bytes_ghost_length), hipMemcpyHostToDevice );
checkCudaError();
printfQuda("Gauge qkxTM loaded on gpu from backupHost\n");
}
void QKXTM_Gauge::ghostToHost(){ // gpu collect ghost and send it to host
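// Each hipMemcpy2D below extracts a strided boundary slice without a packing
// kernel: 'width' bytes are copied from every row, the device rows are
// 'spitch' bytes apart, and the slices are packed contiguously on the host
// (dpitch == width).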
// direction x ////////////////////////////////////
#ifdef MULTI_GPU
if( G_localL[0] < G_totalL[0]){
int position;
int height = G_localL[1] * G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = 2*sizeof(double);
size_t spitch = G_localL[0]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = (G_localL[0]-1)*G_localL[1]*G_localL[2]*G_localL[3];
position = G_localL[0]-1;
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[0]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[0]*2 + c1*G_nColor*G_surface3D[0]*2 + c2*G_surface3D[0]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[0]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[0]*2 + c1*G_nColor*G_surface3D[0]*2 + c2*G_surface3D[0]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
}
// direction y ///////////////////////////////////
if( G_localL[1] < G_totalL[1]){
int position;
int height = G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[1]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*(G_localL[1]-1)*G_localL[2]*G_localL[3];
position = G_localL[0]*(G_localL[1]-1);
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[1]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[1]*2 + c1*G_nColor*G_surface3D[1]*2 + c2*G_surface3D[1]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[1]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[1]*2 + c1*G_nColor*G_surface3D[1]*2 + c2*G_surface3D[1]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
}
// direction z //////////////////////////////////
if( G_localL[2] < G_totalL[2]){
int position;
int height = G_localL[3]; // number of blocks that we need
size_t width = G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[2]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*G_localL[1]*(G_localL[2]-1)*G_localL[3];
position = G_localL[0]*G_localL[1]*(G_localL[2]-1);
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[2]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[2]*2 + c1*G_nColor*G_surface3D[2]*2 + c2*G_surface3D[2]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[2]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[2]*2 + c1*G_nColor*G_surface3D[2]*2 + c2*G_surface3D[2]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
}
// printfQuda("before copy device to host\n");
// direction t /////////////////////////////////////
if( G_localL[3] < G_totalL[3]){
int position;
int height = G_nDim*G_nColor*G_nColor;
size_t width = G_localL[2]*G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[3]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
position = G_localL[0]*G_localL[1]*G_localL[2]*(G_localL[3]-1);
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_minusGhost[3]*G_nDim*G_nColor*G_nColor*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
// set minus points to plus area
position = 0;
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_plusGhost[3]*G_nDim*G_nColor*G_nColor*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
checkCudaError();
#endif
}
void QKXTM_Gauge::cpuExchangeGhost(){ // cpus exchange links
#ifdef MULTI_GPU
if(comm_size() > 1){
MPI_Request request_recv[2*G_nDim];
MPI_Request request_send[2*G_nDim];
int back_nbr[4] = {X_BACK_NBR,Y_BACK_NBR,Z_BACK_NBR,T_BACK_NBR};
int fwd_nbr[4] = {X_FWD_NBR,Y_FWD_NBR,Z_FWD_NBR,T_FWD_NBR};
double *pointer_receive = NULL;
double *pointer_send = NULL;
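// Exchange pattern per broken direction: the local forward boundary (staged
// at the minus-ghost offset by ghostToHost) is sent to the forward neighbour,
// while the matching slice from the backward neighbour is received into this
// rank's minus-ghost area of h_ext_ghost; the reverse happens for the
// plus-ghost area.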
// direction x
if(G_localL[0] < G_totalL[0]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[0]*G_nColor*G_nColor*G_nDim*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[0]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_minusGhost[0]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[0], 0, &(request_recv[0]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[0], 0, &(request_send[0]));
comm_wait(&(request_recv[0])); // blocking until receive finish
comm_wait(&(request_send[0]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[0]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_plusGhost[0]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[0], 1, &(request_recv[1]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[0], 1, &(request_send[1]));
comm_wait(&(request_recv[1])); // blocking until receive finish
comm_wait(&(request_send[1]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction y
if(G_localL[1] < G_totalL[1]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[1]*G_nColor*G_nColor*G_nDim*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[1]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_minusGhost[1]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[1], 2, &(request_recv[2]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[1], 2, &(request_send[2]));
comm_wait(&(request_recv[2])); // blocking until receive finish
comm_wait(&(request_send[2]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[1]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_plusGhost[1]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[1], 3, &(request_recv[3]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[1], 3, &(request_send[3]));
comm_wait(&(request_recv[3])); // blocking until receive finish
comm_wait(&(request_send[3]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction z
if(G_localL[2] < G_totalL[2]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[2]*G_nColor*G_nColor*G_nDim*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[2]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_minusGhost[2]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[2], 4, &(request_recv[4]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[2], 4, &(request_send[4]));
comm_wait(&(request_recv[4])); // blocking until receive finish
comm_wait(&(request_send[4]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[2]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_plusGhost[2]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[2], 5, &(request_recv[5]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[2], 5, &(request_send[5]));
comm_wait(&(request_recv[5])); // blocking until receive finish
comm_wait(&(request_send[5]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction t
if(G_localL[3] < G_totalL[3]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
// printfQuda("Here\n");
size_t nbytes = G_surface3D[3]*G_nColor*G_nColor*G_nDim*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[3]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_minusGhost[3]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[3], 6, &(request_recv[6]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[3], 6, &(request_send[6]));
comm_wait(&(request_recv[6])); // blocking until receive finish
comm_wait(&(request_send[6]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[3]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_plusGhost[3]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[3], 7, &(request_recv[7]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[3], 7, &(request_send[7]));
comm_wait(&(request_recv[7])); // blocking until receive finish
comm_wait(&(request_send[7]));
pointer_receive = NULL;
pointer_send = NULL;
}
}
#endif
}
void QKXTM_Gauge::ghostToDevice(){ // simple cudamemcpy to send ghost to device
#ifdef MULTI_GPU
if(comm_size() > 1){
double *host = h_ext_ghost;
double *device = d_elem + G_localVolume*G_nColor*G_nColor*G_nDim*2;
hipMemcpy(device,host,bytes_ghost_length,hipMemcpyHostToDevice);
checkCudaError();
}
#endif
}
double QKXTM_Gauge::norm2Host(){
double res = 0.;
for(int i = 0 ; i < G_nDim*G_nColor*G_nColor*G_localVolume ; i++){
res += h_elem[i*2 + 0]*h_elem[i*2 + 0] + h_elem[i*2 + 1]*h_elem[i*2 + 1];
}
#ifdef MULTI_GPU
double globalRes;
int rc = MPI_Allreduce(&res , &globalRes , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for plaquette");
return globalRes ;
#else
return res;
#endif
}
double QKXTM_Gauge::norm2Device(){
double *h_partial = NULL;
double *d_partial = NULL;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
h_partial = (double*) malloc(gridDim.x * sizeof(double) ); // only real part
if(h_partial == NULL) errorQuda("Error allocating memory for host partial norm2");
hipMalloc((void**)&d_partial, gridDim.x * sizeof(double));
hipBindTexture(0,gaugeTexNorm2,d_elem, Bytes() - BytesGhost() );
hipLaunchKernelGGL(( norm2Gauge_kernel), dim3(gridDim),dim3(blockDim), 0, 0, d_partial);
hipDeviceSynchronize();
hipUnbindTexture(gaugeTexNorm2);
hipMemcpy(h_partial, d_partial , gridDim.x * sizeof(double) , hipMemcpyDeviceToHost);
double norm2 = 0.;
// simple host reduction
for(int i = 0 ; i < gridDim.x ; i++)
norm2 += h_partial[i];
free(h_partial);
hipFree(d_partial);
h_partial = NULL;
d_partial = NULL;
checkCudaError();
#ifdef MULTI_GPU
double globalNorm2;
int rc = MPI_Allreduce(&norm2 , &globalNorm2 , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for norm2");
return globalNorm2 ;
#else
return norm2;
#endif
}
double QKXTM_Gauge::calculatePlaq(){
if(gauge_binded_plaq == false) bindGaugePlaq();
// if(packGauge_flag == false) packGauge(gauge); // must be done by the caller, since the plaquette may also be computed for the APE-smeared gauge
// if(loadGauge_flag == false) loadGauge();
ghostToHost(); // collect surface from device and send it to host
// comm_barrier();
cpuExchangeGhost(); // cpus exchange surfaces with previous and forward proc all dir
ghostToDevice(); // now the host send surface to device
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *h_partial_plaq = NULL;
double *d_partial_plaq = NULL;
h_partial_plaq = (double*) malloc(gridDim.x * sizeof(double) ); // only real part
if(h_partial_plaq == NULL) errorQuda("Error allocating memory for host partial plaq");
hipMalloc((void**)&d_partial_plaq, gridDim.x * sizeof(double));
// cudaPrintfInit();
hipEvent_t start,stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipLaunchKernelGGL(( calculatePlaq_kernel), dim3(gridDim),dim3(blockDim), 0, 0, d_partial_plaq);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
// if(comm_rank() == 0) cudaPrintfDisplay(stdout,true);
// cudaPrintfEnd();
hipEventDestroy(start);
hipEventDestroy(stop);
// printfQuda("Elapsed time for plaquette kernel is %f ms\n",elapsedTime);
// now copy partial plaq to host
hipMemcpy(h_partial_plaq, d_partial_plaq , gridDim.x * sizeof(double) , hipMemcpyDeviceToHost);
double plaquette = 0.;
#ifdef MULTI_GPU
double globalPlaquette = 0.;
#endif
// simple host reduction on plaq
for(int i = 0 ; i < gridDim.x ; i++)
plaquette += h_partial_plaq[i];
free(h_partial_plaq);
hipFree(d_partial_plaq);
h_partial_plaq = NULL;
d_partial_plaq = NULL;
checkCudaError();
unbindGaugePlaq();
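// Normalisation below: average over G_totalVolume sites, the 6 plaquette
// planes per site and the trace over G_nColor colours.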
#ifdef MULTI_GPU
int rc = MPI_Allreduce(&plaquette , &globalPlaquette , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for plaquette");
return globalPlaquette/(G_totalVolume*G_nColor*6) ;
#else
return plaquette/(G_totalVolume*G_nColor*6);
#endif
}
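// Usage sketch (illustrative; gaugeField is a host double** with one
// colour-matrix block per direction, as expected by packGauge):
//   QKXTM_Gauge gauge;
//   gauge.packGauge((void**) gaugeField);
//   gauge.loadGauge();
//   double plaq = gauge.calculatePlaq();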
void QKXTM_Gauge::checkSum(){
justDownloadGauge(); //gpuformat
double *M = H_elem();
double sum_real,sum_imag;
sum_real = 0.;
sum_imag = 0.;
int mu =0;
sum_real = 0.;sum_imag = 0.;
for(int t = 0 ; t < 32 ; t++)
for(int z = 0 ; z < 16 ; z++)
for(int y = 0 ; y < 16 ; y++)
for(int x = 0 ; x < 16 ; x++)
for(int c1 =0 ; c1 < 3 ; c1++)
for(int c2 =0 ; c2 < 3 ; c2++){
int position = x + 16*y + 16*16*z + 16*16*16*t + 16*16*16*32*c2 + 16*16*16*32*3*c1 + 16*16*16*32*3*3*mu;
sum_real += M[position*2 + 0];
sum_imag += M[position*2 + 1];
}
printf("%+e %+e\n",sum_real,sum_imag);
mu =1;
sum_real = 0.;sum_imag = 0.;
for(int t = 0 ; t < 32 ; t++)
for(int z = 0 ; z < 16 ; z++)
for(int y = 0 ; y < 16 ; y++)
for(int x = 0 ; x < 16 ; x++)
for(int c1 =0 ; c1 < 3 ; c1++)
for(int c2 =0 ; c2 < 3 ; c2++){
int position = x + 16*y + 16*16*z + 16*16*16*t + 16*16*16*32*c2 + 16*16*16*32*3*c1 + 16*16*16*32*3*3*mu;
sum_real += M[position*2 + 0];
sum_imag += M[position*2 + 1];
}
printf("%+e %+e\n",sum_real,sum_imag);
mu =2;
sum_real = 0.;sum_imag = 0.;
for(int t = 0 ; t < 32 ; t++)
for(int z = 0 ; z < 16 ; z++)
for(int y = 0 ; y < 16 ; y++)
for(int x = 0 ; x < 16 ; x++)
for(int c1 =0 ; c1 < 3 ; c1++)
for(int c2 =0 ; c2 < 3 ; c2++){
int position = x + 16*y + 16*16*z + 16*16*16*t + 16*16*16*32*c2 + 16*16*16*32*3*c1 + 16*16*16*32*3*3*mu;
sum_real += M[position*2 + 0];
sum_imag += M[position*2 + 1];
}
printf("%+e %+e\n",sum_real,sum_imag);
mu =3;
sum_real = 0.;sum_imag = 0.;
for(int t = 0 ; t < 32 ; t++)
for(int z = 0 ; z < 16 ; z++)
for(int y = 0 ; y < 16 ; y++)
for(int x = 0 ; x < 16 ; x++)
for(int c1 =0 ; c1 < 3 ; c1++)
for(int c2 =0 ; c2 < 3 ; c2++){
int position = x + 16*y + 16*16*z + 16*16*16*t + 16*16*16*32*c2 + 16*16*16*32*3*c1 + 16*16*16*32*3*3*mu;
sum_real += M[position*2 + 0];
sum_imag += M[position*2 + 1];
}
printf("%+e %+e\n",sum_real,sum_imag);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void quda::UxMomentumPhase(QKXTM_Gauge &gaugeAPE, int px, int py, int pz, double zeta){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( UxMomentumPhase_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) gaugeAPE.D_elem(), px, py, pz, zeta);
hipDeviceSynchronize();
checkCudaError();
}
void quda::APE_smearing(QKXTM_Gauge &gaugeAPE , QKXTM_Gauge &gaugeTmp){ // free function (not a class method) that performs APE smearing; needs two QKXTM_Gauge objects
// if(G_nsmearAPE == 0) errorQuda("You cant call APE_smearing with G_nsmearAPE = 0"); // for G_nsmearAPE == 0 just copy to APE
QKXTM_Propagator *prop = new QKXTM_Propagator(); // the constructor allocates GPU memory for a propagator, used here as the staple buffer
QKXTM_Propagator &prp = *prop; // reference to the propagator object
#ifdef _PROPAGATOR_APE_TEX
prp.bindPropagatorAPE(); // need to bind propagator to texture because it will be input in kernel2
#endif
// pointers to the gauge objects only; no memory is allocated because no constructor is called
QKXTM_Gauge *in = NULL;
QKXTM_Gauge *out = NULL;
QKXTM_Gauge *tmp = NULL;
in = &(gaugeTmp);
out = &(gaugeAPE);
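// "in" and "out" are swapped after every iteration (ping-pong buffering);
// for an even number of smearing steps the final links therefore end up in
// gaugeTmp and are copied back into gaugeAPE after the loop.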
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipEvent_t start,stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
printfQuda("Perform APE smearing\n");
for(int iter = 0 ; iter < G_nsmearAPE ; iter++){
//rebind texture to "in" gauge field
in->rebindGaugeAPE(); // the texture now points to the "in" gauge
//communicate "in" gauge field
in->ghostToHost();
in->cpuExchangeGhost(); // perform communication of the gauge
in->ghostToDevice();
// cudaPrintfInit();
//kernel_1 first phase of APE smearing
hipLaunchKernelGGL(( APE_kernel_1), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) prp.D_elem() ,(double2*) out->D_elem() );
hipDeviceSynchronize(); // we need to block until the kernel finish
//communicate propagator
prp.ghostToHost();
prp.cpuExchangeGhost(); // perform communication of the gauge in propagator structure
prp.ghostToDevice();
//kernel_2 second phase of APE smearing and SU3 projection
#ifdef _PROPAGATOR_APE_TEX
hipLaunchKernelGGL(( APE_kernel_2), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) out->D_elem() );
#else
hipLaunchKernelGGL(( APE_kernel_2), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) prp.D_elem(),(double2*) out->D_elem() );
#endif
hipDeviceSynchronize();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
//cudaPrintfEnd();
tmp=in;
in=out;
out=tmp; // swap classes
checkCudaError();
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
// printfQuda("Elapsed time for APE smearing kernel is %f ms\n",elapsedTime);
if((G_nsmearAPE%2) == 0){
out->unbindGaugeAPE();
hipMemcpy(gaugeAPE.D_elem(),gaugeTmp.D_elem(), gaugeAPE.Bytes() - gaugeAPE.BytesGhost(), hipMemcpyDeviceToDevice);
}
else{
out->unbindGaugeAPE();
hipMemcpy(gaugeAPE.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , gaugeTmp.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , G_nColor*G_nColor*G_localVolume*2*sizeof(double) , hipMemcpyDeviceToDevice);
}
checkCudaError();
delete prop;
}
void quda::APE_smearing(QKXTM_Gauge &gaugeAPE , QKXTM_Gauge &gaugeTmp, QKXTM_Propagator &prp){ // free function that performs APE smearing; needs two QKXTM_Gauge objects and a propagator for the staples
// if(G_nsmearAPE == 0) errorQuda("You cant call APE_smearing with G_nsmearAPE = 0"); // for G_nsmearAPE == 0 just copy to APE
// QKXTM_Propagator *prop = new QKXTM_Propagator(); // the constructor allocate memory on gpu for propagator I will use it for staple
// QKXTM_Propagator &prp = *prop; // take reference class
if(G_nsmearAPE == 0) return;
#ifdef _PROPAGATOR_APE_TEX
prp.bindPropagatorAPE(); // need to bind propagator to texture because it will be input in kernel2
#endif
// pointers to the gauge objects only; no memory is allocated because no constructor is called
QKXTM_Gauge *in = NULL;
QKXTM_Gauge *out = NULL;
QKXTM_Gauge *tmp = NULL;
in = &(gaugeTmp);
out = &(gaugeAPE);
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipEvent_t start,stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
printfQuda("Perform APE smearing\n");
for(int iter = 0 ; iter < G_nsmearAPE ; iter++){
//rebind texture to "in" gauge field
in->rebindGaugeAPE(); // the texture now points to the "in" gauge
//communicate "in" gauge field
in->ghostToHost();
in->cpuExchangeGhost(); // perform communication of the gauge
in->ghostToDevice();
cudaPrintfInit();
//kernel_1 first phase of APE smearing
hipLaunchKernelGGL(( APE_kernel_1), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) prp.D_elem() ,(double2*) out->D_elem() );
hipDeviceSynchronize(); // we need to block until the kernel finish
//communicate propagator
prp.ghostToHost();
prp.cpuExchangeGhost(); // perform communication of the gauge in propagator structure
prp.ghostToDevice();
//kernel_2 second phase of APE smearing and SU3 projection
#ifdef _PROPAGATOR_APE_TEX
hipLaunchKernelGGL(( APE_kernel_2), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) out->D_elem() );
#else
hipLaunchKernelGGL(( APE_kernel_2), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) prp.D_elem() ,(double2*) out->D_elem() );
#endif
hipDeviceSynchronize();
if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
cudaPrintfEnd();
tmp=in;
in=out;
out=tmp; // swap classes
checkCudaError();
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
prp.unbindPropagatorAPE();
// printfQuda("Elapsed time for APE smearing kernel is %f ms\n",elapsedTime);
if((G_nsmearAPE%2) == 0){
out->unbindGaugeAPE();
hipMemcpy(gaugeAPE.D_elem(),gaugeTmp.D_elem(), gaugeAPE.Bytes() - gaugeAPE.BytesGhost(), hipMemcpyDeviceToDevice);
}
else{
out->unbindGaugeAPE();
hipMemcpy(gaugeAPE.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , gaugeTmp.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , G_nColor*G_nColor*G_localVolume*2*sizeof(double) , hipMemcpyDeviceToDevice);
}
checkCudaError();
// delete prop;
// printfQuda("after delete prop\n");
}
void quda::HYP3D_smearing(QKXTM_Gauge &gaugeHYP , QKXTM_Gauge &gaugeTmp, QKXTM_Propagator &prp1, QKXTM_Propagator &prp2){ // free function that performs HYP 3D smearing; needs two QKXTM_Gauge objects and two propagators for the staples
// hipBindTexture(0,gaugeTexAPE,d_elem,bytes_total_length);
// hipBindTexture(0,propagatorTexAPE,d_elem,bytes_total_length);
if(G_nsmearHYP == 0)return;
// pointers to the gauge objects only; no memory is allocated because no constructor is called
QKXTM_Gauge *in = NULL;
QKXTM_Gauge *out = NULL;
QKXTM_Gauge *tmp = NULL;
in = &(gaugeTmp);
out = &(gaugeHYP);
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipEvent_t start,stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
printfQuda("Perform HYP 3D smearing\n");
for(int iter = 0 ; iter < G_nsmearHYP ; iter++){
//step 1
hipBindTexture(0,gaugeTexHYP,in->D_elem(),in->Bytes());
in->ghostToHost();
in->cpuExchangeGhost();
in->ghostToDevice();
//step2
hipLaunchKernelGGL(( HYP3D_kernel_1), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) prp1.D_elem());
hipDeviceSynchronize();
//step3
hipBindTexture(0,propagatorTexHYP,prp1.D_elem(),prp1.Bytes());
prp1.ghostToHost();
prp1.cpuExchangeGhost();
prp1.ghostToDevice();
//step4
hipLaunchKernelGGL(( HYP3D_kernel_2), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) prp2.D_elem() );
hipDeviceSynchronize();
//step5
hipUnbindTexture(propagatorTexHYP);
hipBindTexture(0,propagatorTexHYP,prp2.D_elem(),prp2.Bytes());
hipLaunchKernelGGL(( HYP3D_kernel_3), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) prp2.D_elem(),G_omega2HYP);
hipDeviceSynchronize();
prp2.ghostToHost();
prp2.cpuExchangeGhost();
prp2.ghostToDevice();
// check the sum
//step6
hipLaunchKernelGGL(( HYP3D_kernel_4), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) prp1.D_elem(),(double2*) out->D_elem());
hipDeviceSynchronize();
// out->checkSum();
//step7
hipUnbindTexture(propagatorTexHYP);
hipBindTexture(0,propagatorTexHYP,prp1.D_elem(),prp1.Bytes());
prp1.ghostToHost();
prp1.cpuExchangeGhost();
prp1.ghostToDevice();
//step8
hipLaunchKernelGGL(( HYP3D_kernel_5), dim3(gridDim),dim3(blockDim), 0, 0, (double2*)out->D_elem(),G_omega1HYP);
hipDeviceSynchronize();
//step9
hipUnbindTexture(propagatorTexHYP);
hipUnbindTexture(gaugeTexHYP);
tmp=in;
in=out;
out=tmp; // swap classes
checkCudaError();
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
if((G_nsmearHYP%2) == 0){
hipMemcpy(gaugeHYP.D_elem(),gaugeTmp.D_elem(), gaugeHYP.Bytes() - gaugeHYP.BytesGhost(), hipMemcpyDeviceToDevice);
}
else{
hipMemcpy(gaugeHYP.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , gaugeTmp.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , G_nColor*G_nColor*G_localVolume*2*sizeof(double) , hipMemcpyDeviceToDevice);
}
checkCudaError();
}
double* quda::createWilsonPath(QKXTM_Gauge &gauge,int direction ){
double* deviceWilsonPath = NULL;
hipBindTexture(0,gaugePath,gauge.D_elem(),gauge.Bytes());
checkCudaError();
hipMalloc((void**)&deviceWilsonPath,(G_localVolume*9*G_totalL[direction]/2)*2*sizeof(double) ); // one 3x3 colour matrix per site for each separation up to half the lattice extent in the given direction
checkCudaError();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( createWilsonPath_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) deviceWilsonPath,direction);
hipDeviceSynchronize();
hipUnbindTexture(gaugePath);
checkCudaError();
return deviceWilsonPath;
}
double* quda::createWilsonPathBwd(QKXTM_Gauge &gauge,int direction ){
double* deviceWilsonPath = NULL;
hipBindTexture(0,gaugePath,gauge.D_elem(),gauge.Bytes());
checkCudaError();
hipMalloc((void**)&deviceWilsonPath,(G_localVolume*9*G_totalL[direction]/2)*2*sizeof(double) ); // one 3x3 colour matrix per site for each separation up to half the lattice extent in the given direction
checkCudaError();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( createWilsonPathBwd_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) deviceWilsonPath,direction);
hipDeviceSynchronize();
hipUnbindTexture(gaugePath);
checkCudaError();
return deviceWilsonPath;
}
double* quda::createWilsonPath(QKXTM_Gauge &gauge){
double* deviceWilsonPath = NULL;
hipBindTexture(0,gaugePath,gauge.D_elem(),gauge.Bytes());
checkCudaError();
if( (G_totalL[0] != G_totalL[1]) || (G_totalL[0] != G_totalL[2])){
printfQuda("Lattice length must be equal in spatial directions\n");
}
hipMalloc((void**)&deviceWilsonPath,(G_localVolume*9*(G_totalL[0]/2)*3)*2*sizeof(double) ); // 3x3 colour matrices for all three spatial directions, separations up to half the lattice extent
checkCudaError();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( createWilsonPath_kernel_all), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) deviceWilsonPath);
hipDeviceSynchronize();
hipUnbindTexture(gaugePath);
checkCudaError();
return deviceWilsonPath;
}
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////// class QKXTM_Vector ////////////////////////////////////////////////////////////////
// $$ Section 9: Class QKXTM_Vector $$
QKXTM_Vector::QKXTM_Vector():
vector_binded_gauss(false) , packVector_flag(false) , loadVector_flag(false)
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
ghost_length = 0;
field_length = G_nSpin*G_nColor;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_all();
}
QKXTM_Vector::~QKXTM_Vector(){
destroy_all();
if(vector_binded_gauss == true) unbindVectorGauss();
vector_binded_gauss = false;
}
void QKXTM_Vector::packVector(void *vector){
if(packVector_flag == false){
double *p_vector = (double*) vector;
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // always work with format colors inside spins
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int part = 0 ; part < 2 ; part++){
h_elem[mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + iv*2 + part] = p_vector[iv*G_nSpin*G_nColor*2 + mu*G_nColor*2 + c1*2 + part];
}
printfQuda("Vector qkxTM packed on gpu form\n");
packVector_flag = true;
}
}
void QKXTM_Vector::loadVector(){
if(packVector_flag == true && loadVector_flag == false){
hipMemcpy(d_elem,h_elem,(bytes_total_length - bytes_ghost_length), hipMemcpyHostToDevice );
checkCudaError();
loadVector_flag = true;
printfQuda("Vector qkxTM loaded on gpu\n");
}
}
void QKXTM_Vector::ghostToHost(){ // gpu collect ghost and send it to host
// direction x ////////////////////////////////////
#ifdef MULTI_GPU
if( G_localL[0] < G_totalL[0]){
int position;
int height = G_localL[1] * G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = 2*sizeof(double);
size_t spitch = G_localL[0]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = (G_localL[0]-1)*G_localL[1]*G_localL[2]*G_localL[3];
position = (G_localL[0]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[0]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[0]*2 + c1*G_surface3D[0]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[0]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[0]*2 + c1*G_surface3D[0]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
}
// direction y ///////////////////////////////////
if( G_localL[1] < G_totalL[1]){
int position;
int height = G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[1]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*(G_localL[1]-1)*G_localL[2]*G_localL[3];
position = G_localL[0]*(G_localL[1]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[1]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[1]*2 + c1*G_surface3D[1]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[1]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[1]*2 + c1*G_surface3D[1]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
}
// direction z //////////////////////////////////
if( G_localL[2] < G_totalL[2]){
int position;
int height = G_localL[3]; // number of blocks that we need
size_t width = G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[2]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*G_localL[1]*(G_localL[2]-1)*G_localL[3];
position = G_localL[0]*G_localL[1]*(G_localL[2]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[2]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[2]*2 + c1*G_surface3D[2]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[2]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[2]*2 + c1*G_surface3D[2]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
}
// direction t /////////////////////////////////////
if( G_localL[3] < G_totalL[3]){
int position;
int height = G_nSpin*G_nColor;
size_t width = G_localL[2]*G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[3]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
position = G_localL[0]*G_localL[1]*G_localL[2]*(G_localL[3]-1);
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_minusGhost[3]*G_nSpin*G_nColor*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
// set minus points to plus area
position = 0;
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_plusGhost[3]*G_nSpin*G_nColor*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
#endif
}
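// Note on the strided copies above (descriptive sketch added for documentation): for each
// direction d that is split across processes, ghostToHost() extracts the two boundary 3-d
// surfaces of the local lattice straight from device memory with hipMemcpy2D. The source
// pitch is the local extent in direction d times the row width, so successive rows of the
// copy pick exactly the sites with x_d = G_localL[d]-1 (forward face) or x_d = 0 (backward
// face). The faces are staged contiguously in h_elem at the offsets G_minusGhost[d] and
// G_plusGhost[d], ready for the MPI exchange in cpuExchangeGhost().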
void QKXTM_Vector::cpuExchangeGhost(){ // CPUs exchange the vector ghost sites
#ifdef MULTI_GPU
if(comm_size() > 1){
MPI_Request request_recv[2*G_nDim];
MPI_Request request_send[2*G_nDim];
int back_nbr[4] = {X_BACK_NBR,Y_BACK_NBR,Z_BACK_NBR,T_BACK_NBR};
int fwd_nbr[4] = {X_FWD_NBR,Y_FWD_NBR,Z_FWD_NBR,T_FWD_NBR};
// direction x
if(G_localL[0] < G_totalL[0]){
double *pointer_receive = NULL;
double *pointer_send = NULL;
size_t nbytes = G_surface3D[0]*G_nSpin*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[0]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_minusGhost[0]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[0], 0, &(request_recv[0]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[0], 0, &(request_send[0]));
comm_wait(&(request_recv[0])); // blocking until receive finish
comm_wait(&(request_send[0]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[0]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_plusGhost[0]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[0], 1, &(request_recv[1]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[0], 1, &(request_send[1]));
comm_wait(&(request_recv[1])); // blocking until receive finish
comm_wait(&(request_send[1]));
}
// direction y
if(G_localL[1] < G_totalL[1]){
double *pointer_receive = NULL;
double *pointer_send = NULL;
size_t nbytes = G_surface3D[1]*G_nSpin*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[1]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_minusGhost[1]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[1], 2, &(request_recv[2]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[1], 2, &(request_send[2]));
comm_wait(&(request_recv[2])); // blocking until receive finish
comm_wait(&(request_send[2]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[1]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_plusGhost[1]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[1], 3, &(request_recv[3]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[1], 3, &(request_send[3]));
comm_wait(&(request_recv[3])); // blocking until receive finish
comm_wait(&(request_send[3]));
}
// direction z
if(G_localL[2] < G_totalL[2]){
double *pointer_receive = NULL;
double *pointer_send = NULL;
size_t nbytes = G_surface3D[2]*G_nSpin*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[2]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_minusGhost[2]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[2], 4, &(request_recv[4]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[2], 4, &(request_send[4]));
comm_wait(&(request_recv[4])); // blocking until receive finish
comm_wait(&(request_send[4]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[2]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_plusGhost[2]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[2], 5, &(request_recv[5]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[2], 5, &(request_send[5]));
comm_wait(&(request_recv[5])); // blocking until receive finish
comm_wait(&(request_send[5]));
}
// direction t
if(G_localL[3] < G_totalL[3]){
double *pointer_receive = NULL;
double *pointer_send = NULL;
size_t nbytes = G_surface3D[3]*G_nSpin*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[3]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_minusGhost[3]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[3], 6, &(request_recv[6]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[3], 6, &(request_send[6]));
comm_wait(&(request_recv[6])); // blocking until receive finish
comm_wait(&(request_send[6]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[3]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_plusGhost[3]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[3], 7, &(request_recv[7]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[3], 7, &(request_send[7]));
comm_wait(&(request_recv[7])); // blocking until receive finish
comm_wait(&(request_send[7]));
}
}
#endif
}
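// Communication pattern (descriptive sketch added for documentation): for every partitioned
// direction the ghost faces staged by ghostToHost() are exchanged with the two neighbouring
// processes. Each leg posts its receive first, then the matching send, and waits on both
// requests immediately, so the exchange is effectively blocking and no overlap with
// computation is attempted here. Tags 2*d and 2*d+1 keep the forward and backward transfers
// of direction d apart; the received faces land in h_ext_ghost at the corresponding
// (G_minusGhost[d]-G_localVolume) and (G_plusGhost[d]-G_localVolume) offsets.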
void QKXTM_Vector::ghostToDevice(){ // simple cudamemcpy to send ghost to device
#ifdef MULTI_GPU
if(comm_size() > 1){
double *host = h_ext_ghost;
double *device = d_elem + G_localVolume*G_nSpin*G_nColor*2;
hipMemcpy(device,host,bytes_ghost_length,hipMemcpyHostToDevice);
checkCudaError();
}
#endif
}
void QKXTM_Vector::bindVectorGauss(){
if( vector_binded_gauss == false ){
hipBindTexture(0,vectorTexGauss,d_elem,bytes_total_length);
checkCudaError();
}
vector_binded_gauss = true;
}
void QKXTM_Vector::unbindVectorGauss(){
if(vector_binded_gauss == true){
hipUnbindTexture(vectorTexGauss);
checkCudaError();
}
vector_binded_gauss = false;
}
void QKXTM_Vector::rebindVectorGauss(){
hipUnbindTexture(vectorTexGauss);
hipBindTexture(0,vectorTexGauss,d_elem,bytes_total_length);
checkCudaError();
}
void QKXTM_Vector::download(){
hipMemcpy(h_elem,d_elem,Bytes() - BytesGhost() , hipMemcpyDeviceToHost);
checkCudaError();
double *vector_tmp = (double*) malloc( Bytes() - BytesGhost() );
if(vector_tmp == NULL) errorQuda("Error allocating memory for tmp vector");
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // always work with format colors inside spins
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int part = 0 ; part < 2 ; part++){
vector_tmp[iv*G_nSpin*G_nColor*2 + mu*G_nColor*2 + c1*2 + part] = h_elem[mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + iv*2 + part];
}
memcpy(h_elem,vector_tmp,Bytes() - BytesGhost());
free(vector_tmp);
vector_tmp = NULL;
}
double QKXTM_Vector::norm2Host(){
double res = 0.;
for(int i = 0 ; i < G_nSpin*G_nColor*G_localVolume ; i++){
res += h_elem[i*2 + 0]*h_elem[i*2 + 0] + h_elem[i*2 + 1]*h_elem[i*2 + 1];
}
#ifdef MULTI_GPU
double globalRes;
int rc = MPI_Allreduce(&res , &globalRes , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for norm2");
return globalRes ;
#else
return res;
#endif
}
double QKXTM_Vector::norm2Device(){
double *h_partial = NULL;
double *d_partial = NULL;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
h_partial = (double*) malloc(gridDim.x * sizeof(double) ); // only real part
if(h_partial == NULL) errorQuda("Error allocating memory for host partial norm2");
hipMalloc((void**)&d_partial, gridDim.x * sizeof(double));
hipBindTexture(0,vectorTexNorm2,d_elem, Bytes() - BytesGhost() );
hipLaunchKernelGGL(( norm2Vector_kernel), dim3(gridDim),dim3(blockDim), 0, 0, d_partial);
hipDeviceSynchronize();
hipUnbindTexture(vectorTexNorm2);
hipMemcpy(h_partial, d_partial , gridDim.x * sizeof(double) , hipMemcpyDeviceToHost);
double norm2 = 0.;
// simple host reduction
for(int i = 0 ; i < gridDim.x ; i++)
norm2 += h_partial[i];
free(h_partial);
hipFree(d_partial);
h_partial = NULL;
d_partial = NULL;
checkCudaError();
#ifdef MULTI_GPU
double globalNorm2;
int rc = MPI_Allreduce(&norm2 , &globalNorm2 , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for norm2");
return globalNorm2 ;
#else
return norm2;
#endif
}
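// Reduction strategy (descriptive sketch added for documentation): norm2Device() is a
// two-stage sum. norm2Vector_kernel leaves one partial result per thread block in
// d_partial, the host adds the gridDim.x partials, and in the multi-GPU build an
// MPI_Allreduce combines the per-process sums. The host-side loop keeps the code simple;
// a second device-side reduction pass would only matter for very large local volumes.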
void QKXTM_Vector::uploadToCuda(cudaColorSpinorField &cudaVector){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
// cudaPrintfInit();
hipLaunchKernelGGL(( uploadToCuda_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_elem , (double2*) pointEven, (double2*) pointOdd);
//cudaPrintfDisplay(stdout,true);
//cudaPrintfEnd();
hipDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::downloadFromCuda(cudaColorSpinorField &cudaVector){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
hipLaunchKernelGGL(( downloadFromCuda_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_elem , (double2*) pointEven, (double2*) pointOdd);
hipDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::flagsToFalse(){
packVector_flag = false;
loadVector_flag = false;
}
void QKXTM_Vector::scaleVector(double a){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( scaleVector_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_elem, a);
hipDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::copyPropagator3D(QKXTM_Propagator3D &prop, int timeslice, int nu , int c2){
double *pointer_src = NULL;
double *pointer_dst = NULL;
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
pointer_dst = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + timeslice*G_localVolume3D*2 ;
pointer_src = prop.D_elem() + mu*G_nSpin*G_nColor*G_nColor*G_localVolume3D*2 + nu*G_nColor*G_nColor*G_localVolume3D*2 + c1*G_nColor*G_localVolume3D*2 + c2*G_localVolume3D*2;
hipMemcpy(pointer_dst, pointer_src, G_localVolume3D*2 * sizeof(double), hipMemcpyDeviceToDevice);
}
pointer_src = NULL;
pointer_dst = NULL;
checkCudaError();
}
void QKXTM_Vector::copyPropagator(QKXTM_Propagator &prop, int nu , int c2){
double *pointer_src = NULL;
double *pointer_dst = NULL;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
pointer_dst = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 ;
pointer_src = prop.D_elem() + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2;
hipMemcpy(pointer_dst, pointer_src, G_localVolume*2 *sizeof(double), hipMemcpyDeviceToDevice);
}
pointer_src = NULL;
pointer_dst = NULL;
checkCudaError();
}
void QKXTM_Vector::conjugate(){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( conjugate_vector_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) D_elem() );
hipDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::applyGamma5(){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( apply_gamma5_vector_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) D_elem() );
hipDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::applyGammaTransformation(){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( apply_gamma_transf_vector_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) D_elem() );
hipDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::applyMomentum(int nx, int ny, int nz){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( apply_momentum_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) D_elem() , nx , ny , nz);
hipDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::getVectorProp3D(QKXTM_Propagator3D &prop1, int timeslice,int nu,int c2){
// cudaPrintfInit();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
//cudaPrintfEnd();
//cudaPrintfInit();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
hipBindTexture(0, propagator3DTex1, prop1.D_elem(), prop1.Bytes());
hipLaunchKernelGGL(( getVectorProp3D_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) this->D_elem(), timeslice , nu, c2);
hipDeviceSynchronize();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
hipUnbindTexture(propagator3DTex1);
// cudaPrintfEnd();
checkCudaError();
}
// new addition
#include <lime.h>
static void qcd_swap_8(double *Rd, int N)
{
char *i,*j,*k;
char swap;
char *max;
char *R = (char*) Rd;
max = R+(N<<3);
for(i=R;i<max;i+=8)
{
j=i; k=j+7;
swap = *j; *j = *k; *k = swap;
j++; k--;
swap = *j; *j = *k; *k = swap;
j++; k--;
swap = *j; *j = *k; *k = swap;
j++; k--;
swap = *j; *j = *k; *k = swap;
}
}
static int qcd_isBigEndian()
{
union{
char C[4];
int R ;
}word;
word.R=1;
if(word.C[3]==1) return 1;
if(word.C[0]==1) return 0;
return -1;
}
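// Endianness helpers (descriptive sketch added for documentation): the binary payload of
// the LIME file written below is stored big-endian, as is conventional for SciDAC-style
// lattice data. qcd_isBigEndian() probes the host byte order through the char/int union,
// and write() calls qcd_swap_8() on the output buffer when the host is little-endian,
// reversing the 8 bytes of every double in place before the collective MPI-IO write.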
void QKXTM_Vector::write(char *filename){
FILE *fid;
int error_in_header=0;
LimeWriter *limewriter;
LimeRecordHeader *limeheader = NULL;
int ME_flag=0, MB_flag=0, limeStatus;
u_int64_t message_length;
MPI_Offset offset;
MPI_Datatype subblock; //MPI-type, 5d subarray
MPI_File mpifid;
MPI_Status status;
int sizes[5], lsizes[5], starts[5];
long int i;
int chunksize,mu,c1;
char *buffer;
int x,y,z,t;
char tmp_string[2048];
if(comm_rank() == 0){ // master will write the lime header
fid = fopen(filename,"w");
if(fid == NULL){
fprintf(stderr,"Error open file to write propagator in %s \n",__func__);
comm_exit(-1);
}
else{
limewriter = limeCreateWriter(fid);
if(limewriter == (LimeWriter*)NULL) {
fprintf(stderr, "Error in %s. LIME error in file for writing! in %s\n", __func__);
error_in_header=1;
comm_exit(-1);
}
else
{
sprintf(tmp_string, "DiracFermion_Sink");
message_length=(long int) strlen(tmp_string);
MB_flag=1; ME_flag=1;
limeheader = limeCreateHeader(MB_flag, ME_flag, "propagator-type", message_length);
if(limeheader == (LimeRecordHeader*)NULL)
{
fprintf(stderr, "Error in %s. LIME create header error.\n", __func__);
error_in_header=1;
comm_exit(-1);
}
limeStatus = limeWriteRecordHeader(limeheader, limewriter);
if(limeStatus < 0 )
{
fprintf(stderr, "Error in %s. LIME write header %d\n", __func__, limeStatus);
error_in_header=1;
comm_exit(-1);
}
limeDestroyHeader(limeheader);
limeStatus = limeWriteRecordData(tmp_string, &message_length, limewriter);
if(limeStatus < 0 )
{
fprintf(stderr, "Error in %s. LIME write header error %d\n", __func__, limeStatus);
error_in_header=1;
comm_exit(-1);
}
sprintf(tmp_string, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<etmcFormat>\n\t<field>diracFermion</field>\n\t<precision>64</precision>\n\t<flavours>1</flavours>\n\t<lx>%d</lx>\n\t<ly>%d</ly>\n\t<lz>%d</lz>\n\t<lt>%d</lt>\n\t<spin>4</spin>\n\t<colour>3</colour>\n</etmcFormat>", G_totalL[0], G_totalL[1], G_totalL[2], G_totalL[3]);
message_length=(long int) strlen(tmp_string);
MB_flag=1; ME_flag=1;
limeheader = limeCreateHeader(MB_flag, ME_flag, "quda-propagator-format", message_length);
if(limeheader == (LimeRecordHeader*)NULL)
{
fprintf(stderr, "Error in %s. LIME create header error.\n", __func__);
error_in_header=1;
comm_exit(-1);
}
limeStatus = limeWriteRecordHeader(limeheader, limewriter);
if(limeStatus < 0 )
{
fprintf(stderr, "Error in %s. LIME write header %d\n", __func__, limeStatus);
error_in_header=1;
comm_exit(-1);
}
limeDestroyHeader(limeheader);
limeStatus = limeWriteRecordData(tmp_string, &message_length, limewriter);
if(limeStatus < 0 )
{
fprintf(stderr, "Error in %s. LIME write header error %d\n", __func__, limeStatus);
error_in_header=1;
comm_exit(-1);
}
message_length = G_totalVolume*4*3*2*sizeof(double);
MB_flag=1; ME_flag=1;
limeheader = limeCreateHeader(MB_flag, ME_flag, "scidac-binary-data", message_length);
limeStatus = limeWriteRecordHeader( limeheader, limewriter);
if(limeStatus < 0 )
{
fprintf(stderr, "Error in %s. LIME write header error %d\n", __func__, limeStatus);
error_in_header=1;
}
limeDestroyHeader( limeheader );
}
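// Write a single dummy byte so that LIME flushes the scidac-binary-data record header;
// ftell(fid)-1 is then the byte offset at which the collective MPI-IO write below places
// the actual field data (overwriting the dummy byte).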
message_length=1;
limeWriteRecordData(tmp_string, &message_length, limewriter);
limeDestroyWriter(limewriter);
offset = ftell(fid)-1;
fclose(fid);
}
}
MPI_Bcast(&offset,sizeof(MPI_Offset),MPI_BYTE,0,MPI_COMM_WORLD);
sizes[0]=G_totalL[3];
sizes[1]=G_totalL[2];
sizes[2]=G_totalL[1];
sizes[3]=G_totalL[0];
sizes[4]=4*3*2;
lsizes[0]=G_localL[3];
lsizes[1]=G_localL[2];
lsizes[2]=G_localL[1];
lsizes[3]=G_localL[0];
lsizes[4]=sizes[4];
starts[0]=comm_coords(3)*G_localL[3];
starts[1]=comm_coords(2)*G_localL[2];
starts[2]=comm_coords(1)*G_localL[1];
starts[3]=comm_coords(0)*G_localL[0];
starts[4]=0;
MPI_Type_create_subarray(5,sizes,lsizes,starts,MPI_ORDER_C,MPI_DOUBLE,&subblock);
MPI_Type_commit(&subblock);
MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_WRONLY, MPI_INFO_NULL, &mpifid);
MPI_File_set_view(mpifid, offset, MPI_DOUBLE, subblock, "native", MPI_INFO_NULL); // etype made consistent with the MPI_DOUBLE filetype
chunksize=4*3*2*sizeof(double);
buffer = (char*) malloc(chunksize*G_localVolume);
if(buffer==NULL)
{
fprintf(stderr,"Error in %s! Out of memory\n", __func__);
comm_exit(-1);
}
i=0;
for(t=0; t<G_localL[3];t++)
for(z=0; z<G_localL[2];z++)
for(y=0; y<G_localL[1];y++)
for(x=0; x<G_localL[0];x++)
for(mu=0; mu<4; mu++)
for(c1=0; c1<3; c1++) // works only for QUDA_DIRAC_ORDER (color inside spin)
{
((double *)buffer)[i] = h_elem[t*G_localL[2]*G_localL[1]*G_localL[0]*4*3*2 + z*G_localL[1]*G_localL[0]*4*3*2 + y*G_localL[0]*4*3*2 + x*4*3*2 + mu*3*2 + c1*2 + 0];
((double *)buffer)[i+1] = h_elem[t*G_localL[2]*G_localL[1]*G_localL[0]*4*3*2 + z*G_localL[1]*G_localL[0]*4*3*2 + y*G_localL[0]*4*3*2 + x*4*3*2 + mu*3*2 + c1*2 + 1];
i+=2;
}
if(!qcd_isBigEndian())
qcd_swap_8((double*) buffer,2*4*3*G_localVolume);
MPI_File_write_all(mpifid, buffer, 4*3*2*G_localVolume, MPI_DOUBLE, &status);
free(buffer);
MPI_File_close(&mpifid);
MPI_Type_free(&subblock);
}
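// Parallel output layout (descriptive sketch added for documentation): rank 0 writes the
// LIME headers serially, broadcasts the resulting byte offset, and every rank then writes
// its own sub-lattice with MPI-IO. The 5-d subarray type describes the global array
// (t,z,y,x,spin*colour*complex doubles) with each rank's block located at
// starts[d] = comm_coords(d)*G_localL[d], so a single MPI_File_write_all produces the file
// in canonical lexicographic order.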
//
void quda::Gaussian_smearing(QKXTM_Vector &vectorGauss , QKXTM_Vector &vectorTmp , QKXTM_Gauge &gaugeAPE){
// if(G_nsmearGauss == 0) errorQuda("You can't run Gaussian_smearing with G_nsmearGauss = 0"); // for G_nsmearGauss == 0 just copy
// first communicate APE gauge
gaugeAPE.ghostToHost();
gaugeAPE.cpuExchangeGhost();
gaugeAPE.ghostToDevice();
QKXTM_Vector *in = NULL;
QKXTM_Vector *out = NULL;
QKXTM_Vector *tmp = NULL;
in = &(vectorTmp);
out = &(vectorGauss);
gaugeAPE.bindGaugeAPE();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
// cudaPrintfInit();
hipEvent_t start,stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
printfQuda("Permform Gaussian smearing\n");
for(int iter = 0 ; iter < G_nsmearGauss ; iter++){
in->ghostToHost();
in->cpuExchangeGhost();
in->ghostToDevice();
in->rebindVectorGauss();
hipLaunchKernelGGL(( Gauss_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) out->D_elem());
// cudaPrintfDisplay(stdout,true);
hipDeviceSynchronize();
checkCudaError();
tmp = in;
in = out;
out = tmp;
}
// cudaPrintfEnd();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
// printfQuda("Elapsed time for APE smearing kernel is %f ms\n",elapsedTime);
if( (G_nsmearGauss%2) == 0){
hipMemcpy(vectorGauss.D_elem() , vectorTmp.D_elem() , vectorGauss.Bytes() - vectorGauss.BytesGhost() , hipMemcpyDeviceToDevice);
}
gaugeAPE.unbindGaugeAPE();
}
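// Buffering note (descriptive sketch added for documentation): Gaussian_smearing()
// ping-pongs between vectorTmp and vectorGauss, rebinding the input texture each
// iteration. After an odd number of iterations the result already sits in vectorGauss;
// after an even number it sits in vectorTmp, which is why the final device-to-device copy
// is performed only when G_nsmearGauss is even (this also covers the G_nsmearGauss == 0
// case, where the smearing degenerates to a plain copy).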
void quda::seqSourceFixSinkPart1(QKXTM_Vector &vec, QKXTM_Propagator3D &prop1, QKXTM_Propagator3D &prop2, int timeslice,int nu,int c2, whatProjector typeProj , whatParticle testParticle){
// cudaPrintfInit();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
//cudaPrintfEnd();
//cudaPrintfInit();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
hipBindTexture(0, propagator3DTex1, prop1.D_elem(), prop1.Bytes());
hipBindTexture(0, propagator3DTex2, prop2.D_elem(), prop2.Bytes());
hipLaunchKernelGGL(( seqSourceFixSinkPart1_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) vec.D_elem(), timeslice , nu, c2, typeProj , testParticle );
hipDeviceSynchronize();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
hipUnbindTexture(propagator3DTex1);
hipUnbindTexture(propagator3DTex2);
// cudaPrintfEnd();
checkCudaError();
}
void quda::seqSourceFixSinkPart2(QKXTM_Vector &vec, QKXTM_Propagator3D &prop1, int timeslice,int nu,int c2, whatProjector typeProj, whatParticle testParticle){
// cudaPrintfInit();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
//cudaPrintfEnd();
//cudaPrintfInit();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
hipBindTexture(0, propagator3DTex1, prop1.D_elem(), prop1.Bytes());
hipLaunchKernelGGL(( seqSourceFixSinkPart2_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) vec.D_elem(), timeslice , nu, c2, typeProj, testParticle );
hipDeviceSynchronize();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
hipUnbindTexture(propagator3DTex1);
// cudaPrintfEnd();
checkCudaError();
}
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////// class QKXTM_Propagator ////////////////////////////////////////////////////////////////
// $$ Section 10: Class QKXTM_Propagator $$
QKXTM_Propagator::QKXTM_Propagator():
propagator_binded_ape(false) , packPropagator_flag(false) , loadPropagator_flag(false)
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
ghost_length = 0;
field_length = G_nSpin*G_nSpin*G_nColor*G_nColor;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_all();
}
QKXTM_Propagator::~QKXTM_Propagator(){
destroy_all();
if(propagator_binded_ape == true) unbindPropagatorAPE();
propagator_binded_ape = false;
}
void QKXTM_Propagator::packPropagator(void *propagator){
if(packPropagator_flag == false){
double *p_propagator = (double*) propagator;
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // always work with format colors inside spins
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++)
for(int part = 0 ; part < 2 ; part++){
h_elem[mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + iv*2 + part] = p_propagator[iv*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*2 + nu*G_nColor*G_nColor*2 + c1*G_nColor*2 + c2*2 + part];
}
printfQuda("Propagator qkxTM packed on gpu form\n");
packPropagator_flag = true;
}
}
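// Layout sketch (added documentation, with an illustrative helper that is not used by the
// class itself): the propagator uses the same site-fastest GPU ordering as the vector,
// with the source indices (nu,c2) nested inside the sink indices (mu,c1). The offset of
// the real part of component (mu,nu,c1,c2) at site iv is:
static inline long long qkxtmPropagatorGpuOffsetSketch(long long iv, int mu, int nu, int c1, int c2, int nSpin, int nColor, long long localVolume){
return ((long long)mu*nSpin*nColor*nColor + (long long)nu*nColor*nColor + (long long)c1*nColor + c2)*localVolume*2 + iv*2;
}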
void QKXTM_Propagator::loadPropagator(){
if(packPropagator_flag == true && loadPropagator_flag == false){
hipMemcpy(d_elem,h_elem,(bytes_total_length - bytes_ghost_length), hipMemcpyHostToDevice );
checkCudaError();
loadPropagator_flag = true;
printfQuda("Propagator qkxTM loaded on gpu\n");
}
}
void QKXTM_Propagator::ghostToHost(){ // collect the ghost surfaces on the GPU and copy them to the host
// direction x ////////////////////////////////////
#ifdef MULTI_GPU
if( G_localL[0] < G_totalL[0]){
int position;
int height = G_localL[1] * G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = 2*sizeof(double);
size_t spitch = G_localL[0]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = (G_localL[0]-1)*G_localL[1]*G_localL[2]*G_localL[3];
position = (G_localL[0]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[0]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[0]*2 + nu*G_nColor*G_nColor*G_surface3D[0]*2 + c1*G_nColor*G_surface3D[0]*2 + c2*G_surface3D[0]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[0]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[0]*2 + nu*G_nColor*G_nColor*G_surface3D[0]*2 + c1*G_nColor*G_surface3D[0]*2 + c2*G_surface3D[0]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
}
// direction y ///////////////////////////////////
if( G_localL[1] < G_totalL[1]){
int position;
int height = G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[1]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*(G_localL[1]-1)*G_localL[2]*G_localL[3];
position = G_localL[0]*(G_localL[1]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[1]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[1]*2 + nu*G_nColor*G_nColor*G_surface3D[1]*2 + c1*G_nColor*G_surface3D[1]*2 + c2*G_surface3D[1]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[1]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[1]*2 + nu*G_nColor*G_nColor*G_surface3D[1]*2 + c1*G_nColor*G_surface3D[1]*2 + c2*G_surface3D[1]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
}
// direction z //////////////////////////////////
if( G_localL[2] < G_totalL[2]){
int position;
int height = G_localL[3]; // number of blocks that we need
size_t width = G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[2]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*G_localL[1]*(G_localL[2]-1)*G_localL[3];
position = G_localL[0]*G_localL[1]*(G_localL[2]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[2]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[2]*2 + nu*G_nColor*G_nColor*G_surface3D[2]*2 + c1*G_nColor*G_surface3D[2]*2 + c2*G_surface3D[2]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[2]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[2]*2 + nu*G_nColor*G_nColor*G_surface3D[2]*2 + c1*G_nColor*G_surface3D[2]*2 + c2*G_surface3D[2]*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
}
// direction t /////////////////////////////////////
if( G_localL[3] < G_totalL[3]){
int position;
int height = G_nSpin*G_nSpin*G_nColor*G_nColor;
size_t width = G_localL[2]*G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[3]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
position = G_localL[0]*G_localL[1]*G_localL[2]*(G_localL[3]-1);
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_minusGhost[3]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
// set minus points to plus area
position = 0;
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_plusGhost[3]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
hipMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,hipMemcpyDeviceToHost);
}
#endif
}
void QKXTM_Propagator::cpuExchangeGhost(){ // CPUs exchange the propagator ghost sites
#ifdef MULTI_GPU
if(comm_size() > 1){
MPI_Request request_recv[2*G_nDim];
MPI_Request request_send[2*G_nDim];
int back_nbr[4] = {X_BACK_NBR,Y_BACK_NBR,Z_BACK_NBR,T_BACK_NBR};
int fwd_nbr[4] = {X_FWD_NBR,Y_FWD_NBR,Z_FWD_NBR,T_FWD_NBR};
double *pointer_receive = NULL;
double *pointer_send = NULL;
// direction x
if(G_localL[0] < G_totalL[0]){
size_t nbytes = G_surface3D[0]*G_nSpin*G_nSpin*G_nColor*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[0]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_minusGhost[0]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[0], 0, &(request_recv[0]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[0], 0, &(request_send[0]));
comm_wait(&(request_recv[0])); // blocking until receive finish
comm_wait(&(request_send[0]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[0]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_plusGhost[0]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[0], 1, &(request_recv[1]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[0], 1, &(request_send[1]));
comm_wait(&(request_recv[1])); // blocking until receive finish
comm_wait(&(request_send[1]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction y
if(G_localL[1] < G_totalL[1]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[1]*G_nSpin*G_nSpin*G_nColor*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[1]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_minusGhost[1]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[1], 2, &(request_recv[2]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[1], 2, &(request_send[2]));
comm_wait(&(request_recv[2])); // blocking until receive finish
comm_wait(&(request_send[2]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[1]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_plusGhost[1]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[1], 3, &(request_recv[3]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[1], 3, &(request_send[3]));
comm_wait(&(request_recv[3])); // blocking until receive finish
comm_wait(&(request_send[3]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction z
if(G_localL[2] < G_totalL[2]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[2]*G_nSpin*G_nSpin*G_nColor*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[2]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_minusGhost[2]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[2], 4, &(request_recv[4]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[2], 4, &(request_send[4]));
comm_wait(&(request_recv[4])); // blocking until receive finish
comm_wait(&(request_send[4]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[2]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_plusGhost[2]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[2], 5, &(request_recv[5]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[2], 5, &(request_send[5]));
comm_wait(&(request_recv[5])); // blocking until receive finish
comm_wait(&(request_send[5]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction t
if(G_localL[3] < G_totalL[3]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[3]*G_nSpin*G_nSpin*G_nColor*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[3]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_minusGhost[3]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[3], 6, &(request_recv[6]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[3], 6, &(request_send[6]));
comm_wait(&(request_recv[6])); // blocking until receive finish
comm_wait(&(request_send[6]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[3]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_plusGhost[3]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[3], 7, &(request_recv[7]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[3], 7, &(request_send[7]));
comm_wait(&(request_recv[7])); // blocking until receive finish
comm_wait(&(request_send[7]));
pointer_receive = NULL;
pointer_send = NULL;
}
}
#endif
}
void QKXTM_Propagator::ghostToDevice(){ // simple cudamemcpy to send ghost to device
#ifdef MULTI_GPU
if(comm_size() > 1){
double *host = h_ext_ghost;
double *device = d_elem + G_localVolume*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
hipMemcpy(device,host,bytes_ghost_length,hipMemcpyHostToDevice);
checkCudaError();
}
#endif
}
void QKXTM_Propagator::bindPropagatorAPE(){
if( propagator_binded_ape == false ){
hipBindTexture(0,propagatorTexAPE,d_elem,bytes_total_length);
checkCudaError();
}
propagator_binded_ape = true;
}
void QKXTM_Propagator::unbindPropagatorAPE(){
if(propagator_binded_ape == true){
hipUnbindTexture(propagatorTexAPE);
checkCudaError();
}
propagator_binded_ape = false;
}
void QKXTM_Propagator::rebindPropagatorAPE(){
hipUnbindTexture(propagatorTexAPE);
hipBindTexture(0,propagatorTexAPE,d_elem,bytes_total_length);
checkCudaError();
}
double QKXTM_Propagator::norm2Host(){
double res = 0.;
for(int i = 0 ; i < G_nSpin*G_nSpin*G_nColor*G_nColor*G_localVolume ; i++){
res += h_elem[i*2 + 0]*h_elem[i*2 + 0] + h_elem[i*2 + 1]*h_elem[i*2 + 1];
}
#ifdef MULTI_GPU
double globalRes;
int rc = MPI_Allreduce(&res , &globalRes , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for norm2");
return globalRes ;
#else
return res;
#endif
}
double QKXTM_Propagator::norm2Device(){
double *h_partial = NULL;
double *d_partial = NULL;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
h_partial = (double*) malloc(gridDim.x * sizeof(double) ); // only real part
if(h_partial == NULL) errorQuda("Error allocating memory for host partial norm2");
hipMalloc((void**)&d_partial, gridDim.x * sizeof(double));
hipBindTexture(0,propagatorTexNorm2,d_elem, Bytes() - BytesGhost() );
hipLaunchKernelGGL(( norm2Propagator_kernel), dim3(gridDim),dim3(blockDim), 0, 0, d_partial);
hipDeviceSynchronize();
hipUnbindTexture(propagatorTexNorm2);
hipMemcpy(h_partial, d_partial , gridDim.x * sizeof(double) , hipMemcpyDeviceToHost);
double norm2 = 0.;
// simple host reduction
for(int i = 0 ; i < gridDim.x ; i++)
norm2 += h_partial[i];
free(h_partial);
hipFree(d_partial);
h_partial = NULL;
d_partial = NULL;
checkCudaError();
#ifdef MULTI_GPU
double globalNorm2;
int rc = MPI_Allreduce(&norm2 , &globalNorm2 , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for norm2");
return globalNorm2 ;
#else
return norm2;
#endif
}
void QKXTM_Propagator::absorbVector(QKXTM_Vector &vec, int nu, int c2){
double *pointProp;
double *pointVec;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
pointProp = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2;
pointVec = vec.D_elem() + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2;
hipMemcpy(pointProp,pointVec,G_localVolume*2*sizeof(double),hipMemcpyDeviceToDevice);
}
checkCudaError();
}
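// Usage note (descriptive sketch added for documentation): absorbVector() copies one
// solution vector into the (nu,c2) source column of the propagator. Absorbing the
// solutions of all G_nSpin*G_nColor source combinations, one per (nu,c2), fills the
// complete point-to-all propagator in the GPU layout described above.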
void QKXTM_Propagator::download(){
hipMemcpy(h_elem,d_elem,Bytes() - BytesGhost() , hipMemcpyDeviceToHost);
checkCudaError();
double *propagator_tmp = (double*) malloc( Bytes() - BytesGhost() );
if(propagator_tmp == NULL) errorQuda("Error allocating memory for tmp propagator");
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // always work with format colors inside spins
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++)
for(int part = 0 ; part < 2 ; part++){
propagator_tmp[iv*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*2 + nu*G_nColor*G_nColor*2 + c1*G_nColor*2 + c2*2 + part] = h_elem[mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + iv*2 + part];
}
memcpy(h_elem,propagator_tmp,Bytes() - BytesGhost());
free(propagator_tmp);
propagator_tmp = NULL;
}
void QKXTM_Propagator::rotateToPhysicalBasePlus(){
printfQuda("Perform rotation to physical base using + sign\n");
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( rotateToPhysicalBase_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_elem , +1); //kernel
hipDeviceSynchronize();
checkCudaError();
}
void QKXTM_Propagator::rotateToPhysicalBaseMinus(){
printfQuda("Perform rotation to physical base using - sign\n");
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( rotateToPhysicalBase_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_elem , -1); //kernel
hipDeviceSynchronize();
checkCudaError();
}
void QKXTM_Propagator::conjugate(){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( conjugate_propagator_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) D_elem() );
hipDeviceSynchronize();
checkCudaError();
}
void QKXTM_Propagator::applyGamma5(){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( apply_gamma5_propagator_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) D_elem() );
hipDeviceSynchronize();
checkCudaError();
}
void QKXTM_Propagator::checkSum(){
download();
double *M = H_elem();
// print, one line per component, the checksum of six selected (mu,nu) spin components
const int spinPairs[6][2] = { {2,0} , {2,1} , {1,2} , {1,0} , {0,2} , {0,1} };
for(int ip = 0 ; ip < 6 ; ip++){
int mu = spinPairs[ip][0];
int nu = spinPairs[ip][1];
double sum_real = 0.;
double sum_imag = 0.;
for(int t = 0 ; t < G_localL[3] ; t++)
for(int z = 0 ; z < G_localL[2] ; z++)
for(int y = 0 ; y < G_localL[1] ; y++)
for(int x = 0 ; x < G_localL[0] ; x++)
for(int c1 =0 ; c1 < 3 ; c1++)
for(int c2 =0 ; c2 < 3 ; c2++){
int prp_position = c2 + 3*c1 + 3*3*nu + 3*3*4*mu + 3*3*4*4*x + 3*3*4*4*G_localL[0]*y + 3*3*4*4*G_localL[0]*G_localL[1]*z + 3*3*4*4*G_localL[0]*G_localL[1]*G_localL[2]*t;
sum_real += M[prp_position*2 + 0];
sum_imag += M[prp_position*2 + 1];
}
printf("%d %+e %+e\n",comm_rank(),sum_real,sum_imag);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////// class QKXTM_Correlator ////////////////////////////////////////////////////////////////
// $$ Section 11: Class QKXTM_Correlator $$
QKXTM_Correlator::QKXTM_Correlator()
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
ghost_length = 0;
field_length = G_nSpin*G_nSpin;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_all();
}
QKXTM_Correlator::~QKXTM_Correlator(){
destroy_all();
}
void QKXTM_Correlator::download(){
hipMemcpy(h_elem,d_elem,Bytes() - BytesGhost() , hipMemcpyDeviceToHost);
checkCudaError();
double *corr_tmp = (double*) malloc( Bytes() - BytesGhost() );
if(corr_tmp == NULL) errorQuda("Error allocating memory for tmp correlator");
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // always work with format colors inside spins
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int part = 0 ; part < 2 ; part++){
corr_tmp[iv*G_nSpin*G_nSpin*2 + mu*G_nSpin*2 + nu*2 + part] = h_elem[mu*G_nSpin*G_localVolume*2 + nu*G_localVolume*2 + iv*2 + part];
}
memcpy(h_elem,corr_tmp,Bytes() - BytesGhost());
free(corr_tmp);
corr_tmp = NULL;
}
// spatial volume reduction ( first try with only zero momentum)
void QKXTM_Correlator::fourierCorr(double *corrMom, int Nmom , int momElem[][3]){
// corrMom must be allocated with G_localL[3]*Nmom*4*4*2
// slowest is time then momentum then gamma then gamma1 then r,i
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
hipBindTexture(0, correlationTex, d_elem, Bytes() );
double *h_partial_block = NULL;
double *d_partial_block = NULL;
h_partial_block = (double*) malloc(4*4*gridDim.x*2 * sizeof(double) ); // for complex *2
if(h_partial_block == NULL) errorQuda("Error allocating memory for host partial block");
hipMalloc((void**)&d_partial_block, 4*4*gridDim.x*2 * sizeof(double) );
double reduction[4*4*2];
double globalReduction[4*4*2];
for(int it = 0 ; it < G_localL[3] ; it++){
for(int imom = 0 ; imom < Nmom ; imom++){
hipLaunchKernelGGL(( fourierCorr_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block ,it ,momElem[imom][0] , momElem[imom][1] , momElem[imom][2] ); // source position and proc position is in constant memory
hipDeviceSynchronize();
hipMemcpy(h_partial_block , d_partial_block , 4*4*gridDim.x*2 * sizeof(double) , hipMemcpyDeviceToHost);
memset(reduction , 0 , 4*4*2 * sizeof(double) );
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[gamma*4*2 + gamma1*2 + 0] += h_partial_block[gamma*4*gridDim.x*2 + gamma1*gridDim.x*2 + i*2 + 0];
reduction[gamma*4*2 + gamma1*2 + 1] += h_partial_block[gamma*4*gridDim.x*2 + gamma1*gridDim.x*2 + i*2 + 1];
}
MPI_Reduce(&(reduction[0]) , &(globalReduction[0]) , 4*4*2 , MPI_DOUBLE , MPI_SUM , 0 , G_spaceComm); // only local root has the right value
if(G_localRank == 0){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
corrMom[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 0] = globalReduction[gamma*4*2 + gamma1*2 + 0];
corrMom[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 1] = globalReduction[gamma*4*2 + gamma1*2 + 1];
}
}
} // for all momenta
} // for all local timeslice
hipUnbindTexture(correlationTex);
free(h_partial_block);
hipFree(d_partial_block);
checkCudaError();
h_partial_block = NULL;
d_partial_block = NULL;
}
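// Momentum projection (descriptive sketch added for documentation): for every local
// timeslice t and requested momentum p, fourierCorr() accumulates the phase-weighted sum
//
//   C(t,p)_{gamma,gamma'} = sum_x exp(+/- i p.x) C(t,x)_{gamma,gamma'}
//
// (the sign convention is fixed inside fourierCorr_kernel). The kernel produces per-block
// partial sums, the host adds the blocks, and an MPI_Reduce over the spatial communicator
// G_spaceComm leaves the full spatial sum on the local root (G_localRank == 0), which
// stores it in corrMom in the (t, momentum, gamma, gamma', re/im) order noted above.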
void QKXTM_Correlator::packCorrelator(void *corr){
double *p_corr = (double*) corr;
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int part = 0 ; part < 2 ; part++){
h_elem[mu*G_nSpin*G_localVolume*2 + nu*G_localVolume*2 + iv*2 + part] = p_corr[iv*G_nSpin*G_nSpin*2 + mu*G_nSpin*2 + nu*2 + part];
}
printfQuda("Correlator qkxTM packed on gpu form\n");
}
void QKXTM_Correlator::loadCorrelator(){
hipMemcpy(d_elem,h_elem,(bytes_total_length - bytes_ghost_length), hipMemcpyHostToDevice );
checkCudaError();
printfQuda("Correlator qkxTM loaded on gpu\n");
}
//////////////////////////////////////////////////////// Contractions ///////////////////////////////////////
void quda::corrProton(QKXTM_Propagator &uprop, QKXTM_Propagator &dprop ,QKXTM_Correlator &corr){
printfQuda("Perform contractions for Proton\n");
hipBindTexture(0,propagatorTexOne,uprop.D_elem(),uprop.Bytes()); // one will be up prop
hipBindTexture(0,propagatorTexTwo,dprop.D_elem(),dprop.Bytes()); // two will be down prop
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( contract_Type1_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) corr.D_elem() );
hipDeviceSynchronize();
hipUnbindTexture(propagatorTexOne);
hipUnbindTexture(propagatorTexTwo);
checkCudaError();
}
void quda::corrNeutron(QKXTM_Propagator &uprop, QKXTM_Propagator &dprop ,QKXTM_Correlator &corr){
printfQuda("Perform contractions for Neutron\n");
hipBindTexture(0,propagatorTexOne,dprop.D_elem(),dprop.Bytes());
hipBindTexture(0,propagatorTexTwo,uprop.D_elem(),uprop.Bytes());
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( contract_Type1_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) corr.D_elem() );
hipDeviceSynchronize();
hipUnbindTexture(propagatorTexOne);
hipUnbindTexture(propagatorTexTwo);
checkCudaError();
}
//////////////// New //////////////////
void quda::corrPion(QKXTM_Propagator &prop, double *corr){
printfQuda("Perform contractions for Pion");
hipBindTexture(0,propagatorTexOne,prop.D_elem(),prop.Bytes());
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( contract_twop_pion_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) corr);
hipDeviceSynchronize();
hipUnbindTexture(propagatorTexOne);
checkCudaError();
}
void quda::performContractionsPion(QKXTM_Propagator &prop, int Nmom, int momElem[][3] , char *filenamePion){
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize qudaQKXTM first");
FILE *filePion;
if(comm_rank() == 0){
filePion = fopen(filenamePion,"w");
if(filePion == NULL){
fprintf(stderr,"Error open file paths for writting\n");
comm_exit(-1);
}
}
QKXTM_Field *field = new QKXTM_Field();
double *d_corr;
hipMalloc((void**)&d_corr,field->Bytes());
double *corr_fourier = (double*) calloc(G_localL[3]*Nmom*2,sizeof(double));
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*2,sizeof(double));
corrPion(prop, d_corr);
field->fourierCorr(d_corr,corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++){
fprintf(filePion,"%d %+d %+d %+d \t %+e %+e\n",it,momElem[imom][0],momElem[imom][1],momElem[imom][2],corr_fourier_full[it*Nmom*2 + imom*2 + 0] , corr_fourier_full[it*Nmom*2 + imom*2 + 1]);
}
}
comm_barrier();
if(comm_rank() == 0)
fclose(filePion);
delete field;
hipFree(d_corr);
free(corr_fourier);
free(corr_fourier_full);
}
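// Output gathering (descriptive sketch added for documentation): after the momentum
// projection, only the ranks with a valid G_timeRank (one per timeslice segment,
// G_nProc[3] of them) take part in an MPI_Gather over G_timeComm that concatenates the
// G_localL[3] local timeslices of every segment into the full G_totalL[3] array on the
// global root, which then writes one line per (timeslice, momentum) to the output file.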
/////////////////////////////////////////
void quda::performContractions(QKXTM_Propagator &uprop, QKXTM_Propagator &dprop , int Nmom, int momElem[][3] , char *filenameProton , char *filenameNeutron){
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize qudaQKXTM first");
FILE *fileProton, *fileNeutron;
if(comm_rank() == 0){
fileProton = fopen(filenameProton,"w");
fileNeutron = fopen(filenameNeutron,"w");
if(fileProton == NULL || fileNeutron == NULL){
fprintf(stderr,"Error open file paths for writting\n");
comm_exit(-1);
}
}
QKXTM_Correlator *corr = new QKXTM_Correlator();
double *corr_fourier = (double*) calloc(G_localL[3]*Nmom*4*4*2,sizeof(double));
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*4*4*2,sizeof(double));
corrProton(uprop,dprop,*corr);
corr->fourierCorr(corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(fileProton,"%d %+d %+d %+d %d %d \t %+e %+e\n",it,momElem[imom][0],momElem[imom][1],momElem[imom][2],gamma,gamma1,corr_fourier_full[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 0] , corr_fourier_full[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
}
comm_barrier();
corrNeutron(uprop,dprop,*corr);
corr->fourierCorr(corr_fourier,Nmom,momElem);
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(fileNeutron,"%d %+d %+d %+d %d %d \t %+e %+e\n",it,momElem[imom][0],momElem[imom][1],momElem[imom][2],gamma,gamma1,corr_fourier_full[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 0] , corr_fourier_full[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
}
delete corr;
free(corr_fourier);
free(corr_fourier_full);
}
void quda::fixSinkFourier(double *corr,double *corrMom, int Nmom , int momElem[][3]){
// corrMom must be allocated with G_localL[3]*Nmom*2
// slowest is time then momentum then r,i
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
hipBindTexture(0, correlationTex, corr, G_localVolume*2*sizeof(double) );
double *h_partial_block = NULL;
double *d_partial_block = NULL;
h_partial_block = (double*) malloc(gridDim.x*2 * sizeof(double) ); // for complex *2
if(h_partial_block == NULL) errorQuda("error allocating memory for host partial block");
hipMalloc((void**)&d_partial_block, gridDim.x*2 * sizeof(double) );
double reduction[2];
double globalReduction[2];
for(int it = 0 ; it < G_localL[3] ; it++){
for(int imom = 0 ; imom < Nmom ; imom++){
hipLaunchKernelGGL(( fourierCorr_kernel2), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block ,it ,momElem[imom][0] , momElem[imom][1] , momElem[imom][2] ); // source position and proc position is in constant memory
hipDeviceSynchronize();
hipMemcpy(h_partial_block , d_partial_block , gridDim.x*2 * sizeof(double) , hipMemcpyDeviceToHost);
memset(reduction , 0 , 2 * sizeof(double) );
for(int i =0 ; i < gridDim.x ; i++){
reduction[0] += h_partial_block[i*2 + 0];
reduction[1] += h_partial_block[i*2 + 1];
}
MPI_Reduce(&(reduction[0]) , &(globalReduction[0]) , 2 , MPI_DOUBLE , MPI_SUM , 0 , G_spaceComm); // only local root has the right value
if(G_localRank == 0){
corrMom[it*Nmom*2 + imom*2 + 0] = globalReduction[0];
corrMom[it*Nmom*2 + imom*2 + 1] = globalReduction[1];
}
} // for all momenta
} // for all local timeslice
hipUnbindTexture(correlationTex);
free(h_partial_block);
hipFree(d_partial_block);
checkCudaError();
h_partial_block = NULL;
d_partial_block = NULL;
}
void quda::fixSinkContractions(QKXTM_Propagator &seqProp, QKXTM_Propagator &prop , QKXTM_Gauge &gauge,whatProjector typeProj , char *filename , int Nmom , int momElem[][3] , whatParticle testParticle, int partFlag){
// append suffixes with strcat: sprintf(filename,"%s_...",filename,...) passes the
// destination buffer as one of its sources, which is undefined behavior
if(typeProj == QKXTM_TYPE1)
strcat(filename,"_type1");
else if (typeProj == QKXTM_TYPE2)
strcat(filename,"_type2");
else if (typeProj == QKXTM_PROJ_G5G1)
strcat(filename,"_G5G1");
else if (typeProj == QKXTM_PROJ_G5G2)
strcat(filename,"_G5G2");
else if (typeProj == QKXTM_PROJ_G5G3)
strcat(filename,"_G5G3");
if(testParticle == QKXTM_PROTON)
strcat(filename,"_proton");
else
strcat(filename,"_neutron");
char filename_local[257] , filename_noether[257] , filename_oneD[257];
sprintf(filename_local,"%s_%s.dat",filename,"local");
sprintf(filename_noether,"%s_%s.dat",filename,"noether");
sprintf(filename_oneD,"%s_%s.dat",filename,"oneD");
FILE *fileLocal , *fileNoether , *fileOneD;
if(comm_rank() == 0){
fileLocal = fopen(filename_local,"w");
fileNoether = fopen(filename_noether,"w");
fileOneD = fopen(filename_oneD, "w");
if(fileLocal == NULL || fileNoether == NULL || fileOneD == NULL){
fprintf(stderr,"Error opening files for writing\n");
comm_exit(-1);
}
}
seqProp.applyGamma5();
seqProp.conjugate();
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
// holds correlator in position space
double *d_corr;
hipError_t error;
error = hipMalloc((void**)&d_corr, G_localVolume*2*sizeof(double));
if(error != hipSuccess)errorQuda("Error allocating device memory for correlator");
double *corr_fourier = (double*) calloc(G_localL[3]*Nmom*2,sizeof(double));
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*2,sizeof(double));
// to speed up contraction we use texture binding for seq-prop and prop
hipBindTexture(0, seqPropagatorTex, seqProp.D_elem(), seqProp.Bytes());
hipBindTexture(0, fwdPropagatorTex, prop.D_elem(), prop.Bytes());
// +++++++++++++++ local operators +++++++++++++++++++// (10 local operators)
// for local operators we use 1 , g1 , g2 , g3 , g4 , g5 , g5g1 , g5g2 , g5g3 , g5g4
// so we map operators to integers 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9
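// A minimal illustrative sketch of that mapping, assuming one preferred named constants
// to raw iflag values (these enumerators are not defined anywhere in this file):
//   enum LocalOp { OP_1 = 0, OP_G1, OP_G2, OP_G3, OP_G4,
//                  OP_G5, OP_G5G1, OP_G5G2, OP_G5G3, OP_G5G4 };  // OP_G5G4 == 9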
for(int iflag = 0 ; iflag < 10 ; iflag++){
hipLaunchKernelGGL(( fixSinkContractions_local_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_corr , iflag, testParticle, partFlag);
hipDeviceSynchronize(); // to make sure that we have the data in corr
fixSinkFourier(d_corr,corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++){
fprintf(fileLocal,"%d %d %+d %+d %+d \t %+e %+e\n",iflag,it,momElem[imom][0],momElem[imom][1],momElem[imom][2],corr_fourier_full[it*Nmom*2 + imom*2 + 0] , corr_fourier_full[it*Nmom*2 + imom*2 + 1]);
}
}
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// communication
hipBindTexture(0, gaugeDerivativeTex, gauge.D_elem(), gauge.Bytes());
gauge.ghostToHost();
gauge.cpuExchangeGhost(); // communicate gauge
gauge.ghostToDevice();
comm_barrier(); // just in case
prop.ghostToHost();
prop.cpuExchangeGhost(); // communicate forward propagator
prop.ghostToDevice();
comm_barrier(); // just in case
seqProp.ghostToHost();
seqProp.cpuExchangeGhost(); // communicate sequential propagator
seqProp.ghostToDevice();
comm_barrier(); // just in case
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// +++++++++++++++++++ conserved current +++++++++++++++++++++++++++++++++++++++++//
// mapping gamma indices
// g1 , g2 , g3 , g4
// 0 , 1 , 2 , 3
for(int idir = 0 ; idir < 4 ; idir++){
hipLaunchKernelGGL(( fixSinkContractions_noether_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_corr , idir, testParticle, partFlag);
hipDeviceSynchronize(); // to make sure that we have the data in corr
fixSinkFourier(d_corr,corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++){
fprintf(fileNoether,"%d %d %+d %+d %+d \t %+e %+e\n",idir,it,momElem[imom][0],momElem[imom][1],momElem[imom][2],corr_fourier_full[it*Nmom*2 + imom*2 + 0] , corr_fourier_full[it*Nmom*2 + imom*2 + 1]);
}
}
}
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// +++++++++++++++++++ derivative operators ++++++++++++++++++++++++++++++++//
// for derivative operators the gamma structures are g1,g2,g3,g4 and g5g1,g5g2,g5g3,g5g4 => 4+4 combinations
// the derivative carries four indices D^0 , D^1 , D^2 , D^3
// in total we have 8*4 = 32 combinations
// mapping of gamma indices (the derivative gets a separate index)
// g1 , g2 , g3 , g4 , g5g1 , g5g2 , g5g3 , g5g4
// 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7
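// Note: the 32 derivative-operator combinations are swept with iflag (gamma structure,
// 0..7) in the outer loop and dir (derivative index D^0..D^3) in the inner loop, and
// every record written to fileOneD is keyed by the pair (iflag, dir) in that order.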
// cudaPrintfInit();
for(int iflag = 0 ; iflag < 8 ; iflag++){ // iflag perform loop over gammas
for(int dir = 0 ; dir < 4 ; dir++){
hipLaunchKernelGGL(( fixSinkContractions_oneD_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_corr , iflag, dir , testParticle, partFlag);
hipDeviceSynchronize(); // to make sure that we have the data in corr
fixSinkFourier(d_corr,corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++){
fprintf(fileOneD,"%d %d %d %+d %+d %+d \t %+e %+e\n",iflag,dir,it,momElem[imom][0],momElem[imom][1],momElem[imom][2],corr_fourier_full[it*Nmom*2 + imom*2 + 0] , corr_fourier_full[it*Nmom*2 + imom*2 + 1]);
}
}
}
}
// if(comm_rank() == 0) cudaPrintfDisplay(stdout,true);
// cudaPrintfEnd();
// ------------------------------------------------------------------------------------------
hipUnbindTexture(seqPropagatorTex);
hipUnbindTexture(fwdPropagatorTex);
hipUnbindTexture(gaugeDerivativeTex);
hipFree(d_corr);
checkCudaError();
free(corr_fourier_full);
free(corr_fourier);
if(comm_rank() == 0){
fclose(fileLocal);
fclose(fileNoether);
fclose(fileOneD);
}
}
void quda::fixSinkContractions_nonLocal(QKXTM_Propagator &seqProp, QKXTM_Propagator &prop , QKXTM_Gauge &gauge,whatProjector typeProj , char *filename , int Nmom , int momElem[][3] , whatParticle testParticle, int partFlag, double *deviceWilsonPath, double *deviceWilsonPathBwd,int direction){
// append suffixes with strcat: sprintf(filename,"%s_...",filename,...) passes the
// destination buffer as one of its sources, which is undefined behavior
if(typeProj == QKXTM_TYPE1)
strcat(filename,"_type1");
else if (typeProj == QKXTM_TYPE2)
strcat(filename,"_type2");
else if (typeProj == QKXTM_PROJ_G5G1)
strcat(filename,"_G5G1");
else if (typeProj == QKXTM_PROJ_G5G2)
strcat(filename,"_G5G2");
else if (typeProj == QKXTM_PROJ_G5G3)
strcat(filename,"_G5G3");
if(testParticle == QKXTM_PROTON)
strcat(filename,"_proton");
else
strcat(filename,"_neutron");
char filename_nonLocal[257];
sprintf(filename_nonLocal,"%s_%s.dat",filename,"nonLocal");
FILE *fileNonLocal;
if(comm_rank() == 0){
fileNonLocal = fopen(filename_nonLocal,"w");
if(fileNonLocal == NULL){
fprintf(stderr,"Error opening file for writing\n");
comm_exit(-1);
}
}
seqProp.applyGamma5();
seqProp.conjugate();
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
// holds correlator in position space
double *d_corr;
hipError_t error;
error = hipMalloc((void**)&d_corr, G_localVolume*2*sizeof(double));
if(error != hipSuccess)errorQuda("Error allocating device memory for correlator");
double *corr_fourier = (double*) calloc(G_localL[3]*Nmom*2,sizeof(double));
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*2,sizeof(double));
double *corr_fourier_bwd = (double*) calloc(G_localL[3]*Nmom*2,sizeof(double));
double *corr_fourier_full_bwd = (double*) calloc(G_totalL[3]*Nmom*2,sizeof(double));
// to speed up contraction we use texture binding for seq-prop and prop
hipBindTexture(0, seqPropagatorTex, seqProp.D_elem(), seqProp.Bytes());
hipBindTexture(0, fwdPropagatorTex, prop.D_elem(), prop.Bytes());
hipBindTexture(0, gaugeDerivativeTex, gauge.D_elem(), gauge.Bytes());
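// Note: dl below is the Wilson-line length in lattice units along 'direction'; for each
// dl in 0 .. G_totalL[direction]/2 - 1 the forward and backward line contractions are
// computed separately and written side by side in the same fileNonLocal record.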
for(int dl = 0 ; dl < G_totalL[direction]/2 ; dl++){
//// fwd direction /////
hipLaunchKernelGGL(( fixSinkContractions_nonLocal_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_corr ,(double2*) deviceWilsonPath, dl, testParticle, partFlag,direction);
hipDeviceSynchronize(); // to make sure that we have the data in corr
fixSinkFourier(d_corr,corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
/////////////
///// bwd direction ////////
hipLaunchKernelGGL(( fixSinkContractions_nonLocalBwd_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_corr ,(double2*) deviceWilsonPathBwd, dl, testParticle, partFlag,direction);
hipDeviceSynchronize(); // to make sure that we have the data in corr
fixSinkFourier(d_corr,corr_fourier_bwd,Nmom,momElem);
error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier_bwd,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full_bwd,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
//////////////
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++){
fprintf(fileNonLocal,"%d %d %+d %+d %+d \t %+e %+e \t %+e %+e\n",dl,it,momElem[imom][0],momElem[imom][1],momElem[imom][2],
corr_fourier_full[it*Nmom*2 + imom*2 + 0] , corr_fourier_full[it*Nmom*2 + imom*2 + 1],
corr_fourier_full_bwd[it*Nmom*2 + imom*2 + 0] , corr_fourier_full_bwd[it*Nmom*2 + imom*2 + 1]);
}
}
}
// ------------------------------------------------------------------------------------------
hipUnbindTexture(seqPropagatorTex);
hipUnbindTexture(fwdPropagatorTex);
hipUnbindTexture(gaugeDerivativeTex);
hipFree(d_corr);
checkCudaError();
free(corr_fourier_full);
free(corr_fourier);
free(corr_fourier_full_bwd);
free(corr_fourier_bwd);
if(comm_rank() == 0){
fclose(fileNonLocal);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// $$ Section 12: Class QKXTM_Propagator3D $$
/////////////////////////////////////////////////////// class Propagator 3D /////////
QKXTM_Propagator3D::QKXTM_Propagator3D()
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize qudaQKXTM first");
field_length = G_nSpin*G_nSpin*G_nColor*G_nColor;
bytes_total_length = G_localL[0]*G_localL[1]*G_localL[2]*field_length*2*sizeof(double);
create_host();
create_device();
zero();
}
QKXTM_Propagator3D::~QKXTM_Propagator3D(){
destroy_host();
destroy_device();
}
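// Layout note: on the device a 3D propagator is ordered as (mu, nu, c1, c2, 3D-site, re/im),
// so each colour-spin component is a contiguous block of G_localVolume3D*2 doubles;
// absorbTimeSlice copies the matching block of the full 4D propagator, whose site index
// also runs fastest, starting at the offset timeslice*G_localVolume3D*2.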
void QKXTM_Propagator3D::absorbTimeSlice(QKXTM_Propagator &prop, int timeslice){
double *pointer_src = NULL;
double *pointer_dst = NULL;
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
pointer_dst = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume3D*2 + nu*G_nColor*G_nColor*G_localVolume3D*2 + c1*G_nColor*G_localVolume3D*2 + c2*G_localVolume3D*2;
pointer_src = prop.D_elem() + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + timeslice*G_localVolume3D*2;
hipMemcpy(pointer_dst, pointer_src, G_localVolume3D*2*sizeof(double), hipMemcpyDeviceToDevice);
}
pointer_src = NULL;
pointer_dst = NULL;
checkCudaError();
}
void QKXTM_Propagator3D::absorbVectorTimeSlice(QKXTM_Vector &vec, int timeslice, int nu , int c2){
double *pointer_src = NULL;
double *pointer_dst = NULL;
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
pointer_dst = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume3D*2 + nu*G_nColor*G_nColor*G_localVolume3D*2 + c1*G_nColor*G_localVolume3D*2 + c2*G_localVolume3D*2;
pointer_src = vec.D_elem() + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + timeslice*G_localVolume3D*2;
hipMemcpy(pointer_dst, pointer_src, G_localVolume3D*2 * sizeof(double), hipMemcpyDeviceToDevice);
}
pointer_src = NULL;
pointer_dst = NULL;
checkCudaError();
}
void QKXTM_Propagator3D::download(){
hipMemcpy(h_elem,d_elem,Bytes() , hipMemcpyDeviceToHost);
checkCudaError();
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
double *propagator3D_tmp = (double*) malloc( Bytes() );
if(propagator3D_tmp == NULL)errorQuda("Error allocating memory for tmp propagator");
for(int iv = 0 ; iv < G_localVolume3D ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // always use the layout with colors running inside spins
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++)
for(int part = 0 ; part < 2 ; part++){
propagator3D_tmp[iv*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*2 + nu*G_nColor*G_nColor*2 + c1*G_nColor*2 + c2*2 + part] = h_elem[mu*G_nSpin*G_nColor*G_nColor*G_localVolume3D*2 + nu*G_nColor*G_nColor*G_localVolume3D*2 + c1*G_nColor*G_localVolume3D*2 + c2*G_localVolume3D*2 + iv*2 + part];
}
memcpy(h_elem,propagator3D_tmp,Bytes() );
free(propagator3D_tmp);
propagator3D_tmp = NULL;
}
void QKXTM_Propagator3D::justCopyToHost(){
hipMemcpy(H_elem() , D_elem() , Bytes() , hipMemcpyDeviceToHost);
checkCudaError();
}
void QKXTM_Propagator3D::justCopyToDevice(){
hipMemcpy(D_elem() , H_elem() , Bytes() , hipMemcpyHostToDevice);
checkCudaError();
}
void QKXTM_Propagator3D::broadcast(int tsink){
justCopyToHost(); // transfer data to host so we can communicate
comm_barrier();
int bcastRank = tsink/G_localL[3];
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
int error = MPI_Bcast(H_elem() , 4*4*3*3*G_localVolume3D*2 , MPI_DOUBLE , bcastRank , G_timeComm ); // broadcast the data from node that has the tsink to other nodes
if(error != MPI_SUCCESS)errorQuda("Error in mpi broadcasting");
justCopyToDevice();
}
// $$ Section 13: Class QKXTM_Vector3D $$
//////////////////////////////////////////////////// class Vector3D
QKXTM_Vector3D::QKXTM_Vector3D()
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize qudaQKXTM first");
ghost_length = 0;
field_length = G_nSpin*G_nColor;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume/G_localL[3] + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_host();
create_device();
zero();
}
QKXTM_Vector3D::~QKXTM_Vector3D(){
destroy_host();
destroy_device();
}
void QKXTM_Vector3D::absorbTimeSlice(QKXTM_Vector &vec, int timeslice){
double *pointer_src = NULL;
double *pointer_dst = NULL;
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
pointer_dst = d_elem + mu*G_nColor*G_localVolume3D*2 + c1*G_localVolume3D*2;
pointer_src = vec.D_elem() + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + timeslice*G_localVolume3D*2;
hipMemcpy(pointer_dst, pointer_src, G_localVolume3D*2*sizeof(double), hipMemcpyDeviceToDevice);
}
pointer_src = NULL;
pointer_dst = NULL;
checkCudaError();
}
void QKXTM_Vector3D::justCopyToHost(){
hipMemcpy(H_elem() , D_elem() , Bytes() , hipMemcpyDeviceToHost);
checkCudaError();
}
void QKXTM_Vector3D::justCopyToDevice(){
hipMemcpy(D_elem() , H_elem() , Bytes() , hipMemcpyHostToDevice);
checkCudaError();
}
void QKXTM_Vector3D::download(){
hipMemcpy(h_elem,d_elem,Bytes() - BytesGhost() , hipMemcpyDeviceToHost);
checkCudaError();
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
double *vector_tmp = (double*) malloc( Bytes() - BytesGhost() );
if(vector_tmp == NULL)errorQuda("Error allocating memory for tmp vector");
for(int iv = 0 ; iv < G_localVolume3D ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // always use the layout with colors running inside spins
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int part = 0 ; part < 2 ; part++){
vector_tmp[iv*G_nSpin*G_nColor*2 + mu*G_nColor*2 + c1*2 + part] = h_elem[mu*G_nColor*G_localVolume3D*2 + c1*G_localVolume3D*2 + iv*2 + part];
}
memcpy(h_elem,vector_tmp,Bytes() - BytesGhost());
free(vector_tmp);
vector_tmp = NULL;
}
void QKXTM_Vector3D::broadcast(int tsink){
justCopyToHost(); // transfer data to host so we can communicate
comm_barrier();
int bcastRank = tsink/G_localL[3];
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
int error = MPI_Bcast(H_elem() , 4*3*G_localVolume3D*2 , MPI_DOUBLE , bcastRank , G_timeComm ); // broadcast the data from node that has the tsink to other nodes
if(error != MPI_SUCCESS)errorQuda("Error in mpi broadcasting");
justCopyToDevice();
}
//****************************************************************************************************************************************************************************
// $$ Section 14: Stochastic Connected Diagrams $$
// Contents
// 1) insLineFourier
// 2) write_3pf_local
// 3) write_3pf_oneD
// 4) QKXTM_Vector3D::fourier
// 5) partialContract3pf_upart_proton
// 6) partialContract3pf_upart_neutron
// 7) partialContract3pf_dpart_neutron
// 8) partialContract3pf_dpart_proton
// 9) finalize_contract3pf_mixLevel
// 10) finalize_contract3pf_oneLevel
// 11) threepStochUpart
// 12) threepStochDpart
///////////////////////////////////////////// functions for stochastic three point functions //////////
#define WRITE_BINARY
QKXTM_VectorX8::QKXTM_VectorX8()
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize qudaQKXTM first");
ghost_length = 0;
field_length = 8*G_nSpin*G_nColor;
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_all();
}
QKXTM_VectorX8::~QKXTM_VectorX8(){
destroy_all();
}
void quda::insLineFourier(double *insLineMom , double *insLine, int Nmom , int momElem[][3]){
// insLineMom layout: time, momentum, spin, color, (re,im)
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // grid spans the 3D local volume (G_localVolume3D)
hipBindTexture(0, insLineFourierTex, insLine, 4*3*G_localVolume*2*sizeof(double) );
double *h_partial_block = NULL;
double *d_partial_block = NULL;
h_partial_block = (double*) malloc(4*3*gridDim.x*2 * sizeof(double) ); // for complex *2
if(h_partial_block == NULL) errorQuda("error allocating memory for host partial block");
hipMalloc((void**)&d_partial_block, 4*3*gridDim.x*2 * sizeof(double) );
double reduction[4*3*2];
double globalReduction[4*3*2];
for(int it = 0 ; it < G_localL[3] ; it++){
for(int imom = 0 ; imom < Nmom ; imom++){
hipLaunchKernelGGL(( fourierCorr_kernel3), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block ,it , momElem[imom][0] , momElem[imom][1] , momElem[imom][2] ); // future include mom here
hipDeviceSynchronize();
hipMemcpy(h_partial_block , d_partial_block , 4*3*gridDim.x*2 * sizeof(double) , hipMemcpyDeviceToHost);
memset(reduction , 0 , 4*3*2 * sizeof(double) );
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int c1 = 0 ; c1 < 3 ; c1++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[gamma*3*2 + c1*2 + 0] += h_partial_block[gamma*3*gridDim.x*2 + c1*gridDim.x*2 + i*2 + 0];
reduction[gamma*3*2 + c1*2 + 1] += h_partial_block[gamma*3*gridDim.x*2 + c1*gridDim.x*2 + i*2 + 1];
}
MPI_Reduce(&(reduction[0]) , &(globalReduction[0]) , 4*3*2 , MPI_DOUBLE , MPI_SUM , 0 , G_spaceComm); // only local root has the right value
if(G_localRank == 0){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int c1 = 0 ; c1 < 3 ; c1++){
insLineMom[it*Nmom*4*3*2 + imom*4*3*2 + gamma*3*2 + c1*2 + 0] = globalReduction[gamma*3*2 + c1*2 + 0];
insLineMom[it*Nmom*4*3*2 + imom*4*3*2 + gamma*3*2 + c1*2 + 1] = globalReduction[gamma*3*2 + c1*2 + 1];
}
}
}
} // for all local timeslice
hipUnbindTexture(insLineFourierTex);
free(h_partial_block);
hipFree(d_partial_block);
checkCudaError();
h_partial_block = NULL;
d_partial_block = NULL;
}
// we must calculate the insertion line for all the operators
#define MAX_PARTICLES 18
/*
static void write_3pf_local(FILE *file_ptr, double *results, int iflag , int Nmom , int momElem[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*Nmom*4*4*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*Nmom*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++)
for(int imom2 = 0 ; imom2 < Nmom ; imom2++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(file_ptr,"%d %d %+d %+d %+d %+d %+d %+d %d %d \t %+e %+e\n",iflag,it,momElem[imom1][0],momElem[imom1][1],momElem[imom1][2],
momElem[imom2][0],momElem[imom2][1],momElem[imom2][2],
gamma,gamma1,corr_fourier_full[it*Nmom*Nmom*4*4*2 + imom1*Nmom*4*4*2 + imom2*4*4*2 + gamma*4*2 + gamma1*2 + 0] ,
corr_fourier_full[it*Nmom*Nmom*4*4*2 + imom1*Nmom*4*4*2 +imom2*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
}
comm_barrier();
free(corr_fourier_full);
}
static void write_3pf_oneD(FILE *file_ptr, double *results, int iflag ,int dir , int Nmom , int momElem[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*Nmom*4*4*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*Nmom*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++)
for(int imom2 = 0 ; imom2 < Nmom ; imom2++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(file_ptr,"%d %d %d %+d %+d %+d %+d %+d %+d %d %d \t %+e %+e\n",iflag,dir,it,momElem[imom1][0],momElem[imom1][1],momElem[imom1][2],
momElem[imom2][0],momElem[imom2][1],momElem[imom2][2],
gamma,gamma1,corr_fourier_full[it*Nmom*Nmom*4*4*2 + imom1*Nmom*4*4*2 + imom2*4*4*2 + gamma*4*2 + gamma1*2 + 0] ,
corr_fourier_full[it*Nmom*Nmom*4*4*2 + imom1*Nmom*4*4*2 +imom2*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
}
comm_barrier();
free(corr_fourier_full);
}
*/
static void write_3pf_Nonlocal_zeroMomIns(FILE *file_ptr, double *results,int dir, int iflag , int NmomSink , int momElemSink[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*NmomSink*4*4*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*NmomSink*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*NmomSink*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < NmomSink ; imom1++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(file_ptr,"%d %d %d %+d %+d %+d %d %d \t %+e %+e\n",dir,iflag,it,momElemSink[imom1][0],momElemSink[imom1][1],momElemSink[imom1][2],
gamma,gamma1,corr_fourier_full[it*NmomSink*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 0] ,
corr_fourier_full[it*NmomSink*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
}
comm_barrier();
free(corr_fourier_full);
}
static void write_3pf_local_zeroMomSink(FILE *file_ptr, double *results, int iflag , int Nmom , int momElem[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*4*4*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
#ifdef WRITE_BINARY
fwrite((void*) corr_fourier_full, sizeof(double) , G_totalL[3]*Nmom*4*4*2,file_ptr);
#else
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(file_ptr,"%d %d %+d %+d %+d %d %d \t %+e %+e\n",iflag,it,momElem[imom1][0],momElem[imom1][1],momElem[imom1][2],
gamma,gamma1,corr_fourier_full[it*Nmom*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 0] ,
corr_fourier_full[it*Nmom*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
#endif
}
comm_barrier();
free(corr_fourier_full);
}
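/* Illustrative sketch (kept in a comment, not compiled here): reading back one record
   written by write_3pf_local_zeroMomSink when WRITE_BINARY is defined. Each call appends
   G_totalL[3]*Nmom*4*4*2 doubles ordered as t, momentum, gamma, gamma1, (re,im); the names
   totalT, Nmom, it, imom, gamma, gamma1 and the filename are placeholders for the reader.

   FILE *fp = fopen("threep_proton_local.dat","rb");
   size_t n = (size_t) totalT*Nmom*4*4*2;
   double *buf = (double*) malloc(n*sizeof(double));
   if(fp == NULL || buf == NULL || fread(buf,sizeof(double),n,fp) != n)
     fprintf(stderr,"failed to read a record\n");
   double re = buf[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 0];
   double im = buf[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 1];
*/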
static void write_3pf_oneD_zeroMomSink(FILE *file_ptr, double *results, int iflag ,int dir , int Nmom , int momElem[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*4*4*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
#ifdef WRITE_BINARY
fwrite((void*) corr_fourier_full, sizeof(double) , G_totalL[3]*Nmom*4*4*2,file_ptr);
#else
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(file_ptr,"%d %d %d %+d %+d %+d %d %d \t %+e %+e\n",iflag,dir,it,momElem[imom1][0],momElem[imom1][1],momElem[imom1][2],
gamma,gamma1,corr_fourier_full[it*Nmom*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 0] ,
corr_fourier_full[it*Nmom*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
#endif
}
comm_barrier();
free(corr_fourier_full);
}
static void write_3pf_Nonlocal_Pion_zeroMomIns(FILE *file_ptr, double *results,int dir, int iflag , int NmomSink , int momElemSink[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*NmomSink*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*NmomSink*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*NmomSink*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < NmomSink ; imom1++){
fprintf(file_ptr,"%d %d %d %+d %+d %+d \t %+e %+e\n",dir,iflag,it,momElemSink[imom1][0],momElemSink[imom1][1],momElemSink[imom1][2],
corr_fourier_full[it*NmomSink*2 + imom1*2 + 0] ,
corr_fourier_full[it*NmomSink*2 + imom1*2 + 1]);
}
}
comm_barrier();
free(corr_fourier_full);
}
void QKXTM_Vector3D::fourier(double *vecMom, int Nmom , int momElem[][3]){
// vecMom must be allocated with Nmom*4*3*2
// slowest is momentum then gamma then c1 then r,i
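// Illustrative index sketch matching the stores below (comment only):
//   vecMom[imom*4*3*2 + gamma*3*2 + c1*2 + 0]   // real part
//   vecMom[imom*4*3*2 + gamma*3*2 + c1*2 + 1]   // imaginary part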
// hipError_t error;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // grid spans the 3D local volume (G_localVolume3D)
hipBindTexture(0, correlationTex, d_elem, Bytes() );
//if(error != hipSuccess)fprintf(stderr,"Error bind texture\n");
double *h_partial_block = NULL;
double *d_partial_block = NULL;
h_partial_block = (double*) malloc(4*3*gridDim.x*2 * sizeof(double) ); // for complex *2
if(h_partial_block == NULL) errorQuda("error allocating memory for host partial block");
hipMalloc((void**)&d_partial_block, 4*3*gridDim.x*2 * sizeof(double) );
//if(error != hipSuccess)fprintf(stderr,"Error malloc\n");
double reduction[4*3*2];
double globalReduction[4*3*2];
for(int imom = 0 ; imom < Nmom ; imom++){
hipLaunchKernelGGL(( fourierCorr_kernel4), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_partial_block ,momElem[imom][0] , momElem[imom][1] , momElem[imom][2] ); // source position and proc position is in constant memory
hipDeviceSynchronize();
hipMemcpy(h_partial_block , d_partial_block , 4*3*gridDim.x*2 * sizeof(double) , hipMemcpyDeviceToHost);
//if(error != hipSuccess)fprintf(stderr,"Error memcpy\n");
memset(reduction , 0 , 4*3*2 * sizeof(double) );
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int c1 = 0 ; c1 < 3 ; c1++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[gamma*3*2 + c1*2 + 0] += h_partial_block[gamma*3*gridDim.x*2 + c1*gridDim.x*2 + i*2 + 0];
reduction[gamma*3*2 + c1*2 + 1] += h_partial_block[gamma*3*gridDim.x*2 + c1*gridDim.x*2 + i*2 + 1];
}
MPI_Reduce(&(reduction[0]) , &(globalReduction[0]) , 4*3*2 , MPI_DOUBLE , MPI_SUM , 0 , G_spaceComm); // only local root has the right value
if(G_localRank == 0){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int c1 = 0 ; c1 < 3 ; c1++){
vecMom[ imom*4*3*2 + gamma*3*2 + c1*2 + 0] = globalReduction[gamma*3*2 + c1*2 + 0];
vecMom[ imom*4*3*2 + gamma*3*2 + c1*2 + 1] = globalReduction[gamma*3*2 + c1*2 + 1];
}
}
} // for all momenta
hipUnbindTexture(correlationTex);
free(h_partial_block);
hipFree(d_partial_block);
//if(error != hipSuccess)fprintf(stderr,"Error cuda free\n");
checkCudaError();
h_partial_block = NULL;
d_partial_block = NULL;
}
static void partialContract3pf_upart_pion(double *pion_level, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
// cudaPrintfInit ();
hipLaunchKernelGGL(( partial_Contract3pf_pion_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) vec3D.D_elem() , 0);
// cudaPrintfDisplay (stdout, true);
//cudaPrintfEnd ();
hipDeviceSynchronize();
checkCudaError();
vec3D.fourier(pion_level,Nmom,momElem);
}
static void partialContract3pf_dpart_pion(double *pion_level, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( partial_Contract3pf_pion_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) vec3D.D_elem() , 1);
hipDeviceSynchronize();
checkCudaError();
vec3D.fourier(pion_level,Nmom,momElem);
}
static void partialContract3pf_upart_proton(double *proton_level1,double *proton_level3, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( partial_lvl1_Contract3pf_Type1_1_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) vec3D.D_elem() , 1 , 2);
hipDeviceSynchronize();
checkCudaError();
vec3D.fourier(proton_level1,Nmom,momElem);
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
hipLaunchKernelGGL(( partial_lvl3_Contract3pf_Type1_1_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) vec3D.D_elem(),gamma,gamma1, 1 , 2);
hipDeviceSynchronize();
checkCudaError();
double *ptr = proton_level3 + gamma*4*Nmom*4*3*2 + gamma1*Nmom*4*3*2;
vec3D.fourier(ptr,Nmom,momElem);
}
checkCudaError();
}
static void partialContract3pf_upart_neutron(double *neutron_level3, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
hipLaunchKernelGGL(( partial_lvl3_Contract3pf_Type1_2_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) vec3D.D_elem(),gamma,gamma1, 2);
hipDeviceSynchronize();
checkCudaError();
double *ptr = neutron_level3 + gamma*4*Nmom*4*3*2 + gamma1*Nmom*4*3*2;
vec3D.fourier(ptr,Nmom,momElem);
}
checkCudaError();
}
static void partialContract3pf_dpart_neutron(double *neutron_level1,double *neutron_level3, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
hipLaunchKernelGGL(( partial_lvl1_Contract3pf_Type1_1_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) vec3D.D_elem() , 2 , 1);
hipDeviceSynchronize();
checkCudaError();
vec3D.fourier(neutron_level1,Nmom,momElem);
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
hipLaunchKernelGGL(( partial_lvl3_Contract3pf_Type1_1_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) vec3D.D_elem(),gamma,gamma1, 2 , 1);
hipDeviceSynchronize();
checkCudaError();
double *ptr = neutron_level3 + gamma*4*Nmom*4*3*2 + gamma1*Nmom*4*3*2;
vec3D.fourier(ptr,Nmom,momElem);
}
checkCudaError();
}
static void partialContract3pf_dpart_proton(double *proton_level3, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
hipLaunchKernelGGL(( partial_lvl3_Contract3pf_Type1_2_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) vec3D.D_elem(),gamma,gamma1, 1);
hipDeviceSynchronize();
checkCudaError();
double *ptr = proton_level3 + gamma*4*Nmom*4*3*2 + gamma1*Nmom*4*3*2;
vec3D.fourier(ptr,Nmom,momElem);
}
checkCudaError();
}
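// Note: finalize_contract3pf_mixLevel below combines the momentum-projected insertion
// line I with the partial sink contractions as, for each (t, p, gamma, gamma1),
//   res = sum_c I[t,p,gamma1,c]*lvl1[gamma,c] + sum_{s,c} I[t,p,s,c]*lvl3[gamma,gamma1,s,c],
// which is exactly the pair of accumulations performed in its loops; the oneLevel
// variant keeps only the lvl3 term.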
static void finalize_contract3pf_mixLevel(Complex *res,Complex *Iins, Complex *lvl3, Complex *lvl1, int Nmom, int momElem[][3]){
memset(res,0,G_localL[3]*Nmom*4*4*2*sizeof(double));
for(int it = 0 ; it < G_localL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
for(int color = 0 ; color < 3 ; color++){
res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*Nmom*4*3 + imom1*4*3 + gamma1*3 + color] * lvl1[gamma*3 + color];
for(int spin = 0 ; spin < 4 ; spin++){
res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*Nmom*4*3 + imom1*4*3 + spin*3 + color] * lvl3[gamma*4*4*3 + gamma1*4*3 + spin*3 + color];
}
}
}
}
}
static void finalize_contract3pf_oneLevel(Complex *res,Complex *Iins, Complex *lvl3, int Nmom, int momElem[][3]){
memset(res,0,G_localL[3]*Nmom*4*4*2*sizeof(double));
for(int it = 0 ; it < G_localL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
for(int color = 0 ; color < 3 ; color++)
for(int spin = 0 ; spin < 4 ; spin++){
res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*Nmom*4*3 + imom1*4*3 + spin*3 + color] * lvl3[gamma*4*4*3 + gamma1*4*3 + spin*3 + color];
}
}
}
}
static void finalize_contract3pf_mixLevel_SinkMom(Complex *res,Complex *Iins, Complex *lvl3, Complex *lvl1, int NmomSink, int momElemSink[][3]){
memset(res,0,G_localL[3]*NmomSink*4*4*2*sizeof(double));
for(int it = 0 ; it < G_localL[3] ; it++)
for(int imom1 = 0 ; imom1 < NmomSink ; imom1++){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
for(int color = 0 ; color < 3 ; color++){
res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*4*3 + gamma1*3 + color] * lvl1[ imom1*4*3 + gamma*3 + color];
for(int spin = 0 ; spin < 4 ; spin++){
res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*4*3 + spin*3 + color] * lvl3[gamma*4*NmomSink*4*3 + gamma1*NmomSink*4*3 + imom1*4*3 + spin*3 + color];
}
}
}
}
}
static void finalize_contract3pf_oneLevel_SinkMom(Complex *res,Complex *Iins, Complex *lvl3, int NmomSink, int momElemSink[][3]){
memset(res,0,G_localL[3]*NmomSink*4*4*2*sizeof(double));
for(int it = 0 ; it < G_localL[3] ; it++)
for(int imom1 = 0 ; imom1 < NmomSink ; imom1++){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
for(int color = 0 ; color < 3 ; color++)
for(int spin = 0 ; spin < 4 ; spin++){
res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*4*3 + spin*3 + color] * lvl3[gamma*4*NmomSink*4*3 + gamma1*NmomSink*4*3 + imom1*4*3 + spin*3 + color];
}
}
}
}
static void finalize_contract3pf_Pion_SinkMom(Complex *res,Complex *Iins, Complex *lvl, int NmomSink, int momElemSink[][3]){
memset(res,0,G_localL[3]*NmomSink*2*sizeof(double));
for(int it = 0 ; it < G_localL[3] ; it++)
for(int imom1 = 0 ; imom1 < NmomSink ; imom1++){
for(int color = 0 ; color < 3 ; color++)
for(int spin = 0 ; spin < 4 ; spin++)
res[it*NmomSink + imom1] = res[it*NmomSink + imom1] + Iins[it*4*3 + spin*3 + color] * lvl[imom1*4*3 + spin*3 + color];
}
}
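// Note on the (Complex*) casts used below: the double buffers hold interleaved (re,im)
// pairs, so reinterpreting them as Complex halves the index stride; this assumes Complex
// is a plain two-double complex type with exactly that memory layout.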
//#define NEW_VERSION
void quda::threepStochUpart( QKXTM_Vector &phi , QKXTM_Vector3D &xi ,QKXTM_Propagator &uprop , QKXTM_Propagator3D &uprop3D , QKXTM_Propagator3D &dprop3D, QKXTM_Gauge &gauge, int fixTime , char *filename ,int Nmom , int momElem[][3]){
// ! fix time is the absolute sink time
int NmomSink = 1;
int momElemSink[1][3];
momElemSink[0][0] = 0;
momElemSink[0][1] = 0;
momElemSink[0][2] = 0;
char particles_filename[MAX_PARTICLES][257];
char particles_filename_noether[MAX_PARTICLES][257];
char particles_filename_oneD[MAX_PARTICLES][257];
FILE *file_local[MAX_PARTICLES];
FILE *file_noether[MAX_PARTICLES];
FILE *file_oneD[MAX_PARTICLES];
sprintf(particles_filename[0],"%s_%s",filename,"proton_local.dat");
sprintf(particles_filename[1],"%s_%s",filename,"neutron_local.dat");
sprintf(particles_filename_noether[0],"%s_%s",filename,"proton_noether.dat");
sprintf(particles_filename_noether[1],"%s_%s",filename,"neutron_noether.dat");
sprintf(particles_filename_oneD[0],"%s_%s",filename,"proton_oneD.dat");
sprintf(particles_filename_oneD[1],"%s_%s",filename,"neutron_oneD.dat");
if(comm_rank() == 0){
#ifdef WRITE_BINARY
file_local[0] = fopen(particles_filename[0],"ab");
file_local[1] = fopen(particles_filename[1],"ab");
file_noether[0] = fopen(particles_filename_noether[0],"ab");
file_noether[1] = fopen(particles_filename_noether[1],"ab");
file_oneD[0] = fopen(particles_filename_oneD[0],"ab");
file_oneD[1] = fopen(particles_filename_oneD[1],"ab");
#else
file_local[0] = fopen(particles_filename[0],"a");
file_local[1] = fopen(particles_filename[1],"a");
file_noether[0] = fopen(particles_filename_noether[0],"a");
file_noether[1] = fopen(particles_filename_noether[1],"a");
file_oneD[0] = fopen(particles_filename_oneD[0],"a");
file_oneD[1] = fopen(particles_filename_oneD[1],"a");
#endif
if(file_local[0] == NULL || file_local[1] == NULL || file_oneD[0] == NULL || file_oneD[1] == NULL || file_noether[0] == NULL || file_noether[1] == NULL){
fprintf(stderr,"Error opening files for writing : %s\n",strerror(errno));
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
// here we will calculate part of the contraction ----------------
printfQuda("Start partial contraction\n");
QKXTM_Vector3D *levelVec = new QKXTM_Vector3D();
// QKXTM_Vector3D *levelVec = new QKXTM_Vector3D[8];
// QKXTM_Vector3D *levelVec = malloc(8*sizeof(QKXTM_Vector3D));
//levelVec[0]->QKXTM_Vector3D();
hipBindTexture(0, uprop3DStochTex,uprop3D.D_elem(), uprop3D.Bytes());
hipBindTexture(0, dprop3DStochTex,dprop3D.D_elem(), dprop3D.Bytes());
hipBindTexture(0, xiVector3DStochTex, xi.D_elem(), xi.Bytes());
double *proton_level1 = (double*) malloc(NmomSink*4*3*2*sizeof(double));
double *proton_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
double *neutron_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
if(proton_level1 == NULL || proton_level3 == NULL || neutron_level3 == NULL){
fprintf(stderr,"Error allocating host memory for partial contraction\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
partialContract3pf_upart_proton(proton_level1,proton_level3,*levelVec,NmomSink,momElemSink);
partialContract3pf_upart_neutron(neutron_level3,*levelVec,NmomSink,momElemSink);
hipUnbindTexture(xiVector3DStochTex);
hipUnbindTexture(uprop3DStochTex);
hipUnbindTexture(dprop3DStochTex);
delete levelVec;
printfQuda("Finish partial contraction\n");
// ---------------------------------------------------------------
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *insLineMom = (double*) malloc(10*G_localL[3]*Nmom*4*3*2*sizeof(double));
double *insLineNoetherMom = (double*) malloc(4*G_localL[3]*Nmom*4*3*2*sizeof(double));
double *insLineOneDMom = (double*) malloc(8*4*G_localL[3]*Nmom*4*3*2*sizeof(double));
if(insLineMom == NULL || insLineOneDMom == NULL || insLineNoetherMom == NULL){
fprintf(stderr,"Error allocating host memory for insLineMom\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
// +++++++++++++++ local operators +++++++++++++++++++// (10 local operators)
// for local operators we use 1 , g1 , g2 , g3 , g4 , g5 , g5g1 , g5g2 , g5g3 , g5g4
// so we map operators to integers 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9
hipBindTexture(0, propStochTex, uprop.D_elem(), uprop.Bytes());
hipBindTexture(0, phiVectorStochTex, phi.D_elem(), phi.Bytes());
QKXTM_Vector *insLine = new QKXTM_Vector();
printfQuda("Start Insertion line\n");
for(int iflag = 0 ; iflag < 10 ; iflag++){
hipLaunchKernelGGL(( insLine_local_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) insLine->D_elem() , iflag , 1); // (1,2,3) (upart,dpart,spart)
hipDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
hipDeviceSynchronize();
checkCudaError();
}
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// communication
hipBindTexture(0, gaugeDerivativeTex, gauge.D_elem(), gauge.Bytes());
gauge.ghostToHost();
gauge.cpuExchangeGhost(); // communicate gauge
gauge.ghostToDevice();
comm_barrier(); // just in case
uprop.ghostToHost();
uprop.cpuExchangeGhost(); // communicate propagator
uprop.ghostToDevice();
comm_barrier(); // just in case
phi.ghostToHost();
phi.cpuExchangeGhost(); // communicate stochastic vector
phi.ghostToDevice();
comm_barrier(); // just in case
// +++++++++++++++++++ conserved current ++++++++++++++++++++++++++++++++//
// mapping gamma
// g1 , g2 , g3 , g4
// 0 , 1 , 2 , 3
for(int idir = 0 ; idir < 4 ; idir++){
hipLaunchKernelGGL(( insLine_noether_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) insLine->D_elem() , idir);
hipDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
hipDeviceSynchronize();
checkCudaError();
}
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// +++++++++++++++++++ derivative operators ++++++++++++++++++++++++++++++++//
// for derivative operators the gamma structures are g1,g2,g3,g4 and g5g1,g5g2,g5g3,g5g4 => 4+4 combinations
// the derivative carries four indices D^0 , D^1 , D^2 , D^3
// in total we have 8*4 = 32 combinations
// mapping of gamma indices (the derivative gets a separate index)
// g1 , g2 , g3 , g4 , g5g1 , g5g2 , g5g3 , g5g4
// 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7
//#ifdef NEW_VERSION
QKXTM_VectorX8 *insLineX8 = new QKXTM_VectorX8();
for(int dir = 0 ; dir < 4 ; dir++){
hipLaunchKernelGGL(( insLine_oneD_kernel_new), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) insLineX8->D_elem(), dir);
hipDeviceSynchronize();
checkCudaError();
for(int iflag = 0 ; iflag < 8 ; iflag++){
insLineFourier(insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2, insLineX8->D_elem() + iflag*G_nSpin*G_nColor*G_localVolume*2, Nmom, momElem);
hipDeviceSynchronize();
checkCudaError();
}
}
delete insLineX8;
//#else
/*
for(int iflag = 0 ; iflag < 8 ; iflag++) // iflag perform loop over gammas
for(int dir = 0 ; dir < 4 ; dir++){
// need to find a way to improve it
insLine_oneD_kernel<<<gridDim,blockDim>>>((double2*) insLine->D_elem() , iflag ,dir); // we dont need part here because operators are vector , axial
hipDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
hipDeviceSynchronize();
checkCudaError();
}
*/
delete insLine;
hipUnbindTexture(gaugeDerivativeTex);
printfQuda("Finish insertion line\n");
hipUnbindTexture(phiVectorStochTex);
hipUnbindTexture(propStochTex);
//+++++++++++++++++++++++++++++++++++ finish insertion line
double *res = (double*) malloc(G_localL[3]*Nmom*4*4*2*sizeof(double));
// write local
for(int iflag = 0 ; iflag < 10 ; iflag++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3,(Complex*) proton_level1, Nmom, momElem);
write_3pf_local_zeroMomSink(file_local[0],res,iflag,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3, Nmom, momElem);
write_3pf_local_zeroMomSink(file_local[1],res,iflag,Nmom,momElem);
}
// write noether
for(int idir = 0 ; idir < 4 ; idir++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3,(Complex*) proton_level1, Nmom, momElem);
write_3pf_local_zeroMomSink(file_noether[0],res,idir,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3, Nmom, momElem);
write_3pf_local_zeroMomSink(file_noether[1],res,idir,Nmom,momElem);
}
// write derivatives
for(int iflag = 0 ; iflag < 8 ; iflag++)
for(int dir = 0 ; dir < 4 ; dir++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3,(Complex*) proton_level1, Nmom, momElem);
write_3pf_oneD_zeroMomSink(file_oneD[0],res,iflag,dir,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3, Nmom, momElem);
write_3pf_oneD_zeroMomSink(file_oneD[1],res,iflag,dir,Nmom,momElem);
}
free(res);
if(comm_rank()==0){
fclose(file_local[0]);
fclose(file_local[1]);
fclose(file_noether[0]);
fclose(file_noether[1]);
fclose(file_oneD[0]);
fclose(file_oneD[1]);
}
free(insLineMom);
free(insLineNoetherMom);
free(insLineOneDMom);
free(proton_level1);
free(proton_level3);
free(neutron_level3);
checkCudaError();
}
void quda::threepStochDpart( QKXTM_Vector &phi , QKXTM_Vector3D &xi ,QKXTM_Propagator &dprop , QKXTM_Propagator3D &uprop3D , QKXTM_Propagator3D &dprop3D, QKXTM_Gauge &gauge, int fixTime , char *filename ,int Nmom , int momElem[][3]){
int NmomSink = 1;
int momElemSink[1][3];
momElemSink[0][0] = 0;
momElemSink[0][1] = 0;
momElemSink[0][2] = 0;
// ! fix time is the absolute sink time
char particles_filename[MAX_PARTICLES][257];
char particles_filename_noether[MAX_PARTICLES][257];
char particles_filename_oneD[MAX_PARTICLES][257];
FILE *file_local[MAX_PARTICLES];
FILE *file_noether[MAX_PARTICLES];
FILE *file_oneD[MAX_PARTICLES];
// FILE *file_oneD[MAX_PARTICLES];
sprintf(particles_filename[0],"%s_%s",filename,"proton_local.dat");
sprintf(particles_filename[1],"%s_%s",filename,"neutron_local.dat");
sprintf(particles_filename_noether[0],"%s_%s",filename,"proton_noether.dat");
sprintf(particles_filename_noether[1],"%s_%s",filename,"neutron_noether.dat");
sprintf(particles_filename_oneD[0],"%s_%s",filename,"proton_oneD.dat");
sprintf(particles_filename_oneD[1],"%s_%s",filename,"neutron_oneD.dat");
if(comm_rank() == 0){
#ifdef WRITE_BINARY
file_local[0] = fopen(particles_filename[0],"ab");
file_local[1] = fopen(particles_filename[1],"ab");
file_noether[0] = fopen(particles_filename_noether[0],"ab");
file_noether[1] = fopen(particles_filename_noether[1],"ab");
file_oneD[0] = fopen(particles_filename_oneD[0],"ab");
file_oneD[1] = fopen(particles_filename_oneD[1],"ab");
#else
file_local[0] = fopen(particles_filename[0],"a");
file_local[1] = fopen(particles_filename[1],"a");
file_noether[0] = fopen(particles_filename_noether[0],"a");
file_noether[1] = fopen(particles_filename_noether[1],"a");
file_oneD[0] = fopen(particles_filename_oneD[0],"a");
file_oneD[1] = fopen(particles_filename_oneD[1],"a");
#endif
if(file_local[0] == NULL || file_local[1] == NULL || file_oneD[0] == NULL || file_oneD[1] == NULL || file_noether[0] == NULL || file_noether[1] == NULL){
fprintf(stderr,"Error opening files for writing : %s\n",strerror(errno));
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
// here we will calculate part of the contraction ----------------
printfQuda("Start partial contraction\n");
QKXTM_Vector3D *levelVec = new QKXTM_Vector3D();
hipBindTexture(0, uprop3DStochTex,uprop3D.D_elem(), uprop3D.Bytes());
hipBindTexture(0, dprop3DStochTex,dprop3D.D_elem(), dprop3D.Bytes());
hipBindTexture(0, xiVector3DStochTex, xi.D_elem(), xi.Bytes());
double *neutron_level1 = (double*) malloc(NmomSink*4*3*2*sizeof(double));
double *neutron_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
double *proton_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
if(neutron_level1 == NULL || neutron_level3 == NULL || proton_level3 == NULL){
fprintf(stderr,"Error allocating host memory for partial contraction\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
partialContract3pf_dpart_neutron(neutron_level1,neutron_level3,*levelVec,NmomSink,momElemSink);
partialContract3pf_dpart_proton(proton_level3,*levelVec,NmomSink,momElemSink);
hipUnbindTexture(xiVector3DStochTex);
hipUnbindTexture(uprop3DStochTex);
hipUnbindTexture(dprop3DStochTex);
delete levelVec;
printfQuda("Finish partial contraction\n");
// ---------------------------------------------------------------
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *insLineMom = (double*) malloc(10*G_localL[3]*Nmom*4*3*2*sizeof(double));
double *insLineNoetherMom = (double*) malloc(4*G_localL[3]*Nmom*4*3*2*sizeof(double));
double *insLineOneDMom = (double*) malloc(8*4*G_localL[3]*Nmom*4*3*2*sizeof(double));
if(insLineMom == NULL || insLineOneDMom == NULL || insLineNoetherMom == NULL){
fprintf(stderr,"Error allocating host memory for insLineMom\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
// +++++++++++++++ local operators +++++++++++++++++++// (10 local operators)
// for local operators we use 1 , g1 , g2 , g3 , g4 , g5 , g5g1 , g5g2 , g5g3 , g5g4
// so we map operators to integers 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9
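// A sketch of the expected insLineMom layout (an assumption based on the allocation and
// the offsets used below, not a documented interface): iflag is slowest, then the local
// timeslice, momentum, spin (4), color (3) and re/im, i.e.
//   idx = ((((iflag*G_localL[3] + t)*Nmom + imom)*4 + mu)*3 + c)*2 + reim
// which matches the iflag*G_localL[3]*Nmom*4*3*2 stride passed to insLineFourier.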
hipBindTexture(0, propStochTex, dprop.D_elem(), dprop.Bytes());
hipBindTexture(0, phiVectorStochTex, phi.D_elem(), phi.Bytes());
QKXTM_Vector *insLine = new QKXTM_Vector();
printfQuda("Start Insertion line\n");
for(int iflag = 0 ; iflag < 10 ; iflag++){
hipLaunchKernelGGL(( insLine_local_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) insLine->D_elem() , iflag , 2); // (1,2,3) (upart,dpart,spart)
hipDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
hipDeviceSynchronize();
checkCudaError();
}
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ //
// communication
hipBindTexture(0, gaugeDerivativeTex, gauge.D_elem(), gauge.Bytes());
gauge.ghostToHost();
gauge.cpuExchangeGhost(); // communicate gauge
gauge.ghostToDevice();
comm_barrier(); // just in case
dprop.ghostToHost();
dprop.cpuExchangeGhost(); // communicate propagator
dprop.ghostToDevice();
comm_barrier(); // just in case
phi.ghostToHost();
phi.cpuExchangeGhost(); // communicate stochastic vector
phi.ghostToDevice();
comm_barrier(); // just in case
// +++++++++++++++++++ conserved current +++++++++++++++++++++++++++++++++++//
// mapping gamma
// g1 , g2 , g3 , g4
// 0 , 1 , 2 , 3
for(int idir = 0 ; idir < 4 ; idir++){
hipLaunchKernelGGL(( insLine_noether_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) insLine->D_elem() , idir);
hipDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
hipDeviceSynchronize();
checkCudaError();
}
// +++++++++++++++++++ derivative operators ++++++++++++++++++++++++++++++++//
// for derivative operators we have for gamma matrices g1,g2,g3,g4 ,g5g1,g5g2,g5g3,g5g4 => 4+4 combinations
// for derivative index we have 4 index D^0 , D^1 , D^2 , D^3
// for total we have 8*4=32 combinations
// mapping gamma indices, (derivative will have a separate index)
// g1 , g2 , g3 , g4 , g5g1 , g5g2 , g5g3 , g5g4
// 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7
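// Correspondingly, insLineOneDMom is assumed to be laid out with the gamma index
// (iflag, 0..7) slowest and the derivative direction (dir, 0..3) next:
//   idx = (((((iflag*4 + dir)*G_localL[3] + t)*Nmom + imom)*4 + mu)*3 + c)*2 + reim
// consistent with the iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 offsets below.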
//#ifdef NEW_VERSION
QKXTM_VectorX8 *insLineX8 = new QKXTM_VectorX8();
for(int dir = 0 ; dir < 4 ; dir++){
hipLaunchKernelGGL(( insLine_oneD_kernel_new), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) insLineX8->D_elem(), dir);
hipDeviceSynchronize();
checkCudaError();
for(int iflag = 0 ; iflag < 8 ; iflag++){
insLineFourier(insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2, insLineX8->D_elem() + iflag*G_nSpin*G_nColor*G_localVolume*2, Nmom, momElem);
hipDeviceSynchronize();
checkCudaError();
}
}
delete insLineX8;
/*
//#else
for(int iflag = 0 ; iflag < 8 ; iflag++) // iflag perform loop over gammas
for(int dir = 0 ; dir < 4 ; dir++){
// need to find a way to improve it
insLine_oneD_kernel<<<gridDim,blockDim>>>((double2*) insLine->D_elem() , iflag ,dir); // we dont need part here because operators are vector , axial
hipDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
hipDeviceSynchronize();
checkCudaError();
}
*/
//#endif
hipUnbindTexture(gaugeDerivativeTex);
printfQuda("Finish insertion line\n");
delete insLine;
hipUnbindTexture(phiVectorStochTex);
hipUnbindTexture(propStochTex);
//+++++++++++++++++++++++++++++++++++ finish insertion line
double *res = (double*) malloc(G_localL[3]*Nmom*4*4*2*sizeof(double));
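// res is assumed to hold one complex number per (t, imom, gamma, gamma1), i.e.
//   idx = (((t*Nmom + imom)*4 + gamma)*4 + gamma1)*2 + reim,
// which is what the G_localL[3]*Nmom*4*4*2 allocation suggests: the finalize_contract3pf_*
// routines fill it and the write_3pf_* routines dump it, one call per operator index.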
// write local
for(int iflag = 0 ; iflag < 10 ; iflag++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3,(Complex*) neutron_level1, Nmom, momElem);
write_3pf_local_zeroMomSink(file_local[1],res,iflag,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3, Nmom, momElem);
write_3pf_local_zeroMomSink(file_local[0],res,iflag,Nmom,momElem);
}
// write conserved
for(int idir = 0 ; idir < 4 ; idir++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3,(Complex*) neutron_level1, Nmom, momElem);
write_3pf_local_zeroMomSink(file_noether[1],res,idir,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3, Nmom, momElem);
write_3pf_local_zeroMomSink(file_noether[0],res,idir,Nmom,momElem);
}
// write derivatives
for(int iflag = 0 ; iflag < 8 ; iflag++)
for(int dir = 0 ; dir < 4 ; dir++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3,(Complex*) neutron_level1, Nmom, momElem);
write_3pf_oneD_zeroMomSink(file_oneD[1],res,iflag,dir,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3, Nmom, momElem);
write_3pf_oneD_zeroMomSink(file_oneD[0],res,iflag,dir,Nmom,momElem);
}
free(res);
if(comm_rank()==0){
fclose(file_local[0]);
fclose(file_local[1]);
fclose(file_noether[0]);
fclose(file_noether[1]);
fclose(file_oneD[0]);
fclose(file_oneD[1]);
}
free(insLineMom);
free(insLineNoetherMom);
free(insLineOneDMom);
free(neutron_level1);
free(neutron_level3);
free(proton_level3);
checkCudaError();
}
//
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void quda::threepStochUpart_WilsonLinks( QKXTM_Vector &phi , QKXTM_Vector3D &xi ,QKXTM_Propagator &uprop , QKXTM_Propagator3D &uprop3D , QKXTM_Propagator3D &dprop3D,double* deviceWilsonLinks, int fixTime , char *filename ,int Nmom , int momElem[][3], int NmomSink, int momSink[][3]){
// ! fix time is the absolute sink time
char particles_filename[MAX_PARTICLES][257];
FILE *file_Nonlocal[MAX_PARTICLES];
sprintf(particles_filename[0],"%s_%s",filename,"proton_Nonlocal.dat");
sprintf(particles_filename[1],"%s_%s",filename,"neutron_Nonlocal.dat");
if(comm_rank() == 0){
file_Nonlocal[0] = fopen(particles_filename[0],"a");
file_Nonlocal[1] = fopen(particles_filename[1],"a");
if(file_Nonlocal[0] == NULL || file_Nonlocal[1] == NULL ){
fprintf(stderr,"Error open files for writting : %s\n",strerror(errno));
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
// here we will calculate part of the contraction ----------------
printfQuda("Start partial contraction\n");
QKXTM_Vector3D *levelVec = new QKXTM_Vector3D();
// QKXTM_Vector3D *levelVec = new QKXTM_Vector3D[8];
// QKXTM_Vector3D *levelVec = malloc(8*sizeof(QKXTM_Vector3D));
//levelVec[0]->QKXTM_Vector3D();
hipBindTexture(0, uprop3DStochTex,uprop3D.D_elem(), uprop3D.Bytes());
hipBindTexture(0, dprop3DStochTex,dprop3D.D_elem(), dprop3D.Bytes());
hipBindTexture(0, xiVector3DStochTex, xi.D_elem(), xi.Bytes());
double *proton_level1 = (double*) malloc(NmomSink*4*3*2*sizeof(double));
double *proton_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
double *neutron_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
if(proton_level1 == NULL || proton_level3 == NULL || neutron_level3 == NULL){
fprintf(stderr,"Error allocate host memory for partial contraction\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
partialContract3pf_upart_proton(proton_level1,proton_level3,*levelVec,NmomSink,momSink);
partialContract3pf_upart_neutron(neutron_level3,*levelVec,NmomSink,momSink);
hipUnbindTexture(xiVector3DStochTex);
hipUnbindTexture(uprop3DStochTex);
hipUnbindTexture(dprop3DStochTex);
delete levelVec;
printfQuda("Finish partial contraction\n");
// ---------------------------------------------------------------
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *insLineMom = (double*) malloc(3*(G_totalL[0]/2)*G_localL[3]*Nmom*4*3*2*sizeof(double));
if(insLineMom == NULL){
fprintf(stderr,"Error allocate host memory for insLineMom\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
// +++++++++++++++ Non local operators +++++++++++++++++++//
hipBindTexture(0, propStochTex, uprop.D_elem(), uprop.Bytes());
hipBindTexture(0, phiVectorStochTex, phi.D_elem(), phi.Bytes());
QKXTM_Vector *insLine = new QKXTM_Vector();
printfQuda("Start Insertion line\n");
for(int dir = 0 ; dir < 3 ; dir++)
for(int dl = 0 ; dl < G_totalL[dir]/2 ; dl++){
hipLaunchKernelGGL(( insLine_Nonlocal_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) insLine->D_elem() ,(double2*) deviceWilsonLinks ,dl,dir);
hipDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2 + dl*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
hipDeviceSynchronize();
checkCudaError();
}
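// Note on the layout used above: the insertion line is stored per (dir, dl) with offset
//   dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2 + dl*G_localL[3]*Nmom*4*3*2 ,
// and the buffer was sized with G_totalL[0]/2 for all three directions, which implicitly
// assumes equal spatial extents G_totalL[0] = G_totalL[1] = G_totalL[2].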
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
printfQuda("Finish insertion line\n");
delete insLine;
hipUnbindTexture(phiVectorStochTex);
hipUnbindTexture(propStochTex);
//+++++++++++++++++++++++++++++++++++ finish insertion line
double *res = (double*) malloc(G_localL[3]*NmomSink*4*4*2*sizeof(double));
// write nonlocal
for(int dir = 0 ; dir < 3 ; dir++)
for(int iflag = 0 ; iflag < G_totalL[dir]/2 ; iflag++){
finalize_contract3pf_mixLevel_SinkMom((Complex*) res,(Complex*) (insLineMom +dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3,(Complex*) proton_level1, NmomSink, momSink);
write_3pf_Nonlocal_zeroMomIns(file_Nonlocal[0],res,dir,iflag,NmomSink,momSink);
finalize_contract3pf_oneLevel_SinkMom((Complex*) res,(Complex*) (insLineMom +dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3, NmomSink, momSink);
write_3pf_Nonlocal_zeroMomIns(file_Nonlocal[1],res,dir,iflag,NmomSink,momSink);
}
free(res);
if(comm_rank()==0){
fclose(file_Nonlocal[0]);
fclose(file_Nonlocal[1]);
}
free(insLineMom);
free(proton_level1);
free(proton_level3);
free(neutron_level3);
checkCudaError();
}
void quda::threepStochDpart_WilsonLinks( QKXTM_Vector &phi , QKXTM_Vector3D &xi ,QKXTM_Propagator &dprop , QKXTM_Propagator3D &uprop3D , QKXTM_Propagator3D &dprop3D, double* deviceWilsonLinks, int fixTime , char *filename ,int Nmom , int momElem[][3], int NmomSink, int momSink[][3]){
// ! fix time is the absolute sink time
char particles_filename[MAX_PARTICLES][257];
FILE *file_Nonlocal[MAX_PARTICLES];
sprintf(particles_filename[0],"%s_%s",filename,"proton_Nonlocal.dat");
sprintf(particles_filename[1],"%s_%s",filename,"neutron_Nonlocal.dat");
if(comm_rank() == 0){
file_Nonlocal[0] = fopen(particles_filename[0],"a");
file_Nonlocal[1] = fopen(particles_filename[1],"a");
if(file_Nonlocal[0] == NULL || file_Nonlocal[1] == NULL){
fprintf(stderr,"Error open files for writting : %s\n",strerror(errno));
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
// here we will calculate part of the contraction ----------------
printfQuda("Start partial contraction\n");
QKXTM_Vector3D *levelVec = new QKXTM_Vector3D();
hipBindTexture(0, uprop3DStochTex,uprop3D.D_elem(), uprop3D.Bytes());
hipBindTexture(0, dprop3DStochTex,dprop3D.D_elem(), dprop3D.Bytes());
hipBindTexture(0, xiVector3DStochTex, xi.D_elem(), xi.Bytes());
double *neutron_level1 = (double*) malloc(NmomSink*4*3*2*sizeof(double));
double *neutron_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
double *proton_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
if(neutron_level1 == NULL || neutron_level3 == NULL || proton_level3 == NULL){
fprintf(stderr,"Error allocate host memory for partial contraction\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
partialContract3pf_dpart_neutron(neutron_level1,neutron_level3,*levelVec,NmomSink,momSink);
partialContract3pf_dpart_proton(proton_level3,*levelVec,NmomSink,momSink);
hipUnbindTexture(xiVector3DStochTex);
hipUnbindTexture(uprop3DStochTex);
hipUnbindTexture(dprop3DStochTex);
delete levelVec;
printfQuda("Finish partial contraction\n");
// ---------------------------------------------------------------
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *insLineMom = (double*) malloc(3*(G_totalL[0]/2)*G_localL[3]*Nmom*4*3*2*sizeof(double));
if(insLineMom == NULL){
fprintf(stderr,"Error allocate host memory for insLineMom\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
// +++++++++++++++ Non local operators +++++++++++++++++++//
hipBindTexture(0, propStochTex, dprop.D_elem(), dprop.Bytes());
hipBindTexture(0, phiVectorStochTex, phi.D_elem(), phi.Bytes());
QKXTM_Vector *insLine = new QKXTM_Vector();
printfQuda("Start Insertion line\n");
for(int dir = 0 ; dir < 3 ; dir++)
for(int dl = 0 ; dl < G_totalL[dir]/2 ; dl++){
hipLaunchKernelGGL(( insLine_Nonlocal_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) insLine->D_elem() , (double2*) deviceWilsonLinks ,dl,dir);
hipDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2 + dl*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
hipDeviceSynchronize();
checkCudaError();
}
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ //
printfQuda("Finish insertion line\n");
delete insLine;
hipUnbindTexture(phiVectorStochTex);
hipUnbindTexture(propStochTex);
//+++++++++++++++++++++++++++++++++++ finish insertion line
double *res = (double*) malloc(G_localL[3]*NmomSink*4*4*2*sizeof(double));
// write nonlocal
for(int dir = 0 ; dir < 3 ; dir++)
for(int iflag = 0 ; iflag < G_totalL[dir]/2 ; iflag++){ // match the G_totalL[dir]/2 bound used for the offsets
finalize_contract3pf_mixLevel_SinkMom((Complex*) res,(Complex*) (insLineMom + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3,(Complex*) neutron_level1, NmomSink, momSink);
write_3pf_Nonlocal_zeroMomIns(file_Nonlocal[1],res,dir,iflag,NmomSink,momSink);
finalize_contract3pf_oneLevel_SinkMom((Complex*) res,(Complex*) (insLineMom + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3, NmomSink, momSink);
write_3pf_Nonlocal_zeroMomIns(file_Nonlocal[0],res,dir,iflag,NmomSink,momSink);
}
free(res);
if(comm_rank()==0){
fclose(file_Nonlocal[0]);
fclose(file_Nonlocal[1]);
}
free(insLineMom);
free(neutron_level1);
free(neutron_level3);
free(proton_level3);
checkCudaError();
}
//#define TEST_PION
void quda::threepStochPion_WilsonLinks( QKXTM_Vector &dphi , QKXTM_Vector3D &xi ,QKXTM_Propagator &uprop , QKXTM_Propagator3D &uprop3D , double* deviceWilsonLinks, int fixTime , char *filename ,int Nmom , int momElem[][3], int NmomSink, int momSink[][3]){
char pion_filename_up[257],pion_filename_down[257];
FILE *filePion_up;
FILE *filePion_down;
sprintf(pion_filename_up,"%s_%s",filename,"pion_Nonlocal_up.dat");
sprintf(pion_filename_down,"%s_%s",filename,"pion_Nonlocal_down.dat");
if( comm_rank() == 0){
filePion_up = fopen(pion_filename_up,"a");
filePion_down = fopen(pion_filename_down,"a");
if(filePion_up == NULL || filePion_down == NULL){
fprintf(stderr,"Error open files for writting : %s\n",strerror(errno));
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
printfQuda("Start partial contraction\n");
QKXTM_Vector3D *levelVec_up = new QKXTM_Vector3D();
QKXTM_Vector3D *levelVec_down = new QKXTM_Vector3D();
hipBindTexture(0, uprop3DStochTex,uprop3D.D_elem(), uprop3D.Bytes());
hipBindTexture(0, xiVector3DStochTex, xi.D_elem(), xi.Bytes());
double *pion_level_up = (double*) malloc(NmomSink*4*3*2*sizeof(double));
double *pion_level_down = (double*) malloc(NmomSink*4*3*2*sizeof(double));
if(pion_level_up == NULL || pion_level_down == NULL){
fprintf(stderr,"Error allocate host memory for partial contraction\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
partialContract3pf_upart_pion(pion_level_up,*levelVec_up,NmomSink,momSink);
partialContract3pf_dpart_pion(pion_level_down,*levelVec_down,NmomSink,momSink);
#ifdef TEST_PION
FILE *ptr_Vx;
ptr_Vx = fopen("VX_quda.dat","w");
levelVec_up->download();
for(int mu = 0 ; mu < 4 ; mu++)
for(int c = 0 ; c < 3 ; c++)
fprintf(ptr_Vx,"%d %d %+e %+e\n",mu,c,pion_level_up[mu*3*2+c*2+0],pion_level_up[mu*3*2+c*2+1]);
// for(int iv3 = 0 ; iv3 < G_localVolume/G_localL[3] ; iv3++)
// for(int mu = 0 ; mu < 4 ; mu++)
// for(int c = 0 ; c < 3 ; c++)
// fprintf(ptr_Vx,"%d %d %+e %+e\n",mu,c,levelVec_up->H_elem()[iv3*4*3*2+mu*3*2+c*2+0],levelVec_up->H_elem()[iv3*4*3*2+mu*3*2+c*2+1]);
fclose(ptr_Vx);
#endif
hipUnbindTexture(uprop3DStochTex);
hipUnbindTexture(xiVector3DStochTex);
delete levelVec_up;
delete levelVec_down;
printfQuda("Finish partial contraction\n");
// ---------------------------------------------------------------
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *insLineMom_up = (double*) malloc(3*(G_totalL[0]/2)*G_localL[3]*Nmom*4*3*2*sizeof(double));
double *insLineMom_down = (double*) malloc(3*(G_totalL[0]/2)*G_localL[3]*Nmom*4*3*2*sizeof(double));
if(insLineMom_up == NULL || insLineMom_down == NULL){
fprintf(stderr,"Error allocate host memory for insLineMom\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
hipBindTexture(0, propStochTex, uprop.D_elem(), uprop.Bytes());
hipBindTexture(0, phiVectorStochTex, dphi.D_elem(), dphi.Bytes());
QKXTM_Vector *insLine_up = new QKXTM_Vector();
QKXTM_Vector *insLine_down = new QKXTM_Vector();
printfQuda("Start Insertion line\n");
for(int dir = 0 ; dir < 3 ; dir++)
for(int dl = 0 ; dl < G_totalL[dir]/2 ; dl++){
// upart
hipLaunchKernelGGL(( insLine_Nonlocal_pion_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) insLine_up->D_elem() , (double2*) deviceWilsonLinks ,dl,dir, 0);
hipDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom_up + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2 + dl*G_localL[3]*Nmom*4*3*2 ,insLine_up->D_elem() , Nmom , momElem );
hipDeviceSynchronize();
checkCudaError();
//dpart
hipLaunchKernelGGL(( insLine_Nonlocal_pion_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) insLine_down->D_elem() , (double2*) deviceWilsonLinks ,dl,dir, 1);
hipDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom_down + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2 + dl*G_localL[3]*Nmom*4*3*2 ,insLine_down->D_elem() , Nmom , momElem );
hipDeviceSynchronize();
checkCudaError();
}
#ifdef TEST_PION
FILE *ptr_Vt;
ptr_Vt = fopen("VT_quda.dat","w");
for(int it = 0 ; it < G_localL[3] ; it++)
for(int mu = 0 ; mu < 4 ; mu++)
for(int c = 0 ; c < 3 ; c++)
fprintf(ptr_Vt,"%d %d %d %+e %+e\n",it,mu,c,insLineMom_up[it*4*3*2+mu*3*2+c*2+0],insLineMom_up[it*4*3*2+mu*3*2+c*2+1]);
fclose(ptr_Vt);
FILE *ptr_Vt_full;
ptr_Vt_full = fopen("VT_quda_full.dat","w");
insLine_up->download();
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < 4 ; mu++)
for(int c = 0 ; c < 3 ; c++)
fprintf(ptr_Vt_full,"%d %d %d %+e %+e\n",iv,mu,c,insLine_up->H_elem()[iv*4*3*2 + mu*3*2 + c*2 + 0],insLine_up->H_elem()[iv*4*3*2 + mu*3*2 + c*2 + 1]);
fclose(ptr_Vt_full);
#endif
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ //
printfQuda("Finish insertion line\n");
delete insLine_up;
delete insLine_down;
hipUnbindTexture(phiVectorStochTex);
hipUnbindTexture(propStochTex);
//+++++++++++++++++++++++++++++++++++ finish insertion line
double *res_up = (double*) malloc(G_localL[3]*NmomSink*2*sizeof(double));
double *res_down = (double*) malloc(G_localL[3]*NmomSink*2*sizeof(double));
for(int dir = 0 ; dir < 3 ; dir++)
for(int iflag = 0 ; iflag < G_totalL[dir]/2 ; iflag++){ // match the G_totalL[dir]/2 bound used for the offsets
finalize_contract3pf_Pion_SinkMom((Complex*) res_up,(Complex*) (insLineMom_up + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) pion_level_up, NmomSink, momSink);
finalize_contract3pf_Pion_SinkMom((Complex*) res_down,(Complex*) (insLineMom_down + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) pion_level_down, NmomSink, momSink);
write_3pf_Nonlocal_Pion_zeroMomIns(filePion_up,res_up,dir,iflag,NmomSink,momSink);
write_3pf_Nonlocal_Pion_zeroMomIns(filePion_down,res_down,dir,iflag,NmomSink,momSink);
}
free(res_up);
free(res_down);
if(comm_rank()==0){
fclose(filePion_up);
fclose(filePion_down);
}
free(insLineMom_up);
free(insLineMom_down);
free(pion_level_up);
free(pion_level_down);
checkCudaError();
}
#undef MAX_PARTICLES
#define THREADS_PER_BLOCK_ARNOLDI 64
void Arnoldi::uploadToCuda(cudaColorSpinorField &cudaVector, int offset){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK_ARNOLDI , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
hipLaunchKernelGGL(( uploadToCuda_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) (d_mNxM + offset*G_localVolume*G_nSpin*G_nColor) , (double2*) pointEven, (double2*) pointOdd);
hipDeviceSynchronize();
checkCudaError();
}
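// A note on the layout assumed here (not verified against the QUDA internals): the
// cudaColorSpinorField stores the spinor in even/odd (checkerboard) order, each parity
// holding G_localVolume/2 sites, while d_mNxM keeps the full lexicographic ordering with
// G_nSpin*G_nColor complex doubles per site; uploadToCuda_kernel is expected to interleave
// the two parities back into that full-volume ordering, and downloadFromCuda_kernel to
// split it again.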
void Arnoldi::downloadFromCuda(cudaColorSpinorField &cudaVector, int offset){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK_ARNOLDI , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
hipLaunchKernelGGL(( downloadFromCuda_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) (d_mNxM + offset*G_localVolume*G_nSpin*G_nColor) , (double2*) pointEven, (double2*) pointOdd);
hipDeviceSynchronize();
checkCudaError();
}
/*
void Arnoldi::matrixNxMmatrixMxL(Arnoldi &V,int NL, int M,int L,bool transpose){
dim3 blockDim( THREADS_PER_BLOCK_ARNOLDI , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
hipMemcpyToSymbol(c_matrixQ , d_mMxM , M*M*sizeof(double2),0,hipMemcpyDeviceToDevice );
checkCudaError();
matrixNxMmatrixMxL_kernel<<<gridDim,blockDim,blockDim.x*M*sizeof(Complex)>>>( (double2*) V.D_mNxM(), NL , M, L ,transpose);
hipDeviceSynchronize();
checkCudaError();
}
*/
void Arnoldi::matrixNxMmatrixMxLReal(Arnoldi &V,int NL, int M,int L,bool transpose){
dim3 blockDim( THREADS_PER_BLOCK_ARNOLDI , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
Complex *h_Q = (Complex*) calloc(M*M,sizeof(Complex));
double *h_Qr = (double*) calloc(M*M,sizeof(double));
hipMemcpy((void*)h_Q,(void*) d_mMxM, M*M*sizeof(Complex), hipMemcpyDeviceToHost);
for(int i = 0 ; i < M ; i++)
for(int j = 0 ; j < M ; j++)
h_Qr[i*M+j] = h_Q[i*M+j].real();
hipMemcpyToSymbol(c_matrixQ , h_Qr , M*M*sizeof(double) );
checkCudaError();
free(h_Q); // the host temporaries are only needed to stage the copy into constant memory
free(h_Qr);
hipLaunchKernelGGL(( matrixNxMmatrixMxLReal_kernel), dim3(gridDim),dim3(blockDim),blockDim.x*M*sizeof(Complex), 0, (double2*) V.D_mNxM(), NL , M, L ,transpose);
hipDeviceSynchronize();
checkCudaError();
}
void quda::clearNoiseCuda(Complex *A, int L, double tolerance){
hipMemcpyToSymbol(c_tolArnoldi , &tolerance , sizeof(double));
checkCudaError();
dim3 blockDim( L , 1, 1);
dim3 gridDim( 1 , 1 , 1);
hipLaunchKernelGGL(( noiseCleaner_kernel), dim3(gridDim),dim3(blockDim),blockDim.x*sizeof(double), 0, (double2*)A);
hipDeviceSynchronize();
checkCudaError();
}
#define THREADS_PER_BLOCK_LANCZOS 64
void Lanczos::uploadToCuda(cudaColorSpinorField &cudaVector, int offset){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK_LANCZOS , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
hipLaunchKernelGGL(( uploadToCuda_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) (d_mNxM + offset*G_localVolume*G_nSpin*G_nColor) , (double2*) pointEven, (double2*) pointOdd);
hipDeviceSynchronize();
checkCudaError();
}
void Lanczos::downloadFromCuda(cudaColorSpinorField &cudaVector){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK_LANCZOS , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
hipLaunchKernelGGL(( downloadFromCuda_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double2*) d_vN , (double2*) pointEven, (double2*) pointOdd);
hipDeviceSynchronize();
checkCudaError();
}
void Lanczos::matrixNxMmatrixMxLReal(Lanczos &V,int NL, int M,int L,bool transpose){
dim3 blockDim( THREADS_PER_BLOCK_LANCZOS , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
#ifdef __MATRIX_IN_CONSTANT_MEMORY__
double *h_Q = (double*) calloc(M*M,sizeof(double));
hipMemcpy((void*)h_Q,(void*) d_mMxM, M*M*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpyToSymbol(c_matrixQ , h_Q , M*M*sizeof(double) );
checkCudaError();
free(h_Q);
hipLaunchKernelGGL(( matrixNxMmatrixMxLReal_kernel), dim3(gridDim),dim3(blockDim),blockDim.x*M*sizeof(Complex), 0, (double2*) V.D_mNxM(), NL , M, L ,transpose);
#else
printfQuda("ChechPoint 1\n");
fflush(stdout);
hipBindTexture(0,matrixTex,d_mMxM,size);
checkCudaError();
printfQuda("ChechPoint 2\n");
fflush(stdout);
hipLaunchKernelGGL(( matrixNxMmatrixMxLRealTexture_kernel), dim3(gridDim),dim3(blockDim),blockDim.x*M*sizeof(Complex), 0, (double2*) V.D_mNxM(), NL , M, L ,transpose);
checkCudaError();
printfQuda("ChechPoint 3\n");
fflush(stdout);
hipUnbindTexture(matrixTex);
printfQuda("ChechPoint 4\n");
fflush(stdout);
#endif
hipDeviceSynchronize();
checkCudaError();
}
void Lanczos::makeTridiagonal(int m,int l){
dim3 blockDim( m , 1, 1);
dim3 gridDim( l , 1 , 1);
hipLaunchKernelGGL(( makeTridiagonal_kernel), dim3(gridDim),dim3(blockDim), 0, 0, (double*) this->D_mMxM());
hipDeviceSynchronize();
checkCudaError();
}
// Original CUDA source: dc61a42781089d7826aaf0eaeb36af34307aff8a.cu
#include <qudaQKXTM.h>
#include <errno.h>
#include <mpi.h>
#include <cuPrintf.cu>
#include <arlib.h>
#include <lanczos.h>
#include <limits>
#define THREADS_PER_BLOCK 32
#define PI 3.141592653589793
using namespace quda;
// $$ Section 1: Texture References $$
/* texture block */
texture<int4, 1> gaugeTexPlaq; // use this texture reference only for plaquette
texture<int4, 1> gaugeTexAPE; // use this for APE smearing , this texture will be binded and unbinded
texture<int4, 1> vectorTexGauss; // this texture needed for gaussian smearing
texture<int4, 1> propagatorTexAPE; // APE smearing need a propagator structure
texture<int4, 1> gaugeTexNorm2;
texture<int4, 1> vectorTexNorm2; // to find the norm
texture<int4, 1> propagatorTexNorm2;
texture<int4, 1> propagatorTexOne; // for contractions
texture<int4, 1> propagatorTexTwo;
texture<int4, 1> correlationTex;
texture<int4, 1> propagator3DTex1;
texture<int4, 1> propagator3DTex2;
texture<int4, 1> seqPropagatorTex;
texture<int4, 1> fwdPropagatorTex;
texture<int4, 1> gaugeDerivativeTex;
texture<int4, 1> phiVectorStochTex;
texture<int4, 1> propStochTex;
texture<int4, 1> insLineFourierTex;
texture<int4, 1> uprop3DStochTex;
texture<int4, 1> dprop3DStochTex;
texture<int4, 1> sprop3DStochTex;
texture<int4, 1> insLineMomTex;
texture<int4, 1> xiVector3DStochTex;
texture<int4, 1> gaugePath; // bind standard texture for wilson path
texture<int4, 1>gaugeTexHYP;
texture<int4, 1>propagatorTexHYP;
texture<int4, 1>fieldTex;
texture<int2, 1>matrixTex;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// $$ Section 2: Constant References $$
/* block for device constants */
__constant__ bool c_dimBreak[4];
__constant__ int c_nColor;
__constant__ int c_nDim;
__constant__ int c_localL[4];
__constant__ int c_plusGhost[4];
__constant__ int c_minusGhost[4];
__constant__ int c_stride;
__constant__ int c_surface[4];
__constant__ int c_nSpin;
__constant__ double c_alphaAPE;
__constant__ double c_alphaGauss;
__constant__ int c_threads;
__constant__ int c_eps[6][3];
__constant__ int c_sgn_eps[6];
__constant__ int c_procPosition[4];
__constant__ int c_sourcePosition[4];
__constant__ int c_totalL[4];
__constant__ double c_matrixQ[50*50];
__constant__ double c_tolArnoldi;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// $$ Section 3: Device /*Inline*/ Functions $$
/* Block for device kernels */
#if (__COMPUTE_CAPABILITY__ >= 130)
__inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i)
{
int4 v = tex1Dfetch(t,i);
return make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z));
}
__inline__ __device__ double fetch_double(texture<int2, 1> t, int i)
{
int2 v = tex1Dfetch(t,i);
return __hiloint2double(v.y,v.x);
}
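// Double-precision texture reads are emulated above: the buffers are bound as int4/int2
// textures and the two 32-bit halves are reassembled with __hiloint2double, e.g.
//   double2 z = fetch_double2(propStochTex, i); // z.x from (v.y,v.x), z.y from (v.w,v.z)
// (the line above is only an illustration). This requires compute capability >= 1.3,
// hence the guard; the fallback below simply returns zeros.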
#else
__inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i)
{
// do nothing
return make_double2(0.0, 0.0);
}
#endif
__device__ inline double2 operator*(const double a , const double2 b){
double2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
__device__ inline double2 operator*(const int a , const double2 b){
double2 res;
res.x = a*b.x;
res.y = a*b.y;
return res;
}
__device__ inline double2 operator*(const double2 a, const double2 b){
double2 res;
res.x = a.x*b.x - a.y*b.y;
res.y = a.x*b.y + a.y*b.x;
return res;
}
__device__ inline double2 operator+(const double2 a, const double2 b){
double2 res;
res.x = a.x + b.x;
res.y = a.y + b.y;
return res;
}
__device__ inline double2 operator-(const double2 a, const double2 b){
double2 res;
res.x = a.x - b.x;
res.y = a.y - b.y;
return res;
}
__device__ inline double2 conj(const double2 a){
double2 res;
res.x = a.x;
res.y = -a.y;
return res;
}
__device__ inline double norm(const double2 a){
double res;
res = sqrt(a.x*a.x + a.y*a.y);
return res;
}
__device__ inline double norm2(const double2 a){
double res;
res = a.x*a.x + a.y*a.y;
return res;
}
__device__ inline double2 cpow(const double2 x , const double a){
double2 res;
res.x = pow(norm(x),a) * cos( atan2(x.y,x.x) * a);
res.y = pow(norm(x),a) * sin( atan2(x.y,x.x) * a);
return res;
}
__device__ inline double2 operator/(const double2 x, const double2 y){
double2 res;
res.x = (x.x * y.x + x.y * y.y) / (y.x * y.x + y.y * y.y);
res.y = (x.y * y.x - x.x * y.y) / (y.x * y.x + y.y * y.y);
return res;
}
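// The helpers above are plain complex arithmetic on double2: cpow uses the polar form
//   z^a = |z|^a * ( cos(a*arg(z)) + i*sin(a*arg(z)) ),
// and operator/ multiplies by the conjugate of the denominator,
//   x/y = x*conj(y) / |y|^2,
// which is exactly what the component expressions spell out.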
// $$ Section 4: Device Kernels $$
#include <core_def.h>
__global__ void calculatePlaq_kernel(double *partial_plaq){
#include <plaquette_core.h>
}
__global__ void APE_kernel_1(double2 *prp , double2 *out){
#include <APE_core_1.h>
}
#ifdef _PROPAGATOR_APE_TEX
__global__ void APE_kernel_2(double2 *out){
#include <APE_core_2.h>
}
#else
__global__ void APE_kernel_2(double2 *prop,double2 *out){
#include <APE_core_2.h>
}
#endif
__global__ void Gauss_kernel(double2 *out){
#include <Gauss_core.h>
}
__global__ void norm2Gauge_kernel(double *cache){
#include <norm2Gauge_core.h>
}
__global__ void norm2Vector_kernel(double *cache){
#include <norm2Vector_core.h>
}
__global__ void norm2Propagator_kernel(double *cache){
#include <norm2Propagator_core.h>
}
__global__ void uploadToCuda_kernel(double2 *in, double2 *outEven, double2 *outOdd){
#include <uploadToCuda_core.h>
}
__global__ void downloadFromCuda_kernel(double2 *out, double2 *inEven, double2 *inOdd){
#include <downloadFromCuda_core.h>
}
__global__ void scaleVector_kernel(double2 *inOut,double a){
#include <scaleVector_core.h>
}
__global__ void rotateToPhysicalBase_kernel(double2 *inOut, int sign){
#include <rotateToPhysicalBase_core.h>
}
__global__ void contract_Type1_kernel(double2 *out){
#include <contract_Type1_core.h>
}
__global__ void fourierCorr_kernel(double2 *block ,int it ,int nx , int ny , int nz){
#include <fourierCorr_core.h>
}
__global__ void fourierCorr_kernel2(double2 *block ,int it ,int nx , int ny , int nz){
#include <fourierCorr_core2.h>
}
__global__ void fourierCorr_kernel3(double2 *block ,int it , int nx , int ny , int nz){
#include <fourierCorr_core3.h>
}
__global__ void fourierCorr_kernel4(double2 *block , int nx , int ny , int nz){
#include <fourierCorr_core4.h>
}
__global__ void seqSourceFixSinkPart1_kernel( double2 *out, int timeslice ,int c_nu, int c_c2, whatProjector typeProj, whatParticle testParticle ){
#include <seqSourceFixSinkPart1_core.h>
}
__global__ void seqSourceFixSinkPart2_kernel( double2 *out, int timeslice ,int c_nu, int c_c2, whatProjector typeProj, whatParticle testParticle ){
#include <seqSourceFixSinkPart2_core.h>
}
__global__ void conjugate_vector_kernel( double2 *inOut ){
#include <conjugate_vector_core.h>
}
__global__ void conjugate_propagator_kernel( double2 *inOut ){
#include <conjugate_propagator_core.h>
}
__global__ void apply_gamma5_vector_kernel( double2 *inOut ){
#include <apply_gamma5_vector_core.h>
}
__global__ void apply_gamma_transf_vector_kernel( double2 *inOut ){
#include <apply_gamma_transf_vector_core.h>
}
__global__ void apply_gamma5_propagator_kernel( double2 *inOut ){
#include <apply_gamma5_propagator_core.h>
}
__global__ void fixSinkContractions_local_kernel( double2 *out, int flag, whatParticle testParticle, int partFlag){ // partFlag must be 1 or 2
#include <fixSinkContractions_local_core.h>
}
__global__ void fixSinkContractions_oneD_kernel( double2 *out ,int flag,int dir ,whatParticle testParticle,int partFlag){
#include <fixSinkContractions_oneD_core.h>
}
__global__ void fixSinkContractions_noether_kernel( double2 *out ,int dir ,whatParticle testParticle,int partFlag){
#include <fixSinkContractions_noether_core.h>
}
__global__ void fixSinkContractions_nonLocal_kernel(double2 *out, double2 *deviceWilsonLinks, int dl, whatParticle testParticle, int partFlag,int direction){
#include <fixSinkContractions_nonLocal_core.h>
}
__global__ void fixSinkContractions_nonLocalBwd_kernel(double2 *out, double2 *deviceWilsonLinks, int dl, whatParticle testParticle, int partFlag,int direction){
#include <fixSinkContractions_nonLocalBwd_core.h>
}
__global__ void insLine_local_kernel( double2 *out , int iflag , int partFlag ){
#include <insLine_local_core.h>
}
__global__ void insLine_oneD_kernel(double2 *out, int iflag , int dir){
#include <insLine_oneD_core.h>
}
__global__ void insLine_oneD_kernel_new(double2 *out, int dir){
#include <insLine_oneD_core_new.h>
}
__global__ void insLine_noether_kernel(double2 *out, int dir){
#include <insLine_noether_core.h>
}
__global__ void contract3pf_Type1_1_kernel( double2 *out, int iflag, int index1 , int index2){
#include <contract3pf_Type1_1_core.h>
}
__global__ void contract3pf_Type1_2_kernel( double2 *out, int iflag, int index1){
#include <contract3pf_Type1_2_core.h>
}
__global__ void partial_lvl1_Contract3pf_Type1_1_kernel(double2 *out, int index1, int index2){
#include <partial_lvl1_Contract3pf_Type1_1_core.h>
}
__global__ void partial_lvl3_Contract3pf_Type1_1_kernel(double2 *out,int gamma,int gamma1, int index1, int index2){
#include <partial_lvl3_Contract3pf_Type1_1_core.h>
}
__global__ void partial_lvl3_Contract3pf_Type1_2_kernel(double2 *out,int gamma,int gamma1, int index1){
#include <partial_lvl3_Contract3pf_Type1_2_core.h>
}
///////////// NEW
__global__ void partial_Contract3pf_pion_kernel(double2 *out, int index){
#include <partial_Contract3pf_pion_core.h>
}
__global__ void insLine_Nonlocal_pion_kernel(double2 *out, double2 *deviceWilsonLinks, int dl, int dir,int index){
#include <insLine_Nonlocal_pion_core.h>
}
__global__ void contract_twop_pion_kernel(double2 *out){
#include <contract_twop_pion_core.h>
}
__global__ void fourierPion_kernel(double2 *block ,int it ,int nx , int ny , int nz){
#include <fourierPion_core.h>
}
__global__ void getVectorProp3D_kernel( double2 *out, int timeslice ,int nu,int c2){
#include <getVectorProp3D_core.h>
}
////////////////
__global__ void createWilsonPath_kernel(double2 *deviceWilsonPath,int direction){
#include <createWilsonPath_core.h>
}
__global__ void createWilsonPathBwd_kernel(double2 *deviceWilsonPath,int direction){
#include <createWilsonPathBwd_core.h>
}
__global__ void createWilsonPath_kernel_all(double2 *deviceWilsonPath){
#include <createWilsonPath_allDirections_core.h>
}
__global__ void insLine_Nonlocal_kernel(double2 *out, double2 *deviceWilsonLinks, int dl, int dir){
#include <insLine_Nonlocal_core.h>
}
__global__ void HYP3D_kernel_1(double2 *prp1){
#include <HYP3D_core_1.h>
}
__global__ void HYP3D_kernel_2(double2 *prp2){
#include <HYP3D_core_2.h>
}
__global__ void HYP3D_kernel_3(double2 *prp2, double omega2){
#include <HYP3D_core_3.h>
}
__global__ void HYP3D_kernel_4(double2 *prp1,double2 *out){
#include <HYP3D_core_4.h>
}
__global__ void HYP3D_kernel_5(double2 *out, double omega1){
#include <HYP3D_core_5.h>
}
__global__ void apply_momentum_kernel(double2 *vector, int nx , int ny , int nz){
#include <apply_momentum_core.h>
}
/*
__global__ void matrixNxMmatrixMxL_kernel(double2 *mNxM, int NL, int M, int L, bool transpose){
#include <matrixNxMmatrixMxL_core.h>
}
*/
__global__ void matrixNxMmatrixMxLReal_kernel(double2 *mNxM, int NL, int M, int L, bool transpose){
#include <matrixNxMmatrixMxLReal_core.h>
}
__global__ void matrixNxMmatrixMxLRealTexture_kernel(double2 *mNxM, int NL, int M, int L, bool transpose){
#include <matrixNxMmatrixMxLRealTexture_core.h>
}
__global__ void noiseCleaner_kernel(double2 *A){
#include <noiseCleaner_core.h>
}
__global__ void makeTridiagonal_kernel(double *A){
#include <makeTridiagonal_core.h>
}
__global__ void UxMomentumPhase_kernel(double2 *inOut, int px, int py, int pz, double zeta){
#include <UxMomentumPhase_core.h>
}
///////////////////////////////////////////////////////////////////////////////
// $$ Section 5: Static Global Variables $$
///////////////////////////////////////////////////
/* Block for static global variables */
float G_deviceMemory = 0.;
int G_nColor;
int G_nSpin;
int G_nDim;
int G_strideFull;
double G_alphaAPE;
double G_alphaGauss;
int G_localVolume;
int G_totalVolume;
int G_nsmearAPE;
int G_nsmearGauss;
bool G_dimBreak[QUDAQKXTM_DIM];
int G_localL[QUDAQKXTM_DIM];
int G_totalL[QUDAQKXTM_DIM];
int G_nProc[QUDAQKXTM_DIM];
int G_plusGhost[QUDAQKXTM_DIM];
int G_minusGhost[QUDAQKXTM_DIM];
int G_surface3D[QUDAQKXTM_DIM];
bool G_init_qudaQKXTM_flag = false;
int G_nsmearHYP;
double G_omega1HYP;
double G_omega2HYP;
// for mpi use global variables
MPI_Group G_fullGroup , G_spaceGroup , G_timeGroup;
MPI_Comm G_spaceComm , G_timeComm;
int G_localRank;
int G_localSize;
int G_timeRank;
int G_timeSize;
//////////////////////////////////////////////////
// $$ Section 6: Initialize qudaQKXTM $$
// initialization function for qudaQKXTM lib
void quda::init_qudaQKXTM(qudaQKXTMinfo *info){
if(G_init_qudaQKXTM_flag == false){
G_nColor = 3;
G_nSpin = 4;
G_nDim = QUDAQKXTM_DIM;
G_alphaAPE = info->alphaAPE;
G_alphaGauss = info->alphaGauss;
G_nsmearAPE = info->nsmearAPE;
G_nsmearGauss = info->nsmearGauss;
G_nsmearHYP = info->nsmearHYP;
G_omega1HYP = info->omega1HYP;
G_omega2HYP = info->omega2HYP;
// from now on depends on lattice and break format we choose
for(int i = 0 ; i < G_nDim ; i++)
G_nProc[i] = comm_dim(i);
for(int i = 0 ; i < G_nDim ; i++){ // take local and total lattice
G_localL[i] = info->lL[i];
G_totalL[i] = G_nProc[i] * G_localL[i];
}
G_localVolume = 1;
G_totalVolume = 1;
for(int i = 0 ; i < G_nDim ; i++){
G_localVolume *= G_localL[i];
G_totalVolume *= G_totalL[i];
}
G_strideFull = G_localVolume;
for (int i=0; i<G_nDim; i++) {
G_surface3D[i] = 1;
for (int j=0; j<G_nDim; j++) {
if (i==j) continue;
G_surface3D[i] *= G_localL[j];
}
}
for(int i = 0 ; i < G_nDim ; i++)
if( G_localL[i] == G_totalL[i] )
G_surface3D[i] = 0;
for(int i = 0 ; i < G_nDim ; i++){
G_plusGhost[i] =0;
G_minusGhost[i] = 0;
}
#ifdef MULTI_GPU
int lastIndex = G_localVolume;
for(int i = 0 ; i < G_nDim ; i++)
if( G_localL[i] < G_totalL[i] ){
G_plusGhost[i] = lastIndex ;
G_minusGhost[i] = lastIndex + G_surface3D[i];
lastIndex += 2*G_surface3D[i];
}
#endif
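// A small worked example of the ghost bookkeeping above (assuming only the time
// direction is split across processes): lastIndex starts at G_localVolume, so
//   G_plusGhost[3]  = G_localVolume
//   G_minusGhost[3] = G_localVolume + G_surface3D[3]
// and the site count grows by 2*G_surface3D[3]; directions that are not split keep
// their ghost offsets at 0.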
for(int i = 0 ; i < G_nDim ; i++){
if( G_localL[i] < G_totalL[i])
G_dimBreak[i] = true;
else
G_dimBreak[i] = false;
}
const int eps[6][3]=
{
{0,1,2},
{2,0,1},
{1,2,0},
{2,1,0},
{0,2,1},
{1,0,2}
};
const int sgn_eps[6]=
{
+1,+1,+1,-1,-1,-1
};
int procPosition[4];
for(int i= 0 ; i < 4 ; i++)
procPosition[i] = comm_coords(i);
int sourcePosition[4];
// put it zero but change it later
for(int i = 0 ; i < 4 ; i++)
sourcePosition[i] = info->sourcePosition[i];
// initialization consist also from define device constants
cudaMemcpyToSymbol(c_nColor, &G_nColor, sizeof(int) );
cudaMemcpyToSymbol(c_nSpin, &G_nSpin, sizeof(int) );
cudaMemcpyToSymbol(c_nDim, &G_nDim, sizeof(int) );
cudaMemcpyToSymbol(c_stride, &G_strideFull, sizeof(int) );
cudaMemcpyToSymbol(c_alphaAPE, &G_alphaAPE , sizeof(double) );
cudaMemcpyToSymbol(c_alphaGauss, &G_alphaGauss , sizeof(double) );
cudaMemcpyToSymbol(c_threads , &G_localVolume , sizeof(int) ); // c_threads is an int; may change
cudaMemcpyToSymbol(c_dimBreak , G_dimBreak , QUDAQKXTM_DIM*sizeof(bool) );
cudaMemcpyToSymbol(c_localL , G_localL , QUDAQKXTM_DIM*sizeof(int) );
cudaMemcpyToSymbol(c_totalL , G_totalL , QUDAQKXTM_DIM*sizeof(int) );
cudaMemcpyToSymbol(c_plusGhost , G_plusGhost , QUDAQKXTM_DIM*sizeof(int) );
cudaMemcpyToSymbol(c_minusGhost , G_minusGhost , QUDAQKXTM_DIM*sizeof(int) );
cudaMemcpyToSymbol(c_surface , G_surface3D , QUDAQKXTM_DIM*sizeof(int) );
cudaMemcpyToSymbol(c_eps, &(eps[0][0]) , 6*3*sizeof(int) );
cudaMemcpyToSymbol(c_sgn_eps, sgn_eps , 6*sizeof(int) );
cudaMemcpyToSymbol(c_procPosition, procPosition, QUDAQKXTM_DIM*sizeof(int));
cudaMemcpyToSymbol(c_sourcePosition, sourcePosition, QUDAQKXTM_DIM*sizeof(int));
// double machineEpsilon = std::numeric_limits<double>::epsilon();
// cudaMemcpyToSymbol(c_machineEpsilon , &machineEpsilon , sizeof(double));
checkCudaError();
// create groups of process to use mpi reduce only on spatial points
MPI_Comm_group(MPI_COMM_WORLD, &G_fullGroup);
int space3D_proc;
space3D_proc = G_nProc[0] * G_nProc[1] * G_nProc[2];
int *ranks = (int*) malloc(space3D_proc*sizeof(int));
for(int i= 0 ; i < space3D_proc ; i++)
ranks[i] = comm_coords(3) * space3D_proc + i;
printf("%d (%d,%d,%d,%d)\n",comm_rank(),comm_coords(0),comm_coords(1),comm_coords(2),comm_coords(3));
for(int i= 0 ; i < space3D_proc ; i++)
printf("%d %d\n",comm_rank(), ranks[i]);
MPI_Group_incl(G_fullGroup,space3D_proc,ranks,&G_spaceGroup);
MPI_Group_rank(G_spaceGroup,&G_localRank);
MPI_Group_size(G_spaceGroup,&G_localSize);
MPI_Comm_create(MPI_COMM_WORLD, G_spaceGroup , &G_spaceComm);
// create group of process to use mpi gather
int *ranksTime = (int*) malloc(G_nProc[3]*sizeof(int));
for(int i=0 ; i < G_nProc[3] ; i++)
ranksTime[i] = i*space3D_proc;
MPI_Group_incl(G_fullGroup,G_nProc[3], ranksTime, &G_timeGroup);
MPI_Group_rank(G_timeGroup, &G_timeRank);
MPI_Group_size(G_timeGroup, &G_timeSize);
MPI_Comm_create(MPI_COMM_WORLD, G_timeGroup, &G_timeComm);
//////////////////////////////////////////////////////////////////////////////
free(ranks);
free(ranksTime);
G_init_qudaQKXTM_flag = true;
printfQuda("qudaQKXTM has been initialized\n");
}
else
return;
}
int quda::comm_localRank(){
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
return G_localRank;
}
void quda::printf_qudaQKXTM(){
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
printfQuda("Number of colors is %d\n",G_nColor);
printfQuda("Number of spins is %d\n",G_nSpin);
printfQuda("Number of dimensions is %d\n",G_nDim);
printfQuda("Number of process in each direction is (x,y,z,t) %d x %d x %d x %d\n",G_nProc[0],G_nProc[1],G_nProc[2],G_nProc[3]);
printfQuda("Total lattice is (x,y,z,t) %d x %d x %d x %d\n",G_totalL[0],G_totalL[1],G_totalL[2],G_totalL[3]);
printfQuda("Local lattice is (x,y,z,t) %d x %d x %d x %d\n",G_localL[0],G_localL[1],G_localL[2],G_localL[3]);
printfQuda("Total volume is %d\n",G_totalVolume);
printfQuda("Local volume is %d\n",G_localVolume);
printfQuda("Surface is (x,y,z,t) ( %d , %d , %d , %d)\n",G_surface3D[0],G_surface3D[1],G_surface3D[2],G_surface3D[3]);
printfQuda("The plus Ghost points in directions (x,y,z,t) ( %d , %d , %d , %d )\n",G_plusGhost[0],G_plusGhost[1],G_plusGhost[2],G_plusGhost[3]);
printfQuda("The Minus Ghost points in directixons (x,y,z,t) ( %d , %d , %d , %d )\n",G_minusGhost[0],G_minusGhost[1],G_minusGhost[2],G_minusGhost[3]);
printfQuda("For APE smearing we use nsmear = %d , alpha = %lf\n",G_nsmearAPE,G_alphaAPE);
printfQuda("For Gauss smearing we use nsmear = %d , alpha = %lf\n",G_nsmearGauss,G_alphaGauss);
printfQuda("For HYP smearing we use nsmear = %d , omega1 = %lf , omega2 = %lf\n",G_nsmearHYP,G_omega1HYP,G_omega2HYP);
}
/////////////////// METHODS //////////////////////////////
//////////////////////// class QKXTM_Field /////////////////////////////
// $$ Section 7: Class QKXTM_Field $$
QKXTM_Field::QKXTM_Field():
h_elem(NULL) , d_elem(NULL) , h_ext_ghost(NULL) , d_ext_ghost(NULL), field_binded(false)
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
field_length = 1;
ghost_length = 0;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
// create_all();
}
QKXTM_Field::~QKXTM_Field(){
// destroy_all();
}
void QKXTM_Field::create_host(){
h_elem = (double*) malloc(bytes_total_length);
if(h_elem == NULL) errorQuda("Error with allocation host memory");
}
void QKXTM_Field::create_host_ghost(){
#ifdef MULTI_GPU
if( comm_size() > 1){
h_ext_ghost = (double*) malloc(bytes_ghost_length);
if(h_ext_ghost == NULL)errorQuda("Error with allocation host memory");
}
#endif
}
void QKXTM_Field::create_device(){
cudaMalloc((void**)&d_elem,bytes_total_length);
checkCudaError();
G_deviceMemory += bytes_total_length/(1024.*1024.); // device memory in MB
printfQuda("Device memory in used is %f MB A \n",G_deviceMemory);
}
void QKXTM_Field::create_device_ghost(){
#ifdef MULTI_GPU
if( comm_size() > 1){
cudaMalloc((void**)&d_ext_ghost,bytes_ghost_length);
checkCudaError();
G_deviceMemory += bytes_ghost_length/(1024.*1024.);
printfQuda("Device memory in used is %f MB A \n",G_deviceMemory);
}
#endif
}
void QKXTM_Field::destroy_host(){
free(h_elem);
h_elem = NULL;
}
void QKXTM_Field::destroy_device(){
if(d_elem != NULL){
cudaFree(d_elem);
checkCudaError();
d_elem = NULL;
G_deviceMemory -= bytes_total_length/(1024.*1024.);
printfQuda("Device memory in used is %f MB D \n",G_deviceMemory);
}
}
void QKXTM_Field::destroy_host_ghost(){
#ifdef MULTI_GPU
if( (comm_size() > 1) ){
free(h_ext_ghost);
h_ext_ghost = NULL;
}
#endif
}
void QKXTM_Field::destroy_device_ghost(){
#ifdef MULTI_GPU
if( comm_size() > 1 ){
if(d_ext_ghost != NULL){
cudaFree(d_ext_ghost);
d_ext_ghost = NULL;
checkCudaError();
G_deviceMemory -= bytes_ghost_length/(1024.*1024.);
printfQuda("Device memory in used is %f MB D \n",G_deviceMemory);
}
}
#endif
}
void QKXTM_Field::create_all(){
create_host();
create_host_ghost();
create_device();
// create_device_ghost(); // with cudaMemcpy2D dont need it
zero();
}
void QKXTM_Field::destroy_all(){
destroy_host();
destroy_host_ghost();
destroy_device();
// destroy_device_ghost();
}
void QKXTM_Field::printInfo(){
printfQuda("GPU memory needed is %f MB \n",bytes_total_length/(1024.0 * 1024.0));
}
void QKXTM_Field::zero(){
memset(h_elem,0,bytes_total_length);
cudaMemset(d_elem,0,bytes_total_length);
checkCudaError();
}
void QKXTM_Field::fourierCorr(double *corr, double *corrMom, int Nmom , int momElem[][3]){
// corrMom is indexed below as [t][imom][re/im], so it must hold at least G_localL[3]*Nmom*2 doubles
// slowest is time then momentum then r,i
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
cudaBindTexture(0, fieldTex, corr, Bytes() );
double *h_partial_block = NULL;
double *d_partial_block = NULL;
h_partial_block = (double*) malloc(gridDim.x*2 * sizeof(double) ); // for complex *2
if(h_partial_block == NULL) errorQuda("error allocate memory for host partial block");
cudaMalloc((void**)&d_partial_block, gridDim.x*2 * sizeof(double) );
double reduction[2];
double globalReduction[2];
for(int it = 0 ; it < G_localL[3] ; it++){
for(int imom = 0 ; imom < Nmom ; imom++){
// fourierCorr_kernel<<<gridDim,blockDim>>>((double2*) d_partial_block ,it ,momElem[imom][0] , momElem[imom][1] , momElem[imom][2] ); // source position and proc position is in constant memory
fourierPion_kernel<<<gridDim,blockDim>>>((double2*) d_partial_block ,it ,momElem[imom][0] , momElem[imom][1] , momElem[imom][2] );
cudaDeviceSynchronize();
cudaMemcpy(h_partial_block , d_partial_block , gridDim.x*2 * sizeof(double) , cudaMemcpyDeviceToHost);
memset(reduction , 0 , 2 * sizeof(double) );
for(int i =0 ; i < gridDim.x ; i++){
reduction[0] += h_partial_block[ i*2 + 0];
reduction[1] += h_partial_block[ i*2 + 1];
}
MPI_Reduce(&(reduction[0]) , &(globalReduction[0]) , 2 , MPI_DOUBLE , MPI_SUM , 0 , G_spaceComm); // only local root has the right value
if(G_localRank == 0){
// note: the stored index does not depend on gamma,gamma1, so no extra loops are needed here
corrMom[it*Nmom*2 + imom*2 + 0] = globalReduction[0];
corrMom[it*Nmom*2 + imom*2 + 1] = globalReduction[1];
}
} // for all momenta
} // for all local timeslice
cudaUnbindTexture(fieldTex);
free(h_partial_block);
cudaFree(d_partial_block);
checkCudaError();
h_partial_block = NULL;
d_partial_block = NULL;
}
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////// Class QKXTM_Gauge ////////////////////////////////////////
// $$ Section 8: Class QKXTM_Gauge $$
QKXTM_Gauge::QKXTM_Gauge():
gauge_binded_plaq(false) , packGauge_flag(false) , loadGauge_flag(false) , gauge_binded_ape(false), h_elem_backup(NULL)
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
ghost_length = 0;
field_length = G_nDim * G_nColor * G_nColor;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_all();
}
QKXTM_Gauge::QKXTM_Gauge(NgaugeHost ngaugeHost):
gauge_binded_plaq(false) , packGauge_flag(false) , loadGauge_flag(false) , gauge_binded_ape(false), h_elem_backup(NULL)
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
ghost_length = 0;
field_length = G_nDim * G_nColor * G_nColor;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
if(ngaugeHost == QKXTM_N1){
create_all();}
else{
create_all();
h_elem_backup =(double*) malloc(bytes_total_length);
if(h_elem_backup == NULL)errorQuda("Error allocate host memory for backup");
}
}
QKXTM_Gauge::~QKXTM_Gauge(){
destroy_all();
if(h_elem_backup != NULL)free(h_elem_backup);
if(gauge_binded_plaq == true) unbindGaugePlaq();
if(gauge_binded_ape == true) unbindGaugeAPE();
gauge_binded_plaq = false;
gauge_binded_ape = false;
}
void QKXTM_Gauge::bindGaugePlaq(){
if( gauge_binded_plaq == false ){
cudaBindTexture(0,gaugeTexPlaq,d_elem,bytes_total_length);
checkCudaError();
}
gauge_binded_plaq = true;
}
void QKXTM_Gauge::unbindGaugePlaq(){
if(gauge_binded_plaq == true){
cudaUnbindTexture(gaugeTexPlaq);
checkCudaError();
}
gauge_binded_plaq = false;
}
void QKXTM_Gauge::bindGaugeAPE(){
if( gauge_binded_ape == false ){
cudaBindTexture(0,gaugeTexAPE,d_elem,bytes_total_length);
checkCudaError();
}
gauge_binded_ape = true;
}
void QKXTM_Gauge::unbindGaugeAPE(){
if(gauge_binded_ape == true){
cudaUnbindTexture(gaugeTexAPE);
checkCudaError();
}
gauge_binded_ape = false;
}
void QKXTM_Gauge::rebindGaugeAPE(){
cudaUnbindTexture(gaugeTexAPE);
cudaBindTexture(0,gaugeTexAPE,d_elem,bytes_total_length);
checkCudaError();
}
void QKXTM_Gauge::packGauge(void **gauge){
// if(packGauge_flag == false){
double **p_gauge = (double**) gauge;
for(int dir = 0 ; dir < G_nDim ; dir++)
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++)
for(int part = 0 ; part < 2 ; part++){
h_elem[dir*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + iv*2 + part] = p_gauge[dir][iv*G_nColor*G_nColor*2 + c1*G_nColor*2 + c2*2 + part];
}
printfQuda("Gauge qkxTM packed on gpu form\n");
// packGauge_flag = true;
// }
}
void QKXTM_Gauge::loadGauge(){
//if((packGauge_flag == true) && (loadGauge_flag == false)){
cudaMemcpy(d_elem,h_elem,(bytes_total_length - bytes_ghost_length), cudaMemcpyHostToDevice );
checkCudaError();
// loadGauge_flag = true;
printfQuda("Gauge qkxTM loaded on gpu\n");
// }
}
void QKXTM_Gauge::justDownloadGauge(){
//if((packGauge_flag == true) && (loadGauge_flag == false)){
cudaMemcpy(h_elem,d_elem,(bytes_total_length - bytes_ghost_length), cudaMemcpyDeviceToHost );
checkCudaError();
// loadGauge_flag = true;
printfQuda("GaugeApe just downloaded\n");
// }
}
void QKXTM_Gauge::packGaugeToBackup(void **gauge){
double **p_gauge = (double**) gauge;
for(int dir = 0 ; dir < G_nDim ; dir++)
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++)
for(int part = 0 ; part < 2 ; part++){
h_elem_backup[dir*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + iv*2 + part] = p_gauge[dir][iv*G_nColor*G_nColor*2 + c1*G_nColor*2 + c2*2 + part];
}
printfQuda("Gauge qkxTM packed on gpu form on backupHost\n");
}
void QKXTM_Gauge::loadGaugeFromBackup(){
cudaMemcpy(d_elem,h_elem_backup,(bytes_total_length - bytes_ghost_length), cudaMemcpyHostToDevice );
checkCudaError();
printfQuda("Gauge qkxTM loaded on gpu from backupHost\n");
}
void QKXTM_Gauge::ghostToHost(){ // gpu collect ghost and send it to host
// direction x ////////////////////////////////////
#ifdef MULTI_GPU
if( G_localL[0] < G_totalL[0]){
int position;
int height = G_localL[1] * G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = 2*sizeof(double);
size_t spitch = G_localL[0]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = (G_localL[0]-1)*G_localL[1]*G_localL[2]*G_localL[3];
position = G_localL[0]-1;
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[0]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[0]*2 + c1*G_nColor*G_surface3D[0]*2 + c2*G_surface3D[0]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[0]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[0]*2 + c1*G_nColor*G_surface3D[0]*2 + c2*G_surface3D[0]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
}
// direction y ///////////////////////////////////
if( G_localL[1] < G_totalL[1]){
int position;
int height = G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[1]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*(G_localL[1]-1)*G_localL[2]*G_localL[3];
position = G_localL[0]*(G_localL[1]-1);
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[1]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[1]*2 + c1*G_nColor*G_surface3D[1]*2 + c2*G_surface3D[1]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[1]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[1]*2 + c1*G_nColor*G_surface3D[1]*2 + c2*G_surface3D[1]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
}
// direction z //////////////////////////////////
if( G_localL[2] < G_totalL[2]){
int position;
int height = G_localL[3]; // number of blocks that we need
size_t width = G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[2]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*G_localL[1]*(G_localL[2]-1)*G_localL[3];
position = G_localL[0]*G_localL[1]*(G_localL[2]-1);
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[2]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[2]*2 + c1*G_nColor*G_surface3D[2]*2 + c2*G_surface3D[2]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int i = 0 ; i < G_nDim ; i++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + i*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[2]*G_nDim*G_nColor*G_nColor*2 + i*G_nColor*G_nColor*G_surface3D[2]*2 + c1*G_nColor*G_surface3D[2]*2 + c2*G_surface3D[2]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
}
// printfQuda("before copy device to host\n");
// direction t /////////////////////////////////////
if( G_localL[3] < G_totalL[3]){
int position;
int height = G_nDim*G_nColor*G_nColor;
size_t width = G_localL[2]*G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[3]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
position = G_localL[0]*G_localL[1]*G_localL[2]*(G_localL[3]-1);
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_minusGhost[3]*G_nDim*G_nColor*G_nColor*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
// set minus points to plus area
position = 0;
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_plusGhost[3]*G_nDim*G_nColor*G_nColor*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
checkCudaError();
#endif
}
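// For every partitioned direction the local plus boundary (stored in the minus-ghost slot of
// h_elem by ghostToHost) is sent to the forward neighbour, and the data received from the
// backward neighbour fills the minus ghost zone of h_ext_ghost; the reverse transfer fills the
// plus ghost zone. Each send/receive pair uses its own MPI tag and the comm_wait calls make
// the exchange blocking, so the ghosts are complete when the function returns.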
void QKXTM_Gauge::cpuExchangeGhost(){ // cpus exchange links
#ifdef MULTI_GPU
if(comm_size() > 1){
MPI_Request request_recv[2*G_nDim];
MPI_Request request_send[2*G_nDim];
int back_nbr[4] = {X_BACK_NBR,Y_BACK_NBR,Z_BACK_NBR,T_BACK_NBR};
int fwd_nbr[4] = {X_FWD_NBR,Y_FWD_NBR,Z_FWD_NBR,T_FWD_NBR};
double *pointer_receive = NULL;
double *pointer_send = NULL;
// direction x
if(G_localL[0] < G_totalL[0]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[0]*G_nColor*G_nColor*G_nDim*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[0]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_minusGhost[0]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[0], 0, &(request_recv[0]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[0], 0, &(request_send[0]));
comm_wait(&(request_recv[0])); // blocking until receive finish
comm_wait(&(request_send[0]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[0]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_plusGhost[0]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[0], 1, &(request_recv[1]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[0], 1, &(request_send[1]));
comm_wait(&(request_recv[1])); // blocking until receive finish
comm_wait(&(request_send[1]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction y
if(G_localL[1] < G_totalL[1]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[1]*G_nColor*G_nColor*G_nDim*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[1]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_minusGhost[1]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[1], 2, &(request_recv[2]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[1], 2, &(request_send[2]));
comm_wait(&(request_recv[2])); // blocking until receive finish
comm_wait(&(request_send[2]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[1]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_plusGhost[1]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[1], 3, &(request_recv[3]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[1], 3, &(request_send[3]));
comm_wait(&(request_recv[3])); // blocking until receive finish
comm_wait(&(request_send[3]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction z
if(G_localL[2] < G_totalL[2]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[2]*G_nColor*G_nColor*G_nDim*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[2]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_minusGhost[2]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[2], 4, &(request_recv[4]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[2], 4, &(request_send[4]));
comm_wait(&(request_recv[4])); // blocking until receive finish
comm_wait(&(request_send[4]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[2]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_plusGhost[2]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[2], 5, &(request_recv[5]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[2], 5, &(request_send[5]));
comm_wait(&(request_recv[5])); // blocking until receive finish
comm_wait(&(request_send[5]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction t
if(G_localL[3] < G_totalL[3]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
// printfQuda("Here\n");
size_t nbytes = G_surface3D[3]*G_nColor*G_nColor*G_nDim*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[3]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_minusGhost[3]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[3], 6, &(request_recv[6]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[3], 6, &(request_send[6]));
comm_wait(&(request_recv[6])); // blocking until receive finish
comm_wait(&(request_send[6]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[3]-G_localVolume)*G_nColor*G_nColor*G_nDim*2;
pointer_send = h_elem + G_plusGhost[3]*G_nColor*G_nColor*G_nDim*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[3], 7, &(request_recv[7]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[3], 7, &(request_send[7]));
comm_wait(&(request_recv[7])); // blocking until receive finish
comm_wait(&(request_send[7]));
pointer_receive = NULL;
pointer_send = NULL;
}
}
#endif
}
void QKXTM_Gauge::ghostToDevice(){ // simple cudamemcpy to send ghost to device
#ifdef MULTI_GPU
if(comm_size() > 1){
double *host = h_ext_ghost;
double *device = d_elem + G_localVolume*G_nColor*G_nColor*G_nDim*2;
cudaMemcpy(device,host,bytes_ghost_length,cudaMemcpyHostToDevice);
checkCudaError();
}
#endif
}
double QKXTM_Gauge::norm2Host(){
double res = 0.;
for(int i = 0 ; i < G_nDim*G_nColor*G_nColor*G_localVolume ; i++){
res += h_elem[i*2 + 0]*h_elem[i*2 + 0] + h_elem[i*2 + 1]*h_elem[i*2 + 1];
}
#ifdef MULTI_GPU
double globalRes;
int rc = MPI_Allreduce(&res , &globalRes , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for norm2");
return globalRes ;
#else
return res;
#endif
}
double QKXTM_Gauge::norm2Device(){
double *h_partial = NULL;
double *d_partial = NULL;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
h_partial = (double*) malloc(gridDim.x * sizeof(double) ); // only real part
if(h_partial == NULL) errorQuda("Error allocating memory for host partial norm2 sums");
cudaMalloc((void**)&d_partial, gridDim.x * sizeof(double));
cudaBindTexture(0,gaugeTexNorm2,d_elem, Bytes() - BytesGhost() );
norm2Gauge_kernel<<<gridDim,blockDim>>>(d_partial);
cudaDeviceSynchronize();
cudaUnbindTexture(gaugeTexNorm2);
cudaMemcpy(h_partial, d_partial , gridDim.x * sizeof(double) , cudaMemcpyDeviceToHost);
double norm2 = 0.;
// simple host reduction
for(int i = 0 ; i < gridDim.x ; i++)
norm2 += h_partial[i];
free(h_partial);
cudaFree(d_partial);
h_partial = NULL;
d_partial = NULL;
checkCudaError();
#ifdef MULTI_GPU
double globalNorm2;
int rc = MPI_Allreduce(&norm2 , &globalNorm2 , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for norm2");
return globalNorm2 ;
#else
return norm2;
#endif
}
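// Typical usage for the plaquette (sketch; 'gauge' stands for the host gauge field pointers
// in QUDA ordering):
//   QKXTM_Gauge G;
//   G.packGauge(gauge);            // reorder the host field into GPU form
//   G.loadGauge();                 // copy the reordered field to the device
//   double plaq = G.calculatePlaq();
// The kernel result is normalised by G_totalVolume * G_nColor * 6 (sites x colours x plaquette planes).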
double QKXTM_Gauge::calculatePlaq(){
if(gauge_binded_plaq == false) bindGaugePlaq();
// if(packGauge_flag == false) packGauge(gauge); // must be done by the caller, since the plaquette may also be computed for the APE-smeared gauge
// if(loadGauge_flag == false) loadGauge();
ghostToHost(); // collect surface from device and send it to host
// comm_barrier();
cpuExchangeGhost(); // cpus exchange surfaces with previous and forward proc all dir
ghostToDevice(); // now the host send surface to device
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *h_partial_plaq = NULL;
double *d_partial_plaq = NULL;
h_partial_plaq = (double*) malloc(gridDim.x * sizeof(double) ); // only real part
if(h_partial_plaq == NULL) errorQuda("Error allocating memory for host partial plaquette");
cudaMalloc((void**)&d_partial_plaq, gridDim.x * sizeof(double));
// cudaPrintfInit();
cudaEvent_t start,stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
calculatePlaq_kernel<<<gridDim,blockDim>>>(d_partial_plaq);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
// if(comm_rank() == 0) cudaPrintfDisplay(stdout,true);
// cudaPrintfEnd();
cudaEventDestroy(start);
cudaEventDestroy(stop);
// printfQuda("Elapsed time for plaquette kernel is %f ms\n",elapsedTime);
// now copy partial plaq to host
cudaMemcpy(h_partial_plaq, d_partial_plaq , gridDim.x * sizeof(double) , cudaMemcpyDeviceToHost);
double plaquette = 0.;
#ifdef MULTI_GPU
double globalPlaquette = 0.;
#endif
// simple host reduction on plaq
for(int i = 0 ; i < gridDim.x ; i++)
plaquette += h_partial_plaq[i];
free(h_partial_plaq);
cudaFree(d_partial_plaq);
h_partial_plaq = NULL;
d_partial_plaq = NULL;
checkCudaError();
unbindGaugePlaq();
#ifdef MULTI_GPU
int rc = MPI_Allreduce(&plaquette , &globalPlaquette , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for plaquette");
return globalPlaquette/(G_totalVolume*G_nColor*6) ;
#else
return plaquette/(G_totalVolume*G_nColor*6);
#endif
}
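// checkSum: debugging helper that prints, for each direction, the sum of the real and
// imaginary parts of all links. Note that the loop extents are hard-coded to a 16^3 x 32 lattice.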
void QKXTM_Gauge::checkSum(){
  justDownloadGauge(); // gpu format
  double *M = H_elem();
  double sum_real,sum_imag;
  for(int mu = 0 ; mu < 4 ; mu++){
    sum_real = 0.; sum_imag = 0.;
    for(int t = 0 ; t < 32 ; t++)
      for(int z = 0 ; z < 16 ; z++)
        for(int y = 0 ; y < 16 ; y++)
          for(int x = 0 ; x < 16 ; x++)
            for(int c1 = 0 ; c1 < 3 ; c1++)
              for(int c2 = 0 ; c2 < 3 ; c2++){
                int position = x + 16*y + 16*16*z + 16*16*16*t + 16*16*16*32*c2 + 16*16*16*32*3*c1 + 16*16*16*32*3*3*mu;
                sum_real += M[position*2 + 0];
                sum_imag += M[position*2 + 1];
              }
    printf("%+e %+e\n",sum_real,sum_imag);
  }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void quda::UxMomentumPhase(QKXTM_Gauge &gaugeAPE, int px, int py, int pz, double zeta){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
UxMomentumPhase_kernel<<<gridDim,blockDim>>>((double2*) gaugeAPE.D_elem(), px, py, pz, zeta);
cudaDeviceSynchronize();
checkCudaError();
}
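// APE smearing runs two kernels per iteration: APE_kernel_1 computes the staple sums (kept in
// a propagator-shaped buffer) and APE_kernel_2 combines them with the original links and
// projects back to SU(3). The 'in' and 'out' gauge objects are swapped after every iteration
// (ping-pong buffering), which is why the result must be copied back into gaugeAPE when the
// iteration count is even; the temporal (direction-3) links are never smeared and are copied
// over separately in the odd case.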
void quda::APE_smearing(QKXTM_Gauge &gaugeAPE , QKXTM_Gauge &gaugeTmp){ // free function (not a class method) that performs APE smearing; it needs two QKXTM_Gauge objects
// if(G_nsmearAPE == 0) errorQuda("You cant call APE_smearing with G_nsmearAPE = 0"); // for G_nsmearAPE == 0 just copy to APE
QKXTM_Propagator *prop = new QKXTM_Propagator(); // the constructor allocates GPU memory for a propagator, used here as staple storage
QKXTM_Propagator &prp = *prop; // convenience reference
#ifdef _PROPAGATOR_APE_TEX
prp.bindPropagatorAPE(); // need to bind propagator to texture because it will be input in kernel2
#endif
// plain pointers to the gauge objects; nothing is allocated here since no constructor is called
QKXTM_Gauge *in = NULL;
QKXTM_Gauge *out = NULL;
QKXTM_Gauge *tmp = NULL;
in = &(gaugeTmp);
out = &(gaugeAPE);
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
cudaEvent_t start,stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
printfQuda("Perform APE smearing\n");
for(int iter = 0 ; iter < G_nsmearAPE ; iter++){
//rebind texture to "in" gauge field
in->rebindGaugeAPE(); // the texture now points to the "in" gauge
//communicate "in" gauge field
in->ghostToHost();
in->cpuExchangeGhost(); // perform communication of the gauge
in->ghostToDevice();
// cudaPrintfInit();
//kernel_1 first phase of APE smearing
APE_kernel_1<<<gridDim,blockDim>>>((double2*) prp.D_elem() ,(double2*) out->D_elem() );
cudaDeviceSynchronize(); // we need to block until the kernel finish
//communicate propagator
prp.ghostToHost();
prp.cpuExchangeGhost(); // perform communication of the gauge in propagator structure
prp.ghostToDevice();
//kernel_2 second phase of APE smearing and SU3 projection
#ifdef _PROPAGATOR_APE_TEX
APE_kernel_2<<<gridDim,blockDim>>>((double2*) out->D_elem() );
#else
APE_kernel_2<<<gridDim,blockDim>>>((double2*) prp.D_elem(),(double2*) out->D_elem() );
#endif
cudaDeviceSynchronize();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
//cudaPrintfEnd();
tmp=in;
in=out;
out=tmp; // swap the in/out objects
checkCudaError();
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
// printfQuda("Elapsed time for APE smearing kernel is %f ms\n",elapsedTime);
if((G_nsmearAPE%2) == 0){
out->unbindGaugeAPE();
cudaMemcpy(gaugeAPE.D_elem(),gaugeTmp.D_elem(), gaugeAPE.Bytes() - gaugeAPE.BytesGhost(), cudaMemcpyDeviceToDevice);
}
else{
out->unbindGaugeAPE();
cudaMemcpy(gaugeAPE.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , gaugeTmp.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , G_nColor*G_nColor*G_localVolume*2*sizeof(double) , cudaMemcpyDeviceToDevice);
}
checkCudaError();
delete prop;
}
void quda::APE_smearing(QKXTM_Gauge &gaugeAPE , QKXTM_Gauge &gaugeTmp, QKXTM_Propagator &prp){ // free function that performs APE smearing; needs two QKXTM_Gauge objects plus a caller-provided propagator for the staples
// if(G_nsmearAPE == 0) errorQuda("You cant call APE_smearing with G_nsmearAPE = 0"); // for G_nsmearAPE == 0 just copy to APE
// QKXTM_Propagator *prop = new QKXTM_Propagator(); // the constructor allocate memory on gpu for propagator I will use it for staple
// QKXTM_Propagator &prp = *prop; // take reference class
if(G_nsmearAPE == 0) return;
#ifdef _PROPAGATOR_APE_TEX
prp.bindPropagatorAPE(); // need to bind propagator to texture because it will be input in kernel2
#endif
// plain pointers to the gauge objects; nothing is allocated here since no constructor is called
QKXTM_Gauge *in = NULL;
QKXTM_Gauge *out = NULL;
QKXTM_Gauge *tmp = NULL;
in = &(gaugeTmp);
out = &(gaugeAPE);
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
cudaEvent_t start,stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
printfQuda("Perform APE smearing\n");
for(int iter = 0 ; iter < G_nsmearAPE ; iter++){
//rebind texture to "in" gauge field
in->rebindGaugeAPE(); // the texture now points to the "in" gauge
//communicate "in" gauge field
in->ghostToHost();
in->cpuExchangeGhost(); // perform communication of the gauge
in->ghostToDevice();
cudaPrintfInit();
//kernel_1 first phase of APE smearing
APE_kernel_1<<<gridDim,blockDim>>>((double2*) prp.D_elem() ,(double2*) out->D_elem() );
cudaDeviceSynchronize(); // we need to block until the kernel finish
//communicate propagator
prp.ghostToHost();
prp.cpuExchangeGhost(); // perform communication of the gauge in propagator structure
prp.ghostToDevice();
//kernel_2 second phase of APE smearing and SU3 projection
#ifdef _PROPAGATOR_APE_TEX
APE_kernel_2<<<gridDim,blockDim>>>((double2*) out->D_elem() );
#else
APE_kernel_2<<<gridDim,blockDim>>>((double2*) prp.D_elem() ,(double2*) out->D_elem() );
#endif
cudaDeviceSynchronize();
if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
cudaPrintfEnd();
tmp=in;
in=out;
out=tmp; // swap the in/out objects
checkCudaError();
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
prp.unbindPropagatorAPE();
// printfQuda("Elapsed time for APE smearing kernel is %f ms\n",elapsedTime);
if((G_nsmearAPE%2) == 0){
out->unbindGaugeAPE();
cudaMemcpy(gaugeAPE.D_elem(),gaugeTmp.D_elem(), gaugeAPE.Bytes() - gaugeAPE.BytesGhost(), cudaMemcpyDeviceToDevice);
}
else{
out->unbindGaugeAPE();
cudaMemcpy(gaugeAPE.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , gaugeTmp.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , G_nColor*G_nColor*G_localVolume*2*sizeof(double) , cudaMemcpyDeviceToDevice);
}
checkCudaError();
// delete prop;
// printfQuda("after delete prop\n");
}
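// HYP3D smearing follows the same ping-pong pattern, but each iteration runs five kernels:
// kernels 1-3 build the inner decorated links (weighted by G_omega2HYP) and kernels 4-5 build
// the outer links (weighted by G_omega1HYP). The propagator-shaped buffers prp1/prp2 hold the
// intermediate staples, and the textures are rebound between the steps.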
void quda::HYP3D_smearing(QKXTM_Gauge &gaugeHYP , QKXTM_Gauge &gaugeTmp, QKXTM_Propagator &prp1, QKXTM_Propagator &prp2){ // free function that performs 3D HYP smearing; needs two QKXTM_Gauge objects and two propagators for intermediate staples
// cudaBindTexture(0,gaugeTexAPE,d_elem,bytes_total_length);
// cudaBindTexture(0,propagatorTexAPE,d_elem,bytes_total_length);
if(G_nsmearHYP == 0)return;
// plain pointers to the gauge objects; nothing is allocated here since no constructor is called
QKXTM_Gauge *in = NULL;
QKXTM_Gauge *out = NULL;
QKXTM_Gauge *tmp = NULL;
in = &(gaugeTmp);
out = &(gaugeHYP);
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
cudaEvent_t start,stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
printfQuda("Perform HYP 3D smearing\n");
for(int iter = 0 ; iter < G_nsmearHYP ; iter++){
//step 1
cudaBindTexture(0,gaugeTexHYP,in->D_elem(),in->Bytes());
in->ghostToHost();
in->cpuExchangeGhost();
in->ghostToDevice();
//step2
HYP3D_kernel_1<<<gridDim,blockDim>>>((double2*) prp1.D_elem());
cudaDeviceSynchronize();
//step3
cudaBindTexture(0,propagatorTexHYP,prp1.D_elem(),prp1.Bytes());
prp1.ghostToHost();
prp1.cpuExchangeGhost();
prp1.ghostToDevice();
//step4
HYP3D_kernel_2<<<gridDim,blockDim>>>((double2*) prp2.D_elem() );
cudaDeviceSynchronize();
//step5
cudaUnbindTexture(propagatorTexHYP);
cudaBindTexture(0,propagatorTexHYP,prp2.D_elem(),prp2.Bytes());
HYP3D_kernel_3<<<gridDim,blockDim>>>((double2*) prp2.D_elem(),G_omega2HYP);
cudaDeviceSynchronize();
prp2.ghostToHost();
prp2.cpuExchangeGhost();
prp2.ghostToDevice();
// check the sum
//step6
HYP3D_kernel_4<<<gridDim,blockDim>>>((double2*) prp1.D_elem(),(double2*) out->D_elem());
cudaDeviceSynchronize();
// out->checkSum();
//step7
cudaUnbindTexture(propagatorTexHYP);
cudaBindTexture(0,propagatorTexHYP,prp1.D_elem(),prp1.Bytes());
prp1.ghostToHost();
prp1.cpuExchangeGhost();
prp1.ghostToDevice();
//step8
HYP3D_kernel_5<<<gridDim,blockDim>>>((double2*)out->D_elem(),G_omega1HYP);
cudaDeviceSynchronize();
//step9
cudaUnbindTexture(propagatorTexHYP);
cudaUnbindTexture(gaugeTexHYP);
tmp=in;
in=out;
out=tmp; // swap classes
checkCudaError();
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
if((G_nsmearHYP%2) == 0){
cudaMemcpy(gaugeHYP.D_elem(),gaugeTmp.D_elem(), gaugeHYP.Bytes() - gaugeHYP.BytesGhost(), cudaMemcpyDeviceToDevice);
}
else{
cudaMemcpy(gaugeHYP.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , gaugeTmp.D_elem() + 3*G_nColor*G_nColor*G_localVolume*2 , G_nColor*G_nColor*G_localVolume*2*sizeof(double) , cudaMemcpyDeviceToDevice);
}
checkCudaError();
}
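// createWilsonPath precomputes products of links (Wilson lines) starting from every site along
// the requested direction, for separations up to half the total extent in that direction; it
// returns a device buffer of G_localVolume * 9 * (L/2) complex doubles.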
double* quda::createWilsonPath(QKXTM_Gauge &gauge,int direction ){
double* deviceWilsonPath = NULL;
cudaBindTexture(0,gaugePath,gauge.D_elem(),gauge.Bytes());
checkCudaError();
cudaMalloc((void**)&deviceWilsonPath,(G_localVolume*9*G_totalL[direction]/2)*2*sizeof(double) ); // Wilson lines along 'direction' for separations up to half the lattice extent in that direction
checkCudaError();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
createWilsonPath_kernel<<<gridDim,blockDim>>>((double2*) deviceWilsonPath,direction);
cudaDeviceSynchronize();
cudaUnbindTexture(gaugePath);
checkCudaError();
return deviceWilsonPath;
}
double* quda::createWilsonPathBwd(QKXTM_Gauge &gauge,int direction ){
double* deviceWilsonPath = NULL;
cudaBindTexture(0,gaugePath,gauge.D_elem(),gauge.Bytes());
checkCudaError();
cudaMalloc((void**)&deviceWilsonPath,(G_localVolume*9*G_totalL[direction]/2)*2*sizeof(double) ); // backward Wilson lines along 'direction' for separations up to half the lattice extent in that direction
checkCudaError();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
createWilsonPathBwd_kernel<<<gridDim,blockDim>>>((double2*) deviceWilsonPath,direction);
cudaDeviceSynchronize();
cudaUnbindTexture(gaugePath);
checkCudaError();
return deviceWilsonPath;
}
double* quda::createWilsonPath(QKXTM_Gauge &gauge){
double* deviceWilsonPath = NULL;
cudaBindTexture(0,gaugePath,gauge.D_elem(),gauge.Bytes());
checkCudaError();
if( (G_totalL[0] != G_totalL[1]) || (G_totalL[0] != G_totalL[2])){
printfQuda("Lattice length must be equal in spatial directions\n");
}
cudaMalloc((void**)&deviceWilsonPath,(G_localVolume*9*(G_totalL[0]/2)*3)*2*sizeof(double) ); // Wilson lines along all three spatial directions, for separations up to half the spatial extent
checkCudaError();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
createWilsonPath_kernel_all<<<gridDim,blockDim>>>((double2*) deviceWilsonPath);
cudaDeviceSynchronize();
cudaUnbindTexture(gaugePath);
checkCudaError();
return deviceWilsonPath;
}
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////// class QKXTM_Vector ////////////////////////////////////////////////////////////////
// $$ Section 9: Class QKXTM_Vector $$
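// QKXTM_Vector stores a spin-colour field in "GPU form": for every (spin mu, colour c) the
// G_localVolume complex values are contiguous, i.e. index = mu*Nc*V*2 + c*V*2 + site*2 + re/im
// (see packVector below). Ghost zones for all partitioned directions are appended after the
// bulk, hence total_length = G_localVolume + ghost_length.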
QKXTM_Vector::QKXTM_Vector():
vector_binded_gauss(false) , packVector_flag(false) , loadVector_flag(false)
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must call init_qudaQKXTM first");
ghost_length = 0;
field_length = G_nSpin*G_nColor;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_all();
}
QKXTM_Vector::~QKXTM_Vector(){
destroy_all();
if(vector_binded_gauss == true) unbindVectorGauss();
vector_binded_gauss = false;
}
void QKXTM_Vector::packVector(void *vector){
if(packVector_flag == false){
double *p_vector = (double*) vector;
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // always work with format colors inside spins
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int part = 0 ; part < 2 ; part++){
h_elem[mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + iv*2 + part] = p_vector[iv*G_nSpin*G_nColor*2 + mu*G_nColor*2 + c1*2 + part];
}
printfQuda("Vector qkxTM packed on gpu form\n");
packVector_flag = true;
}
}
void QKXTM_Vector::loadVector(){
if(packVector_flag == true && loadVector_flag == false){
cudaMemcpy(d_elem,h_elem,(bytes_total_length - bytes_ghost_length), cudaMemcpyHostToDevice );
checkCudaError();
loadVector_flag = true;
printfQuda("Vector qkxTM loaded on gpu\n");
}
}
void QKXTM_Vector::ghostToHost(){ // collect the ghost (boundary) sites on the GPU and copy them to the host
// direction x ////////////////////////////////////
#ifdef MULTI_GPU
if( G_localL[0] < G_totalL[0]){
int position;
int height = G_localL[1] * G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = 2*sizeof(double);
size_t spitch = G_localL[0]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = (G_localL[0]-1)*G_localL[1]*G_localL[2]*G_localL[3];
position = (G_localL[0]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[0]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[0]*2 + c1*G_surface3D[0]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[0]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[0]*2 + c1*G_surface3D[0]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
}
// direction y ///////////////////////////////////
if( G_localL[1] < G_totalL[1]){
int position;
int height = G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[1]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*(G_localL[1]-1)*G_localL[2]*G_localL[3];
position = G_localL[0]*(G_localL[1]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[1]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[1]*2 + c1*G_surface3D[1]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[1]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[1]*2 + c1*G_surface3D[1]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
}
// direction z //////////////////////////////////
if( G_localL[2] < G_totalL[2]){
int position;
int height = G_localL[3]; // number of blocks that we need
size_t width = G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[2]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*G_localL[1]*(G_localL[2]-1)*G_localL[3];
position = G_localL[0]*G_localL[1]*(G_localL[2]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[2]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[2]*2 + c1*G_surface3D[2]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
d_elem_offset = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[2]*G_nSpin*G_nColor*2 + mu*G_nColor*G_surface3D[2]*2 + c1*G_surface3D[2]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
}
// direction t /////////////////////////////////////
if( G_localL[3] < G_totalL[3]){
int position;
int height = G_nSpin*G_nColor;
size_t width = G_localL[2]*G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[3]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
position = G_localL[0]*G_localL[1]*G_localL[2]*(G_localL[3]-1);
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_minusGhost[3]*G_nSpin*G_nColor*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
// set minus points to plus area
position = 0;
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_plusGhost[3]*G_nSpin*G_nColor*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
#endif
}
void QKXTM_Vector::cpuExchangeGhost(){ // the CPUs exchange the vector's boundary sites via MPI
#ifdef MULTI_GPU
if(comm_size() > 1){
MPI_Request request_recv[2*G_nDim];
MPI_Request request_send[2*G_nDim];
int back_nbr[4] = {X_BACK_NBR,Y_BACK_NBR,Z_BACK_NBR,T_BACK_NBR};
int fwd_nbr[4] = {X_FWD_NBR,Y_FWD_NBR,Z_FWD_NBR,T_FWD_NBR};
// direction x
if(G_localL[0] < G_totalL[0]){
double *pointer_receive = NULL;
double *pointer_send = NULL;
size_t nbytes = G_surface3D[0]*G_nSpin*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[0]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_minusGhost[0]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[0], 0, &(request_recv[0]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[0], 0, &(request_send[0]));
comm_wait(&(request_recv[0])); // blocking until receive finish
comm_wait(&(request_send[0]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[0]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_plusGhost[0]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[0], 1, &(request_recv[1]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[0], 1, &(request_send[1]));
comm_wait(&(request_recv[1])); // blocking until receive finish
comm_wait(&(request_send[1]));
}
// direction y
if(G_localL[1] < G_totalL[1]){
double *pointer_receive = NULL;
double *pointer_send = NULL;
size_t nbytes = G_surface3D[1]*G_nSpin*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[1]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_minusGhost[1]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[1], 2, &(request_recv[2]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[1], 2, &(request_send[2]));
comm_wait(&(request_recv[2])); // blocking until receive finish
comm_wait(&(request_send[2]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[1]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_plusGhost[1]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[1], 3, &(request_recv[3]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[1], 3, &(request_send[3]));
comm_wait(&(request_recv[3])); // blocking until receive finish
comm_wait(&(request_send[3]));
}
// direction z
if(G_localL[2] < G_totalL[2]){
double *pointer_receive = NULL;
double *pointer_send = NULL;
size_t nbytes = G_surface3D[2]*G_nSpin*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[2]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_minusGhost[2]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[2], 4, &(request_recv[4]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[2], 4, &(request_send[4]));
comm_wait(&(request_recv[4])); // blocking until receive finish
comm_wait(&(request_send[4]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[2]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_plusGhost[2]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[2], 5, &(request_recv[5]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[2], 5, &(request_send[5]));
comm_wait(&(request_recv[5])); // blocking until receive finish
comm_wait(&(request_send[5]));
}
// direction t
if(G_localL[3] < G_totalL[3]){
double *pointer_receive = NULL;
double *pointer_send = NULL;
size_t nbytes = G_surface3D[3]*G_nSpin*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[3]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_minusGhost[3]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[3], 6, &(request_recv[6]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[3], 6, &(request_send[6]));
comm_wait(&(request_recv[6])); // blocking until receive finish
comm_wait(&(request_send[6]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[3]-G_localVolume)*G_nSpin*G_nColor*2;
pointer_send = h_elem + G_plusGhost[3]*G_nSpin*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[3], 7, &(request_recv[7]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[3], 7, &(request_send[7]));
comm_wait(&(request_recv[7])); // blocking until receive finish
comm_wait(&(request_send[7]));
}
}
#endif
}
void QKXTM_Vector::ghostToDevice(){ // simple cudamemcpy to send ghost to device
#ifdef MULTI_GPU
if(comm_size() > 1){
double *host = h_ext_ghost;
double *device = d_elem + G_localVolume*G_nSpin*G_nColor*2;
cudaMemcpy(device,host,bytes_ghost_length,cudaMemcpyHostToDevice);
checkCudaError();
}
#endif
}
void QKXTM_Vector::bindVectorGauss(){
if( vector_binded_gauss == false ){
cudaBindTexture(0,vectorTexGauss,d_elem,bytes_total_length);
checkCudaError();
}
vector_binded_gauss = true;
}
void QKXTM_Vector::unbindVectorGauss(){
if(vector_binded_gauss == true){
cudaUnbindTexture(vectorTexGauss);
checkCudaError();
}
vector_binded_gauss = false;
}
void QKXTM_Vector::rebindVectorGauss(){
cudaUnbindTexture(vectorTexGauss);
cudaBindTexture(0,vectorTexGauss,d_elem,bytes_total_length);
checkCudaError();
}
void QKXTM_Vector::download(){
cudaMemcpy(h_elem,d_elem,Bytes() - BytesGhost() , cudaMemcpyDeviceToHost);
checkCudaError();
double *vector_tmp = (double*) malloc( Bytes() - BytesGhost() );
if(vector_tmp == NULL) errorQuda("Error allocating memory for the temporary vector");
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // always work with format colors inside spins
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int part = 0 ; part < 2 ; part++){
vector_tmp[iv*G_nSpin*G_nColor*2 + mu*G_nColor*2 + c1*2 + part] = h_elem[mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + iv*2 + part];
}
memcpy(h_elem,vector_tmp,Bytes() - BytesGhost());
free(vector_tmp);
vector_tmp = NULL;
}
double QKXTM_Vector::norm2Host(){
double res = 0.;
for(int i = 0 ; i < G_nSpin*G_nColor*G_localVolume ; i++){
res += h_elem[i*2 + 0]*h_elem[i*2 + 0] + h_elem[i*2 + 1]*h_elem[i*2 + 1];
}
#ifdef MULTI_GPU
double globalRes;
int rc = MPI_Allreduce(&res , &globalRes , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for norm2");
return globalRes ;
#else
return res;
#endif
}
double QKXTM_Vector::norm2Device(){
double *h_partial = NULL;
double *d_partial = NULL;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
h_partial = (double*) malloc(gridDim.x * sizeof(double) ); // only real part
if(h_partial == NULL) errorQuda("Error allocating memory for host partial norm2 sums");
cudaMalloc((void**)&d_partial, gridDim.x * sizeof(double));
cudaBindTexture(0,vectorTexNorm2,d_elem, Bytes() - BytesGhost() );
norm2Vector_kernel<<<gridDim,blockDim>>>(d_partial);
cudaDeviceSynchronize();
cudaUnbindTexture(vectorTexNorm2);
cudaMemcpy(h_partial, d_partial , gridDim.x * sizeof(double) , cudaMemcpyDeviceToHost);
double norm2 = 0.;
// simple host reduction
for(int i = 0 ; i < gridDim.x ; i++)
norm2 += h_partial[i];
free(h_partial);
cudaFree(d_partial);
h_partial = NULL;
d_partial = NULL;
checkCudaError();
#ifdef MULTI_GPU
double globalNorm2;
int rc = MPI_Allreduce(&norm2 , &globalNorm2 , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for norm2");
return globalNorm2 ;
#else
return norm2;
#endif
}
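// uploadToCuda / downloadFromCuda convert between this class's layout and QUDA's internal
// even/odd (checkerboard) ordering: the kernels are launched with G_localVolume/2 threads and
// scatter/gather directly into the Even() and Odd() halves of the cudaColorSpinorField.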
void QKXTM_Vector::uploadToCuda(cudaColorSpinorField &cudaVector){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
// cudaPrintfInit();
uploadToCuda_kernel<<<gridDim,blockDim>>>( (double2*) d_elem , (double2*) pointEven, (double2*) pointOdd);
//cudaPrintfDisplay(stdout,true);
//cudaPrintfEnd();
cudaDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::downloadFromCuda(cudaColorSpinorField &cudaVector){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
downloadFromCuda_kernel<<<gridDim,blockDim>>>( (double2*) d_elem , (double2*) pointEven, (double2*) pointOdd);
cudaDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::flagsToFalse(){
packVector_flag = false;
loadVector_flag = false;
}
void QKXTM_Vector::scaleVector(double a){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
scaleVector_kernel<<<gridDim,blockDim>>>((double2*) d_elem, a);
cudaDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::copyPropagator3D(QKXTM_Propagator3D &prop, int timeslice, int nu , int c2){
double *pointer_src = NULL;
double *pointer_dst = NULL;
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
pointer_dst = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + timeslice*G_localVolume3D*2 ;
pointer_src = prop.D_elem() + mu*G_nSpin*G_nColor*G_nColor*G_localVolume3D*2 + nu*G_nColor*G_nColor*G_localVolume3D*2 + c1*G_nColor*G_localVolume3D*2 + c2*G_localVolume3D*2;
cudaMemcpy(pointer_dst, pointer_src, G_localVolume3D*2 * sizeof(double), cudaMemcpyDeviceToDevice);
}
pointer_src = NULL;
pointer_dst = NULL;
checkCudaError();
}
void QKXTM_Vector::copyPropagator(QKXTM_Propagator &prop, int nu , int c2){
double *pointer_src = NULL;
double *pointer_dst = NULL;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
pointer_dst = d_elem + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 ;
pointer_src = prop.D_elem() + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2;
cudaMemcpy(pointer_dst, pointer_src, G_localVolume*2 *sizeof(double), cudaMemcpyDeviceToDevice);
}
pointer_src = NULL;
pointer_dst = NULL;
checkCudaError();
}
void QKXTM_Vector::conjugate(){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
conjugate_vector_kernel<<<gridDim,blockDim>>>( (double2*) D_elem() );
cudaDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::applyGamma5(){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
apply_gamma5_vector_kernel<<<gridDim,blockDim>>>( (double2*) D_elem() );
cudaDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::applyGammaTransformation(){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
apply_gamma_transf_vector_kernel<<<gridDim,blockDim>>>( (double2*) D_elem() );
cudaDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::applyMomentum(int nx, int ny, int nz){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
apply_momentum_kernel<<<gridDim,blockDim>>>( (double2*) D_elem() , nx , ny , nz);
cudaDeviceSynchronize();
checkCudaError();
}
void QKXTM_Vector::getVectorProp3D(QKXTM_Propagator3D &prop1, int timeslice,int nu,int c2){
// cudaPrintfInit();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
//cudaPrintfEnd();
//cudaPrintfInit();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
cudaBindTexture(0, propagator3DTex1, prop1.D_elem(), prop1.Bytes());
getVectorProp3D_kernel<<<gridDim,blockDim>>>( (double2*) this->D_elem(), timeslice , nu, c2);
cudaDeviceSynchronize();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
cudaUnbindTexture(propagator3DTex1);
// cudaPrintfEnd();
checkCudaError();
}
// new addition
#include <lime.h>
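// Helpers for the LIME writer below: the binary payload is stored big-endian on disk, so on
// little-endian machines qcd_swap_8() byte-swaps the 8-byte doubles in place before writing.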
static void qcd_swap_8(double *Rd, int N)
{
register char *i,*j,*k;
char swap;
char *max;
char *R = (char*) Rd;
max = R+(N<<3);
for(i=R;i<max;i+=8)
{
j=i; k=j+7;
swap = *j; *j = *k; *k = swap;
j++; k--;
swap = *j; *j = *k; *k = swap;
j++; k--;
swap = *j; *j = *k; *k = swap;
j++; k--;
swap = *j; *j = *k; *k = swap;
}
}
static int qcd_isBigEndian()
{
union{
char C[4];
int R ;
}word;
word.R=1;
if(word.C[3]==1) return 1;
if(word.C[0]==1) return 0;
return -1;
}
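// write(): rank 0 writes the LIME headers (propagator-type, quda-propagator-format and the
// scidac-binary-data record) and broadcasts the byte offset of the binary payload; all ranks
// then write their local sublattice collectively with MPI-IO through a 5D subarray type. The
// data is taken from h_elem in (t,z,y,x,spin,colour) order, i.e. the layout produced by
// download(), so the vector is expected to have been downloaded to the host first.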
void QKXTM_Vector::write(char *filename){
FILE *fid;
int error_in_header=0;
LimeWriter *limewriter;
LimeRecordHeader *limeheader = NULL;
int ME_flag=0, MB_flag=0, limeStatus;
u_int64_t message_length;
MPI_Offset offset;
MPI_Datatype subblock; //MPI-type, 5d subarray
MPI_File mpifid;
MPI_Status status;
int sizes[5], lsizes[5], starts[5];
long int i;
int chunksize,mu,c1;
char *buffer;
int x,y,z,t;
char tmp_string[2048];
if(comm_rank() == 0){ // master will write the lime header
fid = fopen(filename,"w");
if(fid == NULL){
fprintf(stderr,"Error open file to write propagator in %s \n",__func__);
comm_exit(-1);
}
else{
limewriter = limeCreateWriter(fid);
if(limewriter == (LimeWriter*)NULL) {
fprintf(stderr, "Error in %s. LIME error in file for writing! in %s\n", __func__);
error_in_header=1;
comm_exit(-1);
}
else
{
sprintf(tmp_string, "DiracFermion_Sink");
message_length=(long int) strlen(tmp_string);
MB_flag=1; ME_flag=1;
limeheader = limeCreateHeader(MB_flag, ME_flag, "propagator-type", message_length);
if(limeheader == (LimeRecordHeader*)NULL)
{
fprintf(stderr, "Error in %s. LIME create header error.\n", __func__);
error_in_header=1;
comm_exit(-1);
}
limeStatus = limeWriteRecordHeader(limeheader, limewriter);
if(limeStatus < 0 )
{
fprintf(stderr, "Error in %s. LIME write header %d\n", __func__, limeStatus);
error_in_header=1;
comm_exit(-1);
}
limeDestroyHeader(limeheader);
limeStatus = limeWriteRecordData(tmp_string, &message_length, limewriter);
if(limeStatus < 0 )
{
fprintf(stderr, "Error in %s. LIME write header error %d\n", __func__, limeStatus);
error_in_header=1;
comm_exit(-1);
}
sprintf(tmp_string, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<etmcFormat>\n\t<field>diracFermion</field>\n\t<precision>64</precision>\n\t<flavours>1</flavours>\n\t<lx>%d</lx>\n\t<ly>%d</ly>\n\t<lz>%d</lz>\n\t<lt>%d</lt>\n\t<spin>4</spin>\n\t<colour>3</colour>\n</etmcFormat>", G_totalL[0], G_totalL[1], G_totalL[2], G_totalL[3]);
message_length=(long int) strlen(tmp_string);
MB_flag=1; ME_flag=1;
limeheader = limeCreateHeader(MB_flag, ME_flag, "quda-propagator-format", message_length);
if(limeheader == (LimeRecordHeader*)NULL)
{
fprintf(stderr, "Error in %s. LIME create header error.\n", __func__);
error_in_header=1;
comm_exit(-1);
}
limeStatus = limeWriteRecordHeader(limeheader, limewriter);
if(limeStatus < 0 )
{
fprintf(stderr, "Error in %s. LIME write header %d\n", __func__, limeStatus);
error_in_header=1;
comm_exit(-1);
}
limeDestroyHeader(limeheader);
limeStatus = limeWriteRecordData(tmp_string, &message_length, limewriter);
if(limeStatus < 0 )
{
fprintf(stderr, "Error in %s. LIME write header error %d\n", __func__, limeStatus);
error_in_header=1;
comm_exit(-1);
}
message_length = G_totalVolume*4*3*2*sizeof(double);
MB_flag=1; ME_flag=1;
limeheader = limeCreateHeader(MB_flag, ME_flag, "scidac-binary-data", message_length);
limeStatus = limeWriteRecordHeader( limeheader, limewriter);
if(limeStatus < 0 )
{
fprintf(stderr, "Error in %s. LIME write header error %d\n", __func__, limeStatus);
error_in_header=1;
}
limeDestroyHeader( limeheader );
}
message_length=1;
limeWriteRecordData(tmp_string, &message_length, limewriter);
limeDestroyWriter(limewriter);
offset = ftell(fid)-1;
fclose(fid);
}
}
MPI_Bcast(&offset,sizeof(MPI_Offset),MPI_BYTE,0,MPI_COMM_WORLD);
sizes[0]=G_totalL[3];
sizes[1]=G_totalL[2];
sizes[2]=G_totalL[1];
sizes[3]=G_totalL[0];
sizes[4]=4*3*2;
lsizes[0]=G_localL[3];
lsizes[1]=G_localL[2];
lsizes[2]=G_localL[1];
lsizes[3]=G_localL[0];
lsizes[4]=sizes[4];
starts[0]=comm_coords(3)*G_localL[3];
starts[1]=comm_coords(2)*G_localL[2];
starts[2]=comm_coords(1)*G_localL[1];
starts[3]=comm_coords(0)*G_localL[0];
starts[4]=0;
MPI_Type_create_subarray(5,sizes,lsizes,starts,MPI_ORDER_C,MPI_DOUBLE,&subblock);
MPI_Type_commit(&subblock);
MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_WRONLY, MPI_INFO_NULL, &mpifid);
MPI_File_set_view(mpifid, offset, MPI_FLOAT, subblock, "native", MPI_INFO_NULL);
chunksize=4*3*2*sizeof(double);
buffer = (char*) malloc(chunksize*G_localVolume);
if(buffer==NULL)
{
fprintf(stderr,"Error in %s! Out of memory\n", __func__);
comm_exit(-1);
}
i=0;
for(t=0; t<G_localL[3];t++)
for(z=0; z<G_localL[2];z++)
for(y=0; y<G_localL[1];y++)
for(x=0; x<G_localL[0];x++)
for(mu=0; mu<4; mu++)
for(c1=0; c1<3; c1++) // works only for QUDA_DIRAC_ORDER (color inside spin)
{
((double *)buffer)[i] = h_elem[t*G_localL[2]*G_localL[1]*G_localL[0]*4*3*2 + z*G_localL[1]*G_localL[0]*4*3*2 + y*G_localL[0]*4*3*2 + x*4*3*2 + mu*3*2 + c1*2 + 0];
((double *)buffer)[i+1] = h_elem[t*G_localL[2]*G_localL[1]*G_localL[0]*4*3*2 + z*G_localL[1]*G_localL[0]*4*3*2 + y*G_localL[0]*4*3*2 + x*4*3*2 + mu*3*2 + c1*2 + 1];
i+=2;
}
if(!qcd_isBigEndian())
qcd_swap_8((double*) buffer,2*4*3*G_localVolume);
MPI_File_write_all(mpifid, buffer, 4*3*2*G_localVolume, MPI_DOUBLE, &status);
free(buffer);
MPI_File_close(&mpifid);
MPI_Type_free(&subblock);
}
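// Typical usage (sketch): Gaussian-smear a source vector with the APE-smeared gauge field.
// The input lives in the second argument and the smeared result always ends up in the first
// one (the copy at the end of the routine undoes the final buffer swap when the iteration
// count is even):
//   QKXTM_Vector smeared, tmp;
//   tmp.packVector(spinor);  tmp.loadVector();   // 'spinor' = host source field, QUDA order
//   quda::Gaussian_smearing(smeared, tmp, gaugeAPE);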
//
void quda::Gaussian_smearing(QKXTM_Vector &vectorGauss , QKXTM_Vector &vectorTmp , QKXTM_Gauge &gaugeAPE){
// if(G_nsmearGauss == 0) errorQuda("You cant run Gaussian_smearing with G_nsmearGauss = 0"); // for G_nsmearGauss == 0 just copy
// first communicate APE gauge
gaugeAPE.ghostToHost();
gaugeAPE.cpuExchangeGhost();
gaugeAPE.ghostToDevice();
QKXTM_Vector *in = NULL;
QKXTM_Vector *out = NULL;
QKXTM_Vector *tmp = NULL;
in = &(vectorTmp);
out = &(vectorGauss);
gaugeAPE.bindGaugeAPE();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
// cudaPrintfInit();
cudaEvent_t start,stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
printfQuda("Permform Gaussian smearing\n");
for(int iter = 0 ; iter < G_nsmearGauss ; iter++){
in->ghostToHost();
in->cpuExchangeGhost();
in->ghostToDevice();
in->rebindVectorGauss();
Gauss_kernel<<<gridDim,blockDim>>>((double2*) out->D_elem());
// cudaPrintfDisplay(stdout,true);
cudaDeviceSynchronize();
checkCudaError();
tmp = in;
in = out;
out = tmp;
}
// cudaPrintfEnd();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
// printfQuda("Elapsed time for APE smearing kernel is %f ms\n",elapsedTime);
if( (G_nsmearGauss%2) == 0){
cudaMemcpy(vectorGauss.D_elem() , vectorTmp.D_elem() , vectorGauss.Bytes() - vectorGauss.BytesGhost() , cudaMemcpyDeviceToDevice);
}
gaugeAPE.unbindGaugeAPE();
}
void quda::seqSourceFixSinkPart1(QKXTM_Vector &vec, QKXTM_Propagator3D &prop1, QKXTM_Propagator3D &prop2, int timeslice,int nu,int c2, whatProjector typeProj , whatParticle testParticle){
// cudaPrintfInit();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
//cudaPrintfEnd();
//cudaPrintfInit();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
cudaBindTexture(0, propagator3DTex1, prop1.D_elem(), prop1.Bytes());
cudaBindTexture(0, propagator3DTex2, prop2.D_elem(), prop2.Bytes());
seqSourceFixSinkPart1_kernel<<<gridDim,blockDim>>>( (double2*) vec.D_elem(), timeslice , nu, c2, typeProj , testParticle );
cudaDeviceSynchronize();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
cudaUnbindTexture(propagator3DTex1);
cudaUnbindTexture(propagator3DTex2);
// cudaPrintfEnd();
checkCudaError();
}
void quda::seqSourceFixSinkPart2(QKXTM_Vector &vec, QKXTM_Propagator3D &prop1, int timeslice,int nu,int c2, whatProjector typeProj, whatParticle testParticle){
// cudaPrintfInit();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
//cudaPrintfEnd();
//cudaPrintfInit();
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
cudaBindTexture(0, propagator3DTex1, prop1.D_elem(), prop1.Bytes());
seqSourceFixSinkPart2_kernel<<<gridDim,blockDim>>>( (double2*) vec.D_elem(), timeslice , nu, c2, typeProj, testParticle );
cudaDeviceSynchronize();
// if(comm_rank() == 0)cudaPrintfDisplay(stdout,true);
cudaUnbindTexture(propagator3DTex1);
// cudaPrintfEnd();
checkCudaError();
}
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------------
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////// class QKXTM_Propagator ////////////////////////////////////////////////////////////////
// $$ Section 10: Class QKXTM_Propagator $$
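// QKXTM_Propagator stores a full (spin x colour)^2 propagator in "GPU form": for every
// (mu,nu,c1,c2) the G_localVolume complex values are contiguous (see packPropagator below),
// with ghost zones appended after the bulk exactly as for QKXTM_Gauge and QKXTM_Vector.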
QKXTM_Propagator::QKXTM_Propagator():
propagator_binded_ape(false) , packPropagator_flag(false) , loadPropagator_flag(false)
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must call init_qudaQKXTM first");
ghost_length = 0;
field_length = G_nSpin*G_nSpin*G_nColor*G_nColor;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_all();
}
QKXTM_Propagator::~QKXTM_Propagator(){
destroy_all();
if(propagator_binded_ape == true) unbindPropagatorAPE();
propagator_binded_ape = false;
}
void QKXTM_Propagator::packPropagator(void *propagator){
if(packPropagator_flag == false){
double *p_propagator = (double*) propagator;
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // always work with format colors inside spins
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++)
for(int part = 0 ; part < 2 ; part++){
h_elem[mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + iv*2 + part] = p_propagator[iv*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*2 + nu*G_nColor*G_nColor*2 + c1*G_nColor*2 + c2*2 + part];
}
printfQuda("Propagator qkxTM packed on gpu form\n");
packPropagator_flag = true;
}
}
void QKXTM_Propagator::loadPropagator(){
if(packPropagator_flag == true && loadPropagator_flag == false){
cudaMemcpy(d_elem,h_elem,(bytes_total_length - bytes_ghost_length), cudaMemcpyHostToDevice );
checkCudaError();
loadPropagator_flag = true;
printfQuda("Propagator qkxTM loaded on gpu\n");
}
}
void QKXTM_Propagator::ghostToHost(){ // collect the ghost (boundary) sites on the GPU and copy them to the host
// direction x ////////////////////////////////////
#ifdef MULTI_GPU
if( G_localL[0] < G_totalL[0]){
int position;
int height = G_localL[1] * G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = 2*sizeof(double);
size_t spitch = G_localL[0]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = (G_localL[0]-1)*G_localL[1]*G_localL[2]*G_localL[3];
position = (G_localL[0]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[0]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[0]*2 + nu*G_nColor*G_nColor*G_surface3D[0]*2 + c1*G_nColor*G_surface3D[0]*2 + c2*G_surface3D[0]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[0]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[0]*2 + nu*G_nColor*G_nColor*G_surface3D[0]*2 + c1*G_nColor*G_surface3D[0]*2 + c2*G_surface3D[0]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
}
// direction y ///////////////////////////////////
if( G_localL[1] < G_totalL[1]){
int position;
int height = G_localL[2] * G_localL[3]; // number of blocks that we need
size_t width = G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[1]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*(G_localL[1]-1)*G_localL[2]*G_localL[3];
position = G_localL[0]*(G_localL[1]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[1]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[1]*2 + nu*G_nColor*G_nColor*G_surface3D[1]*2 + c1*G_nColor*G_surface3D[1]*2 + c2*G_surface3D[1]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[1]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[1]*2 + nu*G_nColor*G_nColor*G_surface3D[1]*2 + c1*G_nColor*G_surface3D[1]*2 + c2*G_surface3D[1]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
}
// direction z //////////////////////////////////
if( G_localL[2] < G_totalL[2]){
int position;
int height = G_localL[3]; // number of blocks that we need
size_t width = G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[2]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
// position = G_localL[0]*G_localL[1]*(G_localL[2]-1)*G_localL[3];
position = G_localL[0]*G_localL[1]*(G_localL[2]-1);
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_minusGhost[2]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[2]*2 + nu*G_nColor*G_nColor*G_surface3D[2]*2 + c1*G_nColor*G_surface3D[2]*2 + c2*G_surface3D[2]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
// set minus points to plus area
position = 0;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
d_elem_offset = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + position*2;
h_elem_offset = h_elem + G_plusGhost[2]*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*G_surface3D[2]*2 + nu*G_nColor*G_nColor*G_surface3D[2]*2 + c1*G_nColor*G_surface3D[2]*2 + c2*G_surface3D[2]*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
}
// direction t /////////////////////////////////////
if( G_localL[3] < G_totalL[3]){
int position;
int height = G_nSpin*G_nSpin*G_nColor*G_nColor;
size_t width = G_localL[2]*G_localL[1]*G_localL[0]*2*sizeof(double);
size_t spitch = G_localL[3]*width;
size_t dpitch = width;
double *h_elem_offset = NULL;
double *d_elem_offset = NULL;
// set plus points to minus area
position = G_localL[0]*G_localL[1]*G_localL[2]*(G_localL[3]-1);
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_minusGhost[3]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
// set minus points to plus area
position = 0;
d_elem_offset = d_elem + position*2;
h_elem_offset = h_elem + G_plusGhost[3]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
cudaMemcpy2D(h_elem_offset,dpitch,d_elem_offset,spitch,width,height,cudaMemcpyDeviceToHost);
}
#endif
}
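// Sketch of the strided gather used above (comment only, mirroring the x-direction case):
// cudaMemcpy2D copies `height` rows of `width` bytes, stepping `spitch` bytes between rows
// on the source and `dpitch` bytes on the destination. For the x boundary every "row" is a
// single complex number whose neighbours sit G_localL[0] sites apart on the device but are
// packed densely in the host ghost buffer,
//
//   size_t width  = 2*sizeof(double);   // one complex entry per row
//   size_t spitch = G_localL[0]*width;  // device stride between boundary sites
//   size_t dpitch = width;              // host buffer is dense
//   cudaMemcpy2D(h_elem_offset, dpitch, d_elem_offset, spitch,
//                width, height, cudaMemcpyDeviceToHost);
//
// the y, z and t directions differ only in how large the contiguous "row" is.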
void QKXTM_Propagator::cpuExchangeGhost(){ // cpus exchange the propagator ghost surfaces
#ifdef MULTI_GPU
if(comm_size() > 1){
MPI_Request request_recv[2*G_nDim];
MPI_Request request_send[2*G_nDim];
int back_nbr[4] = {X_BACK_NBR,Y_BACK_NBR,Z_BACK_NBR,T_BACK_NBR};
int fwd_nbr[4] = {X_FWD_NBR,Y_FWD_NBR,Z_FWD_NBR,T_FWD_NBR};
double *pointer_receive = NULL;
double *pointer_send = NULL;
// direction x
if(G_localL[0] < G_totalL[0]){
size_t nbytes = G_surface3D[0]*G_nSpin*G_nSpin*G_nColor*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[0]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_minusGhost[0]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[0], 0, &(request_recv[0]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[0], 0, &(request_send[0]));
comm_wait(&(request_recv[0])); // blocking until receive finish
comm_wait(&(request_send[0]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[0]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_plusGhost[0]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[0], 1, &(request_recv[1]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[0], 1, &(request_send[1]));
comm_wait(&(request_recv[1])); // blocking until receive finish
comm_wait(&(request_send[1]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction y
if(G_localL[1] < G_totalL[1]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[1]*G_nSpin*G_nSpin*G_nColor*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[1]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_minusGhost[1]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[1], 2, &(request_recv[2]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[1], 2, &(request_send[2]));
comm_wait(&(request_recv[2])); // blocking until receive finish
comm_wait(&(request_send[2]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[1]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_plusGhost[1]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[1], 3, &(request_recv[3]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[1], 3, &(request_send[3]));
comm_wait(&(request_recv[3])); // blocking until receive finish
comm_wait(&(request_send[3]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction z
if(G_localL[2] < G_totalL[2]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[2]*G_nSpin*G_nSpin*G_nColor*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[2]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_minusGhost[2]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[2], 4, &(request_recv[4]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[2], 4, &(request_send[4]));
comm_wait(&(request_recv[4])); // blocking until receive finish
comm_wait(&(request_send[4]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[2]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_plusGhost[2]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[2], 5, &(request_recv[5]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[2], 5, &(request_send[5]));
comm_wait(&(request_recv[5])); // blocking until receive finish
comm_wait(&(request_send[5]));
pointer_receive = NULL;
pointer_send = NULL;
}
// direction t
if(G_localL[3] < G_totalL[3]){
// double *pointer_receive = NULL;
// double *pointer_send = NULL;
size_t nbytes = G_surface3D[3]*G_nSpin*G_nSpin*G_nColor*G_nColor*2*sizeof(double);
// send to plus
pointer_receive = h_ext_ghost + (G_minusGhost[3]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_minusGhost[3]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, back_nbr[3], 6, &(request_recv[6]));
comm_send_with_tag(pointer_send, nbytes, fwd_nbr[3], 6, &(request_send[6]));
comm_wait(&(request_recv[6])); // blocking until receive finish
comm_wait(&(request_send[6]));
// send to minus
pointer_receive = h_ext_ghost + (G_plusGhost[3]-G_localVolume)*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
pointer_send = h_elem + G_plusGhost[3]*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
comm_recv_with_tag(pointer_receive, nbytes, fwd_nbr[3], 7, &(request_recv[7]));
comm_send_with_tag(pointer_send, nbytes, back_nbr[3], 7, &(request_send[7]));
comm_wait(&(request_recv[7])); // blocking until receive finish
comm_wait(&(request_send[7]));
pointer_receive = NULL;
pointer_send = NULL;
}
}
#endif
}
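// The exchange above is the usual halo pattern, one direction at a time: post a receive for
// the neighbour's surface, send our own surface, and block on both before moving on. A
// minimal plain-MPI analogue of one leg (buffer and rank names chosen here purely for
// illustration) would be
//
//   MPI_Request rreq, sreq;
//   MPI_Irecv(recv_buf, n, MPI_DOUBLE, back_rank, tag, comm, &rreq);
//   MPI_Isend(send_buf, n, MPI_DOUBLE, fwd_rank,  tag, comm, &sreq);
//   MPI_Wait(&rreq, MPI_STATUS_IGNORE);
//   MPI_Wait(&sreq, MPI_STATUS_IGNORE);
//
// with comm_recv_with_tag/comm_send_with_tag/comm_wait playing the same roles here.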
void QKXTM_Propagator::ghostToDevice(){ // simple cudaMemcpy to send the ghost surfaces to the device
#ifdef MULTI_GPU
if(comm_size() > 1){
double *host = h_ext_ghost;
double *device = d_elem + G_localVolume*G_nSpin*G_nSpin*G_nColor*G_nColor*2;
cudaMemcpy(device,host,bytes_ghost_length,cudaMemcpyHostToDevice);
checkCudaError();
}
#endif
}
void QKXTM_Propagator::bindPropagatorAPE(){
if( propagator_binded_ape == false ){
cudaBindTexture(0,propagatorTexAPE,d_elem,bytes_total_length);
checkCudaError();
}
propagator_binded_ape = true;
}
void QKXTM_Propagator::unbindPropagatorAPE(){
if(propagator_binded_ape == true){
cudaUnbindTexture(propagatorTexAPE);
checkCudaError();
}
propagator_binded_ape = false;
}
void QKXTM_Propagator::rebindPropagatorAPE(){
cudaUnbindTexture(propagatorTexAPE);
cudaBindTexture(0,propagatorTexAPE,d_elem,bytes_total_length);
checkCudaError();
}
double QKXTM_Propagator::norm2Host(){
double res = 0.;
for(int i = 0 ; i < G_nSpin*G_nSpin*G_nColor*G_nColor*G_localVolume ; i++){
res += h_elem[i*2 + 0]*h_elem[i*2 + 0] + h_elem[i*2 + 1]*h_elem[i*2 + 1];
}
#ifdef MULTI_GPU
double globalRes;
int rc = MPI_Allreduce(&res , &globalRes , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for propagator norm2");
return globalRes ;
#else
return res;
#endif
}
double QKXTM_Propagator::norm2Device(){
double *h_partial = NULL;
double *d_partial = NULL;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
h_partial = (double*) malloc(gridDim.x * sizeof(double) ); // only real part
if(h_partial == NULL) errorQuda("Error allocating memory for host partial norm2");
cudaMalloc((void**)&d_partial, gridDim.x * sizeof(double));
cudaBindTexture(0,propagatorTexNorm2,d_elem, Bytes() - BytesGhost() );
norm2Propagator_kernel<<<gridDim,blockDim>>>(d_partial);
cudaDeviceSynchronize();
cudaUnbindTexture(propagatorTexNorm2);
cudaMemcpy(h_partial, d_partial , gridDim.x * sizeof(double) , cudaMemcpyDeviceToHost);
double norm2 = 0.;
// simple host reduction
for(int i = 0 ; i < gridDim.x ; i++)
norm2 += h_partial[i];
free(h_partial);
cudaFree(d_partial);
h_partial = NULL;
d_partial = NULL;
checkCudaError();
#ifdef MULTI_GPU
double globalNorm2;
int rc = MPI_Allreduce(&norm2 , &globalNorm2 , 1 , MPI_DOUBLE , MPI_SUM , MPI_COMM_WORLD);
if( rc != MPI_SUCCESS ) errorQuda("Error in MPI reduction for norm2");
return globalNorm2 ;
#else
return norm2;
#endif
}
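// Note on the reduction above: the squared norm is accumulated in three stages --
// (1) norm2Propagator_kernel leaves one partial sum per thread block in d_partial,
// (2) the host adds the gridDim.x partials serially,
// (3) MPI_Allreduce sums the per-rank results. The last two stages amount to
//
//   double local = 0.;
//   for(int i = 0 ; i < nBlocks ; i++) local += h_partial[i];
//   MPI_Allreduce(&local, &global, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
//
// (sketch only; local/global/nBlocks are not names used in this code).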
void QKXTM_Propagator::absorbVector(QKXTM_Vector &vec, int nu, int c2){
double *pointProp;
double *pointVec;
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
pointProp = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2;
pointVec = vec.D_elem() + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2;
cudaMemcpy(pointProp,pointVec,G_localVolume*2*sizeof(double),cudaMemcpyDeviceToDevice);
}
checkCudaError();
}
void QKXTM_Propagator::download(){
cudaMemcpy(h_elem,d_elem,Bytes() - BytesGhost() , cudaMemcpyDeviceToHost);
checkCudaError();
double *propagator_tmp = (double*) malloc( Bytes() - BytesGhost() );
if(propagator_tmp == NULL)errorQuda("Error allocating memory for tmp propagator");
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // we always work with the layout where colors run inside spins
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++)
for(int part = 0 ; part < 2 ; part++){
propagator_tmp[iv*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*2 + nu*G_nColor*G_nColor*2 + c1*G_nColor*2 + c2*2 + part] = h_elem[mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + iv*2 + part];
}
memcpy(h_elem,propagator_tmp,Bytes() - BytesGhost());
free(propagator_tmp);
propagator_tmp = NULL;
}
void QKXTM_Propagator::rotateToPhysicalBasePlus(){
printfQuda("Perform rotation to physical base using + sign\n");
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
rotateToPhysicalBase_kernel<<<gridDim,blockDim>>>( (double2*) d_elem , +1); //kernel
cudaDeviceSynchronize();
checkCudaError();
}
void QKXTM_Propagator::rotateToPhysicalBaseMinus(){
printfQuda("Perform rotation to physical base using - sign\n");
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
rotateToPhysicalBase_kernel<<<gridDim,blockDim>>>( (double2*) d_elem , -1); //kernel
cudaDeviceSynchronize();
checkCudaError();
}
void QKXTM_Propagator::conjugate(){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
conjugate_propagator_kernel<<<gridDim,blockDim>>>( (double2*) D_elem() );
cudaDeviceSynchronize();
checkCudaError();
}
void QKXTM_Propagator::applyGamma5(){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
apply_gamma5_propagator_kernel<<<gridDim,blockDim>>>( (double2*) D_elem() );
cudaDeviceSynchronize();
checkCudaError();
}
void QKXTM_Propagator::checkSum(){
download();
double *M = H_elem();
// debugging checksum over six off-diagonal (mu,nu) spin combinations, kept in the original order
int spinPairs[6][2] = { {2,0} , {2,1} , {1,2} , {1,0} , {0,2} , {0,1} };
for(int ip = 0 ; ip < 6 ; ip++){
int mu = spinPairs[ip][0];
int nu = spinPairs[ip][1];
double sum_real = 0.;
double sum_imag = 0.;
for(int t = 0 ; t < G_localL[3] ; t++)
for(int z = 0 ; z < G_localL[2] ; z++)
for(int y = 0 ; y < G_localL[1] ; y++)
for(int x = 0 ; x < G_localL[0] ; x++)
for(int c1 =0 ; c1 < 3 ; c1++)
for(int c2 =0 ; c2 < 3 ; c2++){
int prp_position = c2 + 3*c1 + 3*3*nu + 3*3*4*mu + 3*3*4*4*x + 3*3*4*4*G_localL[0]*y + 3*3*4*4*G_localL[0]*G_localL[1]*z + 3*3*4*4*G_localL[0]*G_localL[1]*G_localL[2]*t;
sum_real += M[prp_position*2 + 0];
sum_imag += M[prp_position*2 + 1];
}
printf("%d %+e %+e\n",comm_rank(),sum_real,sum_imag);
}
}
//////////////////////////////////////////// class QKXTM_Correlator ////////////
// $$ Section 11: Class QKXTM_Correlator $$
//////////////////////////////////////////////////////////////////////////
QKXTM_Correlator::QKXTM_Correlator()
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
ghost_length = 0;
field_length = G_nSpin*G_nSpin;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_all();
}
QKXTM_Correlator::~QKXTM_Correlator(){
destroy_all();
}
void QKXTM_Correlator::download(){
cudaMemcpy(h_elem,d_elem,Bytes() - BytesGhost() , cudaMemcpyDeviceToHost);
checkCudaError();
double *corr_tmp = (double*) malloc( Bytes() - BytesGhost() );
if(corr_tmp == NULL)errorQuda("Error allocating memory for tmp correlator");
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // we always work with the layout where colors run inside spins
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int part = 0 ; part < 2 ; part++){
corr_tmp[iv*G_nSpin*G_nSpin*2 + mu*G_nSpin*2 + nu*2 + part] = h_elem[mu*G_nSpin*G_localVolume*2 + nu*G_localVolume*2 + iv*2 + part];
}
memcpy(h_elem,corr_tmp,Bytes() - BytesGhost());
free(corr_tmp);
corr_tmp = NULL;
}
// spatial volume reduction projected onto the requested sink momenta
void QKXTM_Correlator::fourierCorr(double *corrMom, int Nmom , int momElem[][3]){
// corrMom must be allocated with G_localL[3]*Nmom*4*4*2
// slowest is time then momentum then gamma then gamma1 then r,i
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
cudaBindTexture(0, correlationTex, d_elem, Bytes() );
double *h_partial_block = NULL;
double *d_partial_block = NULL;
h_partial_block = (double*) malloc(4*4*gridDim.x*2 * sizeof(double) ); // for complex *2
if(h_partial_block == NULL) errorQuda("error allocating memory for host partial block");
cudaMalloc((void**)&d_partial_block, 4*4*gridDim.x*2 * sizeof(double) );
double reduction[4*4*2];
double globalReduction[4*4*2];
for(int it = 0 ; it < G_localL[3] ; it++){
for(int imom = 0 ; imom < Nmom ; imom++){
fourierCorr_kernel<<<gridDim,blockDim>>>((double2*) d_partial_block ,it ,momElem[imom][0] , momElem[imom][1] , momElem[imom][2] ); // source position and proc position is in constant memory
cudaDeviceSynchronize();
cudaMemcpy(h_partial_block , d_partial_block , 4*4*gridDim.x*2 * sizeof(double) , cudaMemcpyDeviceToHost);
memset(reduction , 0 , 4*4*2 * sizeof(double) );
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[gamma*4*2 + gamma1*2 + 0] += h_partial_block[gamma*4*gridDim.x*2 + gamma1*gridDim.x*2 + i*2 + 0];
reduction[gamma*4*2 + gamma1*2 + 1] += h_partial_block[gamma*4*gridDim.x*2 + gamma1*gridDim.x*2 + i*2 + 1];
}
MPI_Reduce(&(reduction[0]) , &(globalReduction[0]) , 4*4*2 , MPI_DOUBLE , MPI_SUM , 0 , G_spaceComm); // only local root has the right value
if(G_localRank == 0){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
corrMom[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 0] = globalReduction[gamma*4*2 + gamma1*2 + 0];
corrMom[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 1] = globalReduction[gamma*4*2 + gamma1*2 + 1];
}
}
} // for all momenta
} // for all local timeslice
cudaUnbindTexture(correlationTex);
free(h_partial_block);
cudaFree(d_partial_block);
checkCudaError();
h_partial_block = NULL;
d_partial_block = NULL;
}
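// What fourierCorr() computes, per local timeslice t and sink momentum p (sketch of the
// math; the sign convention of the phase is the one implemented in fourierCorr_kernel and
// x_src is the source position kept in constant memory):
//
//   C(t,p)_{gamma,gamma1} = sum_{x in spatial volume} exp(+/- i 2*pi * p.(x - x_src)/L) * C(x,t)_{gamma,gamma1}
//
// The kernel produces per-block partial sums of this projection, the host adds the
// gridDim.x partials, and MPI_Reduce over G_spaceComm completes the spatial sum, so only
// the local root of each timeslice holds the final 4x4 complex matrix.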
void QKXTM_Correlator::packCorrelator(void *corr){
double *p_corr = (double*) corr;
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int part = 0 ; part < 2 ; part++){
h_elem[mu*G_nSpin*G_localVolume*2 + nu*G_localVolume*2 + iv*2 + part] = p_corr[iv*G_nSpin*G_nSpin*2 + mu*G_nSpin*2 + nu*2 + part];
}
printfQuda("Correlator qkxTM packed on gpu form\n");
}
void QKXTM_Correlator::loadCorrelator(){
cudaMemcpy(d_elem,h_elem,(bytes_total_length - bytes_ghost_length), cudaMemcpyHostToDevice );
checkCudaError();
printfQuda("Correlator qkxTM loaded on gpu\n");
}
//////////////////////////////////////////////////////// Contractions ///////////////////////////////////////
void quda::corrProton(QKXTM_Propagator &uprop, QKXTM_Propagator &dprop ,QKXTM_Correlator &corr){
printfQuda("Perform contractions for Proton\n");
cudaBindTexture(0,propagatorTexOne,uprop.D_elem(),uprop.Bytes()); // one will be up prop
cudaBindTexture(0,propagatorTexTwo,dprop.D_elem(),dprop.Bytes()); // two will be down prop
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
contract_Type1_kernel<<<gridDim,blockDim>>>((double2*) corr.D_elem() );
cudaDeviceSynchronize();
cudaUnbindTexture(propagatorTexOne);
cudaUnbindTexture(propagatorTexTwo);
checkCudaError();
}
void quda::corrNeutron(QKXTM_Propagator &uprop, QKXTM_Propagator &dprop ,QKXTM_Correlator &corr){
printfQuda("Perform contractions for Neutron\n");
cudaBindTexture(0,propagatorTexOne,dprop.D_elem(),dprop.Bytes());
cudaBindTexture(0,propagatorTexTwo,uprop.D_elem(),uprop.Bytes());
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
contract_Type1_kernel<<<gridDim,blockDim>>>((double2*) corr.D_elem() );
cudaDeviceSynchronize();
cudaUnbindTexture(propagatorTexOne);
cudaUnbindTexture(propagatorTexTwo);
checkCudaError();
}
//////////////// New //////////////////
void quda::corrPion(QKXTM_Propagator &prop, double *corr){
printfQuda("Perform contractions for Pion");
cudaBindTexture(0,propagatorTexOne,prop.D_elem(),prop.Bytes());
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
contract_twop_pion_kernel<<<gridDim,blockDim>>>((double2*) corr);
cudaDeviceSynchronize();
cudaUnbindTexture(propagatorTexOne);
checkCudaError();
}
void quda::performContractionsPion(QKXTM_Propagator &prop, int Nmom, int momElem[][3] , char *filenamePion){
if(init_qudaQKXTM == false)errorQuda("You must initialize qudaQKXTM first");
FILE *filePion;
if(comm_rank() == 0){
filePion = fopen(filenamePion,"w");
if(filePion == NULL){
fprintf(stderr,"Error open file paths for writting\n");
comm_exit(-1);
}
}
QKXTM_Field *field = new QKXTM_Field();
double *d_corr;
cudaMalloc((void**)&d_corr,field->Bytes());
double *corr_fourier = (double*) calloc(G_localL[3]*Nmom*2,sizeof(double));
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*2,sizeof(double));
corrPion(prop, d_corr);
field->fourierCorr(d_corr,corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++){
fprintf(filePion,"%d %+d %+d %+d \t %+e %+e\n",it,momElem[imom][0],momElem[imom][1],momElem[imom][2],corr_fourier_full[it*Nmom*2 + imom*2 + 0] , corr_fourier_full[it*Nmom*2 + imom*2 + 1]);
}
}
comm_barrier();
if(comm_rank() == 0)
fclose(filePion);
delete field;
cudaFree(d_corr);
free(corr_fourier);
free(corr_fourier_full);
}
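// Pattern used above (and in the routines below) to assemble the full time extent: every
// rank Fourier-projects its own G_localL[3] timeslices, the ranks of the time communicator
// G_timeComm gather those pieces with MPI_Gather (G_localL[3]*Nmom*2 doubles per rank here,
// times an extra 4*4 when the spin structure is kept open), so that all
// G_totalL[3] = G_nProc[3]*G_localL[3] slices end up ordered on the root, and only
// comm_rank() == 0 writes the file.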
/////////////////////////////////////////
void quda::performContractions(QKXTM_Propagator &uprop, QKXTM_Propagator &dprop , int Nmom, int momElem[][3] , char *filenameProton , char *filenameNeutron){
if(init_qudaQKXTM == false)errorQuda("You must initialize qudaQKXTM first");
FILE *fileProton, *fileNeutron;
if(comm_rank() == 0){
fileProton = fopen(filenameProton,"w");
fileNeutron = fopen(filenameNeutron,"w");
if(fileProton == NULL || fileNeutron == NULL){
fprintf(stderr,"Error open file paths for writting\n");
comm_exit(-1);
}
}
QKXTM_Correlator *corr = new QKXTM_Correlator();
double *corr_fourier = (double*) calloc(G_localL[3]*Nmom*4*4*2,sizeof(double));
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*4*4*2,sizeof(double));
corrProton(uprop,dprop,*corr);
corr->fourierCorr(corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(fileProton,"%d %+d %+d %+d %d %d \t %+e %+e\n",it,momElem[imom][0],momElem[imom][1],momElem[imom][2],gamma,gamma1,corr_fourier_full[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 0] , corr_fourier_full[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
}
comm_barrier();
corrNeutron(uprop,dprop,*corr);
corr->fourierCorr(corr_fourier,Nmom,momElem);
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(fileNeutron,"%d %+d %+d %+d %d %d \t %+e %+e\n",it,momElem[imom][0],momElem[imom][1],momElem[imom][2],gamma,gamma1,corr_fourier_full[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 0] , corr_fourier_full[it*Nmom*4*4*2 + imom*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
}
if(comm_rank() == 0){
fclose(fileProton);
fclose(fileNeutron);
}
delete corr;
free(corr_fourier);
free(corr_fourier_full);
}
void quda::fixSinkFourier(double *corr,double *corrMom, int Nmom , int momElem[][3]){
// corrMom must be allocated with G_localL[3]*Nmom*2
// slowest is time then momentum then r,i
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
cudaBindTexture(0, correlationTex, corr, G_localVolume*2*sizeof(double) );
double *h_partial_block = NULL;
double *d_partial_block = NULL;
h_partial_block = (double*) malloc(gridDim.x*2 * sizeof(double) ); // for complex *2
if(h_partial_block == NULL) errorQuda("error allocating memory for host partial block");
cudaMalloc((void**)&d_partial_block, gridDim.x*2 * sizeof(double) );
double reduction[2];
double globalReduction[2];
for(int it = 0 ; it < G_localL[3] ; it++){
for(int imom = 0 ; imom < Nmom ; imom++){
fourierCorr_kernel2<<<gridDim,blockDim>>>((double2*) d_partial_block ,it ,momElem[imom][0] , momElem[imom][1] , momElem[imom][2] ); // source position and proc position is in constant memory
cudaDeviceSynchronize();
cudaMemcpy(h_partial_block , d_partial_block , gridDim.x*2 * sizeof(double) , cudaMemcpyDeviceToHost);
memset(reduction , 0 , 2 * sizeof(double) );
for(int i =0 ; i < gridDim.x ; i++){
reduction[0] += h_partial_block[i*2 + 0];
reduction[1] += h_partial_block[i*2 + 1];
}
MPI_Reduce(&(reduction[0]) , &(globalReduction[0]) , 2 , MPI_DOUBLE , MPI_SUM , 0 , G_spaceComm); // only local root has the right value
if(G_localRank == 0){
corrMom[it*Nmom*2 + imom*2 + 0] = globalReduction[0];
corrMom[it*Nmom*2 + imom*2 + 1] = globalReduction[1];
}
} // for all momenta
} // for all local timeslice
cudaUnbindTexture(correlationTex);
free(h_partial_block);
cudaFree(d_partial_block);
checkCudaError();
h_partial_block = NULL;
d_partial_block = NULL;
}
void quda::fixSinkContractions(QKXTM_Propagator &seqProp, QKXTM_Propagator &prop , QKXTM_Gauge &gauge,whatProjector typeProj , char *filename , int Nmom , int momElem[][3] , whatParticle testParticle, int partFlag){
// append the projector and particle tags to the base filename
if(typeProj == QKXTM_TYPE1)
strcat(filename,"_type1");
else if (typeProj == QKXTM_TYPE2)
strcat(filename,"_type2");
else if (typeProj == QKXTM_PROJ_G5G1)
strcat(filename,"_G5G1");
else if (typeProj == QKXTM_PROJ_G5G2)
strcat(filename,"_G5G2");
else if (typeProj == QKXTM_PROJ_G5G3)
strcat(filename,"_G5G3");
if(testParticle == QKXTM_PROTON)
strcat(filename,"_proton");
else
strcat(filename,"_neutron");
char filename_local[257] , filename_noether[257] , filename_oneD[257];
sprintf(filename_local,"%s_%s.dat",filename,"local");
sprintf(filename_noether,"%s_%s.dat",filename,"noether");
sprintf(filename_oneD,"%s_%s.dat",filename,"oneD");
FILE *fileLocal , *fileNoether , *fileOneD;
if(comm_rank() == 0){
fileLocal = fopen(filename_local,"w");
fileNoether = fopen(filename_noether,"w");
fileOneD = fopen(filename_oneD, "w");
if(fileLocal == NULL || fileNoether == NULL || fileOneD == NULL){
fprintf(stderr,"Error opening file for writing\n");
comm_exit(-1);
}
}
seqProp.applyGamma5();
seqProp.conjugate();
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
// holds correlator in position space
double *d_corr;
cudaError error;
error = cudaMalloc((void**)&d_corr, G_localVolume*2*sizeof(double));
if(error != cudaSuccess)errorQuda("Error allocating device memory for correlator");
double *corr_fourier = (double*) calloc(G_localL[3]*Nmom*2,sizeof(double));
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*2,sizeof(double));
// to speed up contraction we use texture binding for seq-prop and prop
cudaBindTexture(0, seqPropagatorTex, seqProp.D_elem(), seqProp.Bytes());
cudaBindTexture(0, fwdPropagatorTex, prop.D_elem(), prop.Bytes());
// +++++++++++++++ local operators +++++++++++++++++++// (10 local operators)
// for local operators we use 1 , g1 , g2 , g3 , g4 , g5 , g5g1 , g5g2 , g5g3 , g5g4
// so we map operators to integers 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9
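// e.g. iflag = 0 is the identity (scalar) insertion, iflag = 5 the pseudoscalar g5,
// and iflag = 9 the g5g4 insertion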
for(int iflag = 0 ; iflag < 10 ; iflag++){
fixSinkContractions_local_kernel<<<gridDim,blockDim>>>((double2*) d_corr , iflag, testParticle, partFlag);
cudaDeviceSynchronize(); // to make sure that we have the data in corr
fixSinkFourier(d_corr,corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++){
fprintf(fileLocal,"%d %d %+d %+d %+d \t %+e %+e\n",iflag,it,momElem[imom][0],momElem[imom][1],momElem[imom][2],corr_fourier_full[it*Nmom*2 + imom*2 + 0] , corr_fourier_full[it*Nmom*2 + imom*2 + 1]);
}
}
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// communication
cudaBindTexture(0, gaugeDerivativeTex, gauge.D_elem(), gauge.Bytes());
gauge.ghostToHost();
gauge.cpuExchangeGhost(); // communicate gauge
gauge.ghostToDevice();
comm_barrier(); // just in case
prop.ghostToHost();
prop.cpuExchangeGhost(); // communicate forward propagator
prop.ghostToDevice();
comm_barrier(); // just in case
seqProp.ghostToHost();
seqProp.cpuExchangeGhost(); // communicate sequential propagator
seqProp.ghostToDevice();
comm_barrier(); // just in case
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// +++++++++++++++++++ conserved current +++++++++++++++++++++++++++++++++++++++++//
// mapping gamma indices
// g1 , g2 , g3 , g4
// 0 , 1 , 2 , 3
for(int idir = 0 ; idir < 4 ; idir++){
fixSinkContractions_noether_kernel<<<gridDim,blockDim>>>((double2*) d_corr , idir, testParticle, partFlag);
cudaDeviceSynchronize(); // to make sure that we have the data in corr
fixSinkFourier(d_corr,corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++){
fprintf(fileNoether,"%d %d %+d %+d %+d \t %+e %+e\n",idir,it,momElem[imom][0],momElem[imom][1],momElem[imom][2],corr_fourier_full[it*Nmom*2 + imom*2 + 0] , corr_fourier_full[it*Nmom*2 + imom*2 + 1]);
}
}
}
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// +++++++++++++++++++ derivative operators ++++++++++++++++++++++++++++++++//
// for derivative operators we have for gamma matrices g1,g2,g3,g4 ,g5g1,g5g2,g5g3,g5g4 => 4+4 combinations
// for derivative index we have 4 index D^0 , D^1 , D^2 , D^3
// for total we have 8*4=32 combinations
// mapping gamma indices (the derivative has a separate index)
// g1 , g2 , g3 , g4 , g5g1 , g5g2 , g5g3 , g5g4
// 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7
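// e.g. the pair (iflag = 4 , dir = 2) below is the g5g1 insertion with the covariant
// derivative taken in direction D^2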
// cudaPrintfInit();
for(int iflag = 0 ; iflag < 8 ; iflag++){ // iflag perform loop over gammas
for(int dir = 0 ; dir < 4 ; dir++){
fixSinkContractions_oneD_kernel<<<gridDim,blockDim>>>((double2*) d_corr , iflag, dir , testParticle, partFlag);
cudaDeviceSynchronize(); // to make sure that we have the data in corr
fixSinkFourier(d_corr,corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++){
fprintf(fileOneD,"%d %d %d %+d %+d %+d \t %+e %+e\n",iflag,dir,it,momElem[imom][0],momElem[imom][1],momElem[imom][2],corr_fourier_full[it*Nmom*2 + imom*2 + 0] , corr_fourier_full[it*Nmom*2 + imom*2 + 1]);
}
}
}
}
// if(comm_rank() == 0) cudaPrintfDisplay(stdout,true);
// cudaPrintfEnd();
// ------------------------------------------------------------------------------------------
cudaUnbindTexture(seqPropagatorTex);
cudaUnbindTexture(fwdPropagatorTex);
cudaUnbindTexture(gaugeDerivativeTex);
cudaFree(d_corr);
checkCudaError();
free(corr_fourier_full);
free(corr_fourier);
if(comm_rank() == 0){
fclose(fileLocal);
fclose(fileNoether);
fclose(fileOneD);
}
}
void quda::fixSinkContractions_nonLocal(QKXTM_Propagator &seqProp, QKXTM_Propagator &prop , QKXTM_Gauge &gauge,whatProjector typeProj , char *filename , int Nmom , int momElem[][3] , whatParticle testParticle, int partFlag, double *deviceWilsonPath, double *deviceWilsonPathBwd,int direction){
// append the projector and particle tags to the base filename
if(typeProj == QKXTM_TYPE1)
strcat(filename,"_type1");
else if (typeProj == QKXTM_TYPE2)
strcat(filename,"_type2");
else if (typeProj == QKXTM_PROJ_G5G1)
strcat(filename,"_G5G1");
else if (typeProj == QKXTM_PROJ_G5G2)
strcat(filename,"_G5G2");
else if (typeProj == QKXTM_PROJ_G5G3)
strcat(filename,"_G5G3");
if(testParticle == QKXTM_PROTON)
strcat(filename,"_proton");
else
strcat(filename,"_neutron");
char filename_nonLocal[257];
sprintf(filename_nonLocal,"%s_%s.dat",filename,"nonLocal");
FILE *fileNonLocal;
if(comm_rank() == 0){
fileNonLocal = fopen(filename_nonLocal,"w");
if(fileNonLocal == NULL){
fprintf(stderr,"Error open file for writting\n");
comm_exit(-1);
}
}
seqProp.applyGamma5();
seqProp.conjugate();
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
// holds correlator in position space
double *d_corr;
cudaError error;
error = cudaMalloc((void**)&d_corr, G_localVolume*2*sizeof(double));
if(error != cudaSuccess)errorQuda("Error allocating device memory for correlator");
double *corr_fourier = (double*) calloc(G_localL[3]*Nmom*2,sizeof(double));
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*2,sizeof(double));
double *corr_fourier_bwd = (double*) calloc(G_localL[3]*Nmom*2,sizeof(double));
double *corr_fourier_full_bwd = (double*) calloc(G_totalL[3]*Nmom*2,sizeof(double));
// to speed up contraction we use texture binding for seq-prop and prop
cudaBindTexture(0, seqPropagatorTex, seqProp.D_elem(), seqProp.Bytes());
cudaBindTexture(0, fwdPropagatorTex, prop.D_elem(), prop.Bytes());
cudaBindTexture(0, gaugeDerivativeTex, gauge.D_elem(), gauge.Bytes());
for(int dl = 0 ; dl < G_totalL[direction]/2 ; dl++){
//// fwd direction /////
fixSinkContractions_nonLocal_kernel<<<gridDim,blockDim>>>((double2*) d_corr ,(double2*) deviceWilsonPath, dl, testParticle, partFlag,direction);
cudaDeviceSynchronize(); // to make sure that we have the data in corr
fixSinkFourier(d_corr,corr_fourier,Nmom,momElem);
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
/////////////
///// bwd direction ////////
fixSinkContractions_nonLocalBwd_kernel<<<gridDim,blockDim>>>((double2*) d_corr ,(double2*) deviceWilsonPathBwd, dl, testParticle, partFlag,direction);
cudaDeviceSynchronize(); // to make sure that we have the data in corr
fixSinkFourier(d_corr,corr_fourier_bwd,Nmom,momElem);
error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(corr_fourier_bwd,G_localL[3]*Nmom*2,MPI_DOUBLE,corr_fourier_full_bwd,G_localL[3]*Nmom*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
//////////////
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom = 0 ; imom < Nmom ; imom++){
fprintf(fileNonLocal,"%d %d %+d %+d %+d \t %+e %+e \t %+e %+e\n",dl,it,momElem[imom][0],momElem[imom][1],momElem[imom][2],
corr_fourier_full[it*Nmom*2 + imom*2 + 0] , corr_fourier_full[it*Nmom*2 + imom*2 + 1],
corr_fourier_full_bwd[it*Nmom*2 + imom*2 + 0] , corr_fourier_full_bwd[it*Nmom*2 + imom*2 + 1]);
}
}
}
// ------------------------------------------------------------------------------------------
cudaUnbindTexture(seqPropagatorTex);
cudaUnbindTexture(fwdPropagatorTex);
cudaUnbindTexture(gaugeDerivativeTex);
cudaFree(d_corr);
checkCudaError();
free(corr_fourier_full);
free(corr_fourier);
free(corr_fourier_full_bwd);
free(corr_fourier_bwd);
if(comm_rank() == 0){
fclose(fileNonLocal);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// $$ Section 12: Class QKXTM_Propagator3D $$
/////////////////////////////////////////////////////// class Propagator 3D /////////
QKXTM_Propagator3D::QKXTM_Propagator3D()
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
field_length = G_nSpin*G_nSpin*G_nColor*G_nColor;
bytes_total_length = G_localL[0]*G_localL[1]*G_localL[2]*field_length*2*sizeof(double);
create_host();
create_device();
zero();
}
QKXTM_Propagator3D::~QKXTM_Propagator3D(){
destroy_host();
destroy_device();
}
void QKXTM_Propagator3D::absorbTimeSlice(QKXTM_Propagator &prop, int timeslice){
double *pointer_src = NULL;
double *pointer_dst = NULL;
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++){
pointer_dst = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume3D*2 + nu*G_nColor*G_nColor*G_localVolume3D*2 + c1*G_nColor*G_localVolume3D*2 + c2*G_localVolume3D*2;
pointer_src = prop.D_elem() + mu*G_nSpin*G_nColor*G_nColor*G_localVolume*2 + nu*G_nColor*G_nColor*G_localVolume*2 + c1*G_nColor*G_localVolume*2 + c2*G_localVolume*2 + timeslice*G_localVolume3D*2;
cudaMemcpy(pointer_dst, pointer_src, G_localVolume3D*2*sizeof(double), cudaMemcpyDeviceToDevice);
}
pointer_src = NULL;
pointer_dst = NULL;
checkCudaError();
}
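// Note on the copy above: within each spin-color component the volume index runs with time
// slowest, so the requested timeslice is one contiguous block of G_localVolume3D complex
// numbers starting at offset timeslice*G_localVolume3D, and a single
// cudaMemcpyDeviceToDevice per (mu,nu,c1,c2) component is enough.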
void QKXTM_Propagator3D::absorbVectorTimeSlice(QKXTM_Vector &vec, int timeslice, int nu , int c2){
double *pointer_src = NULL;
double *pointer_dst = NULL;
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
pointer_dst = d_elem + mu*G_nSpin*G_nColor*G_nColor*G_localVolume3D*2 + nu*G_nColor*G_nColor*G_localVolume3D*2 + c1*G_nColor*G_localVolume3D*2 + c2*G_localVolume3D*2;
pointer_src = vec.D_elem() + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + timeslice*G_localVolume3D*2;
cudaMemcpy(pointer_dst, pointer_src, G_localVolume3D*2 * sizeof(double), cudaMemcpyDeviceToDevice);
}
pointer_src = NULL;
pointer_dst = NULL;
checkCudaError();
}
void QKXTM_Propagator3D::download(){
cudaMemcpy(h_elem,d_elem,Bytes() , cudaMemcpyDeviceToHost);
checkCudaError();
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
double *propagator3D_tmp = (double*) malloc( Bytes() );
if(propagator3D_tmp == NULL)errorQuda("Error allocating memory for tmp propagator");
for(int iv = 0 ; iv < G_localVolume3D ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // we always work with the layout where colors run inside spins
for(int nu = 0 ; nu < G_nSpin ; nu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int c2 = 0 ; c2 < G_nColor ; c2++)
for(int part = 0 ; part < 2 ; part++){
propagator3D_tmp[iv*G_nSpin*G_nSpin*G_nColor*G_nColor*2 + mu*G_nSpin*G_nColor*G_nColor*2 + nu*G_nColor*G_nColor*2 + c1*G_nColor*2 + c2*2 + part] = h_elem[mu*G_nSpin*G_nColor*G_nColor*G_localVolume3D*2 + nu*G_nColor*G_nColor*G_localVolume3D*2 + c1*G_nColor*G_localVolume3D*2 + c2*G_localVolume3D*2 + iv*2 + part];
}
memcpy(h_elem,propagator3D_tmp,Bytes() );
free(propagator3D_tmp);
propagator3D_tmp = NULL;
}
void QKXTM_Propagator3D::justCopyToHost(){
cudaMemcpy(H_elem() , D_elem() , Bytes() , cudaMemcpyDeviceToHost);
checkCudaError();
}
void QKXTM_Propagator3D::justCopyToDevice(){
cudaMemcpy(D_elem() , H_elem() , Bytes() , cudaMemcpyHostToDevice);
checkCudaError();
}
void QKXTM_Propagator3D::broadcast(int tsink){
justCopyToHost(); // transfer data to host so we can communicate
comm_barrier();
int bcastRank = tsink/G_localL[3];
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
int error = MPI_Bcast(H_elem() , 4*4*3*3*G_localVolume3D*2 , MPI_DOUBLE , bcastRank , G_timeComm ); // broadcast the data from node that has the tsink to other nodes
if(error != MPI_SUCCESS)errorQuda("Error in mpi broadcasting");
justCopyToDevice();
}
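// Example of the rank arithmetic above: with G_localL[3] = 12 local timeslices per rank, a
// sink at global time tsink = 30 lives on time-communicator rank 30/12 = 2, and that rank
// broadcasts its 4*4*3*3*G_localVolume3D complex values to all others in G_timeComm.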
// $$ Section 13: Class QKXTM_Vector3D $$
//////////////////////////////////////////////////// class Vector3D
QKXTM_Vector3D::QKXTM_Vector3D()
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
ghost_length = 0;
field_length = G_nSpin*G_nColor;
for(int i = 0 ; i < G_nDim ; i++)
ghost_length += 2*G_surface3D[i];
total_length = G_localVolume/G_localL[3] + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_host();
create_device();
zero();
}
QKXTM_Vector3D::~QKXTM_Vector3D(){
destroy_host();
destroy_device();
}
void QKXTM_Vector3D::absorbTimeSlice(QKXTM_Vector &vec, int timeslice){
double *pointer_src = NULL;
double *pointer_dst = NULL;
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
for(int mu = 0 ; mu < G_nSpin ; mu++)
for(int c1 = 0 ; c1 < G_nColor ; c1++){
pointer_dst = d_elem + mu*G_nColor*G_localVolume3D*2 + c1*G_localVolume3D*2;
pointer_src = vec.D_elem() + mu*G_nColor*G_localVolume*2 + c1*G_localVolume*2 + timeslice*G_localVolume3D*2;
cudaMemcpy(pointer_dst, pointer_src, G_localVolume3D*2*sizeof(double), cudaMemcpyDeviceToDevice);
}
pointer_src = NULL;
pointer_dst = NULL;
checkCudaError();
}
void QKXTM_Vector3D::justCopyToHost(){
cudaMemcpy(H_elem() , D_elem() , Bytes() , cudaMemcpyDeviceToHost);
checkCudaError();
}
void QKXTM_Vector3D::justCopyToDevice(){
cudaMemcpy(D_elem() , H_elem() , Bytes() , cudaMemcpyHostToDevice);
checkCudaError();
}
void QKXTM_Vector3D::download(){
cudaMemcpy(h_elem,d_elem,Bytes() - BytesGhost() , cudaMemcpyDeviceToHost);
checkCudaError();
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
double *vector_tmp = (double*) malloc( Bytes() - BytesGhost() );
if(vector_tmp == NULL)errorQuda("Error allocating memory for tmp vector");
for(int iv = 0 ; iv < G_localVolume3D ; iv++)
for(int mu = 0 ; mu < G_nSpin ; mu++) // we always work with the layout where colors run inside spins
for(int c1 = 0 ; c1 < G_nColor ; c1++)
for(int part = 0 ; part < 2 ; part++){
vector_tmp[iv*G_nSpin*G_nColor*2 + mu*G_nColor*2 + c1*2 + part] = h_elem[mu*G_nColor*G_localVolume3D*2 + c1*G_localVolume3D*2 + iv*2 + part];
}
memcpy(h_elem,vector_tmp,Bytes() - BytesGhost());
free(vector_tmp);
vector_tmp = NULL;
}
void QKXTM_Vector3D::broadcast(int tsink){
justCopyToHost(); // transfer data to host so we can communicate
comm_barrier();
int bcastRank = tsink/G_localL[3];
int G_localVolume3D = G_localL[0]*G_localL[1]*G_localL[2];
int error = MPI_Bcast(H_elem() , 4*3*G_localVolume3D*2 , MPI_DOUBLE , bcastRank , G_timeComm ); // broadcast the data from node that has the tsink to other nodes
if(error != MPI_SUCCESS)errorQuda("Error in mpi broadcasting");
justCopyToDevice();
}
//****************************************************************************************************************************************************************************
//****************************************************************************************************************************************************************************
// $$ Section 14: Stochastic Connected Diagrams $$
// Contents
// 1) insLineFourier
// 2) write_3pf_local
// 3) write_3pf_oneD
// 4) QKXTM_Vector3D::fourier
// 5) partialContract3pf_upart_proton
// 6) partialContract3pf_upart_neutron
// 7) partialContract3pf_dpart_neutron
// 8) partialContract3pf_dpart_proton
// 9) finalize_contract3pf_mixLevel
// 10) finalize_contract3pf_oneLevel
// 11) threepStochUpart
// 12) threepStochDpart
///////////////////////////////////////////// functions for stochastic three point functions //////////
#define WRITE_BINARY
QKXTM_VectorX8::QKXTM_VectorX8()
{
if(G_init_qudaQKXTM_flag == false) errorQuda("You must initialize init_qudaQKXTM first");
ghost_length = 0;
field_length = 8*G_nSpin*G_nColor;
total_length = G_localVolume + ghost_length;
bytes_total_length = total_length*field_length*2*sizeof(double);
bytes_ghost_length = ghost_length*field_length*2*sizeof(double);
create_all();
}
QKXTM_VectorX8::~QKXTM_VectorX8(){
destroy_all();
}
void quda::insLineFourier(double *insLineMom , double *insLine, int Nmom , int momElem[][3]){
// insLineMom time,spin,color
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
cudaBindTexture(0, insLineFourierTex, insLine, 4*3*G_localVolume*2*sizeof(double) );
double *h_partial_block = NULL;
double *d_partial_block = NULL;
h_partial_block = (double*) malloc(4*3*gridDim.x*2 * sizeof(double) ); // for complex *2
if(h_partial_block == NULL) errorQuda("error allocating memory for host partial block");
cudaMalloc((void**)&d_partial_block, 4*3*gridDim.x*2 * sizeof(double) );
double reduction[4*3*2];
double globalReduction[4*3*2];
for(int it = 0 ; it < G_localL[3] ; it++){
for(int imom = 0 ; imom < Nmom ; imom++){
fourierCorr_kernel3<<<gridDim,blockDim>>>((double2*) d_partial_block ,it , momElem[imom][0] , momElem[imom][1] , momElem[imom][2] ); // future include mom here
cudaDeviceSynchronize();
cudaMemcpy(h_partial_block , d_partial_block , 4*3*gridDim.x*2 * sizeof(double) , cudaMemcpyDeviceToHost);
memset(reduction , 0 , 4*3*2 * sizeof(double) );
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int c1 = 0 ; c1 < 3 ; c1++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[gamma*3*2 + c1*2 + 0] += h_partial_block[gamma*3*gridDim.x*2 + c1*gridDim.x*2 + i*2 + 0];
reduction[gamma*3*2 + c1*2 + 1] += h_partial_block[gamma*3*gridDim.x*2 + c1*gridDim.x*2 + i*2 + 1];
}
MPI_Reduce(&(reduction[0]) , &(globalReduction[0]) , 4*3*2 , MPI_DOUBLE , MPI_SUM , 0 , G_spaceComm); // only local root has the right value
if(G_localRank == 0){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int c1 = 0 ; c1 < 3 ; c1++){
insLineMom[it*Nmom*4*3*2 + imom*4*3*2 + gamma*3*2 + c1*2 + 0] = globalReduction[gamma*3*2 + c1*2 + 0];
insLineMom[it*Nmom*4*3*2 + imom*4*3*2 + gamma*3*2 + c1*2 + 1] = globalReduction[gamma*3*2 + c1*2 + 1];
}
}
}
} // for all local timeslice
cudaUnbindTexture(insLineFourierTex);
free(h_partial_block);
cudaFree(d_partial_block);
checkCudaError();
h_partial_block = NULL;
d_partial_block = NULL;
}
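// As in fourierCorr(), the insertion line is projected to momentum space one local
// timeslice at a time; here the open indices are spin and color (4*3 complex numbers per
// timeslice and momentum) instead of the two open spin indices of a correlator.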
// we must calculate the insertion line for all the operators
#define MAX_PARTICLES 18
/*
static void write_3pf_local(FILE *file_ptr, double *results, int iflag , int Nmom , int momElem[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*Nmom*4*4*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*Nmom*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++)
for(int imom2 = 0 ; imom2 < Nmom ; imom2++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(file_ptr,"%d %d %+d %+d %+d %+d %+d %+d %d %d \t %+e %+e\n",iflag,it,momElem[imom1][0],momElem[imom1][1],momElem[imom1][2],
momElem[imom2][0],momElem[imom2][1],momElem[imom2][2],
gamma,gamma1,corr_fourier_full[it*Nmom*Nmom*4*4*2 + imom1*Nmom*4*4*2 + imom2*4*4*2 + gamma*4*2 + gamma1*2 + 0] ,
corr_fourier_full[it*Nmom*Nmom*4*4*2 + imom1*Nmom*4*4*2 +imom2*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
}
comm_barrier();
free(corr_fourier_full);
}
static void write_3pf_oneD(FILE *file_ptr, double *results, int iflag ,int dir , int Nmom , int momElem[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*Nmom*4*4*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*Nmom*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++)
for(int imom2 = 0 ; imom2 < Nmom ; imom2++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(file_ptr,"%d %d %d %+d %+d %+d %+d %+d %+d %d %d \t %+e %+e\n",iflag,dir,it,momElem[imom1][0],momElem[imom1][1],momElem[imom1][2],
momElem[imom2][0],momElem[imom2][1],momElem[imom2][2],
gamma,gamma1,corr_fourier_full[it*Nmom*Nmom*4*4*2 + imom1*Nmom*4*4*2 + imom2*4*4*2 + gamma*4*2 + gamma1*2 + 0] ,
corr_fourier_full[it*Nmom*Nmom*4*4*2 + imom1*Nmom*4*4*2 +imom2*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
}
comm_barrier();
free(corr_fourier_full);
}
*/
static void write_3pf_Nonlocal_zeroMomIns(FILE *file_ptr, double *results,int dir, int iflag , int NmomSink , int momElemSink[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*NmomSink*4*4*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*NmomSink*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*NmomSink*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < NmomSink ; imom1++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(file_ptr,"%d %d %d %+d %+d %+d %d %d \t %+e %+e\n",dir,iflag,it,momElemSink[imom1][0],momElemSink[imom1][1],momElemSink[imom1][2],
gamma,gamma1,corr_fourier_full[it*NmomSink*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 0] ,
corr_fourier_full[it*NmomSink*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
}
comm_barrier();
free(corr_fourier_full);
}
static void write_3pf_local_zeroMomSink(FILE *file_ptr, double *results, int iflag , int Nmom , int momElem[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*4*4*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
#ifdef WRITE_BINARY
fwrite((void*) corr_fourier_full, sizeof(double) , G_totalL[3]*Nmom*4*4*2,file_ptr);
#else
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(file_ptr,"%d %d %+d %+d %+d %d %d \t %+e %+e\n",iflag,it,momElem[imom1][0],momElem[imom1][1],momElem[imom1][2],
gamma,gamma1,corr_fourier_full[it*Nmom*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 0] ,
corr_fourier_full[it*Nmom*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
#endif
}
comm_barrier();
free(corr_fourier_full);
}
static void write_3pf_oneD_zeroMomSink(FILE *file_ptr, double *results, int iflag ,int dir , int Nmom , int momElem[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*Nmom*4*4*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*Nmom*4*4*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
#ifdef WRITE_BINARY
fwrite((void*) corr_fourier_full, sizeof(double) , G_totalL[3]*Nmom*4*4*2,file_ptr);
#else
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++)
for(int gamma = 0 ; gamma < G_nSpin ; gamma++)
for(int gamma1 = 0 ; gamma1 < G_nSpin ; gamma1++){
fprintf(file_ptr,"%d %d %d %+d %+d %+d %d %d \t %+e %+e\n",iflag,dir,it,momElem[imom1][0],momElem[imom1][1],momElem[imom1][2],
gamma,gamma1,corr_fourier_full[it*Nmom*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 0] ,
corr_fourier_full[it*Nmom*4*4*2 + imom1*4*4*2 + gamma*4*2 + gamma1*2 + 1]);
}
#endif
}
comm_barrier();
free(corr_fourier_full);
}
static void write_3pf_Nonlocal_Pion_zeroMomIns(FILE *file_ptr, double *results,int dir, int iflag , int NmomSink , int momElemSink[][3] ){
double *corr_fourier_full = (double*) calloc(G_totalL[3]*NmomSink*2,sizeof(double));
int error = 0;
if(G_timeRank >= 0 && G_timeRank < G_nProc[3] ){
error = MPI_Gather(results,G_localL[3]*NmomSink*2,MPI_DOUBLE,corr_fourier_full,G_localL[3]*NmomSink*2,MPI_DOUBLE,0,G_timeComm);
if(error != MPI_SUCCESS) errorQuda("Error in MPI_gather");
}
if(comm_rank() == 0){
for(int it = 0 ; it < G_totalL[3] ; it++)
for(int imom1 = 0 ; imom1 < NmomSink ; imom1++){
fprintf(file_ptr,"%d %d %d %+d %+d %+d \t %+e %+e\n",dir,iflag,it,momElemSink[imom1][0],momElemSink[imom1][1],momElemSink[imom1][2],
corr_fourier_full[it*NmomSink*2 + imom1*2 + 0] ,
corr_fourier_full[it*NmomSink*2 + imom1*2 + 1]);
}
}
comm_barrier();
free(corr_fourier_full);
}
void QKXTM_Vector3D::fourier(double *vecMom, int Nmom , int momElem[][3]){
// vecMom must be allocated with Nmom*4*3*2
// slowest is momentum then gamma then c1 then r,i
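// a minimal indexing sketch (my reading of the layout above, not original code):
//   vecMom[ ((imom*4 + gamma)*3 + c1)*2 + reim ]   with reim = 0 (real), 1 (imag),
// which matches the writes below, e.g. vecMom[imom*4*3*2 + gamma*3*2 + c1*2 + 0]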
// cudaError error;
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1); // now is G_localVolume3D
cudaBindTexture(0, correlationTex, d_elem, Bytes() );
//if(error != cudaSuccess)fprintf(stderr,"Error bind texture\n");
double *h_partial_block = NULL;
double *d_partial_block = NULL;
h_partial_block = (double*) malloc(4*3*gridDim.x*2 * sizeof(double) ); // for complex *2
if(h_partial_block == NULL) errorQuda("error allocating memory for host partial block");
cudaMalloc((void**)&d_partial_block, 4*3*gridDim.x*2 * sizeof(double) );
//if(error != cudaSuccess)fprintf(stderr,"Error malloc\n");
double reduction[4*3*2];
double globalReduction[4*3*2];
for(int imom = 0 ; imom < Nmom ; imom++){
fourierCorr_kernel4<<<gridDim,blockDim>>>((double2*) d_partial_block ,momElem[imom][0] , momElem[imom][1] , momElem[imom][2] ); // source position and proc position is in constant memory
cudaDeviceSynchronize();
cudaMemcpy(h_partial_block , d_partial_block , 4*3*gridDim.x*2 * sizeof(double) , cudaMemcpyDeviceToHost);
//if(error != cudaSuccess)fprintf(stderr,"Error memcpy\n");
memset(reduction , 0 , 4*3*2 * sizeof(double) );
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int c1 = 0 ; c1 < 3 ; c1++)
for(int i =0 ; i < gridDim.x ; i++){
reduction[gamma*3*2 + c1*2 + 0] += h_partial_block[gamma*3*gridDim.x*2 + c1*gridDim.x*2 + i*2 + 0];
reduction[gamma*3*2 + c1*2 + 1] += h_partial_block[gamma*3*gridDim.x*2 + c1*gridDim.x*2 + i*2 + 1];
}
MPI_Reduce(&(reduction[0]) , &(globalReduction[0]) , 4*3*2 , MPI_DOUBLE , MPI_SUM , 0 , G_spaceComm); // only local root has the right value
if(G_localRank == 0){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int c1 = 0 ; c1 < 3 ; c1++){
vecMom[ imom*4*3*2 + gamma*3*2 + c1*2 + 0] = globalReduction[gamma*3*2 + c1*2 + 0];
vecMom[ imom*4*3*2 + gamma*3*2 + c1*2 + 1] = globalReduction[gamma*3*2 + c1*2 + 1];
}
}
} // for all momenta
cudaUnbindTexture(correlationTex);
free(h_partial_block);
cudaFree(d_partial_block);
//if(error != cudaSuccess)fprintf(stderr,"Error cuda free\n");
checkCudaError();
h_partial_block = NULL;
d_partial_block = NULL;
}
static void partialContract3pf_upart_pion(double *pion_level, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
// cudaPrintfInit ();
partial_Contract3pf_pion_kernel<<<gridDim,blockDim>>>((double2*) vec3D.D_elem() , 0);
// cudaPrintfDisplay (stdout, true);
//cudaPrintfEnd ();
cudaDeviceSynchronize();
checkCudaError();
vec3D.fourier(pion_level,Nmom,momElem);
}
static void partialContract3pf_dpart_pion(double *pion_level, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
partial_Contract3pf_pion_kernel<<<gridDim,blockDim>>>((double2*) vec3D.D_elem() , 1);
cudaDeviceSynchronize();
checkCudaError();
vec3D.fourier(pion_level,Nmom,momElem);
}
static void partialContract3pf_upart_proton(double *proton_level1,double *proton_level3, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
partial_lvl1_Contract3pf_Type1_1_kernel<<<gridDim,blockDim>>>((double2*) vec3D.D_elem() , 1 , 2);
cudaDeviceSynchronize();
checkCudaError();
vec3D.fourier(proton_level1,Nmom,momElem);
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
partial_lvl3_Contract3pf_Type1_1_kernel<<<gridDim,blockDim>>>((double2*) vec3D.D_elem(),gamma,gamma1, 1 , 2);
cudaDeviceSynchronize();
checkCudaError();
double *ptr = proton_level3 + gamma*4*Nmom*4*3*2 + gamma1*Nmom*4*3*2;
vec3D.fourier(ptr,Nmom,momElem);
}
checkCudaError();
}
static void partialContract3pf_upart_neutron(double *neutron_level3, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
partial_lvl3_Contract3pf_Type1_2_kernel<<<gridDim,blockDim>>>((double2*) vec3D.D_elem(),gamma,gamma1, 2);
cudaDeviceSynchronize();
checkCudaError();
double *ptr = neutron_level3 + gamma*4*Nmom*4*3*2 + gamma1*Nmom*4*3*2;
vec3D.fourier(ptr,Nmom,momElem);
}
checkCudaError();
}
static void partialContract3pf_dpart_neutron(double *neutron_level1,double *neutron_level3, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
partial_lvl1_Contract3pf_Type1_1_kernel<<<gridDim,blockDim>>>((double2*) vec3D.D_elem() , 2 , 1);
cudaDeviceSynchronize();
checkCudaError();
vec3D.fourier(neutron_level1,Nmom,momElem);
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
partial_lvl3_Contract3pf_Type1_1_kernel<<<gridDim,blockDim>>>((double2*) vec3D.D_elem(),gamma,gamma1, 2 , 1);
cudaDeviceSynchronize();
checkCudaError();
double *ptr = neutron_level3 + gamma*4*Nmom*4*3*2 + gamma1*Nmom*4*3*2;
vec3D.fourier(ptr,Nmom,momElem);
}
checkCudaError();
}
static void partialContract3pf_dpart_proton(double *proton_level3, QKXTM_Vector3D &vec3D , int Nmom,int momElem[][3]){
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume/G_localL[3] + blockDim.x -1)/blockDim.x , 1 , 1);
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
partial_lvl3_Contract3pf_Type1_2_kernel<<<gridDim,blockDim>>>((double2*) vec3D.D_elem(),gamma,gamma1, 1);
cudaDeviceSynchronize();
checkCudaError();
double *ptr = proton_level3 + gamma*4*Nmom*4*3*2 + gamma1*Nmom*4*3*2;
vec3D.fourier(ptr,Nmom,momElem);
}
checkCudaError();
}
static void finalize_contract3pf_mixLevel(Complex *res,Complex *Iins, Complex *lvl3, Complex *lvl1, int Nmom, int momElem[][3]){
memset(res,0,G_localL[3]*Nmom*4*4*2*sizeof(double));
for(int it = 0 ; it < G_localL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
for(int color = 0 ; color < 3 ; color++){
res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*Nmom*4*3 + imom1*4*3 + gamma1*3 + color] * lvl1[gamma*3 + color];
for(int spin = 0 ; spin < 4 ; spin++){
res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*Nmom*4*3 + imom1*4*3 + spin*3 + color] * lvl3[gamma*4*4*3 + gamma1*4*3 + spin*3 + color];
}
}
}
}
}
static void finalize_contract3pf_oneLevel(Complex *res,Complex *Iins, Complex *lvl3, int Nmom, int momElem[][3]){
memset(res,0,G_localL[3]*Nmom*4*4*2*sizeof(double));
for(int it = 0 ; it < G_localL[3] ; it++)
for(int imom1 = 0 ; imom1 < Nmom ; imom1++){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
for(int color = 0 ; color < 3 ; color++)
for(int spin = 0 ; spin < 4 ; spin++){
res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*Nmom*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*Nmom*4*3 + imom1*4*3 + spin*3 + color] * lvl3[gamma*4*4*3 + gamma1*4*3 + spin*3 + color];
}
}
}
}
static void finalize_contract3pf_mixLevel_SinkMom(Complex *res,Complex *Iins, Complex *lvl3, Complex *lvl1, int NmomSink, int momElemSink[][3]){
memset(res,0,G_localL[3]*NmomSink*4*4*2*sizeof(double));
for(int it = 0 ; it < G_localL[3] ; it++)
for(int imom1 = 0 ; imom1 < NmomSink ; imom1++){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
for(int color = 0 ; color < 3 ; color++){
res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*4*3 + gamma1*3 + color] * lvl1[ imom1*4*3 + gamma*3 + color];
for(int spin = 0 ; spin < 4 ; spin++){
res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*4*3 + spin*3 + color] * lvl3[gamma*4*NmomSink*4*3 + gamma1*NmomSink*4*3 + imom1*4*3 + spin*3 + color];
}
}
}
}
}
static void finalize_contract3pf_oneLevel_SinkMom(Complex *res,Complex *Iins, Complex *lvl3, int NmomSink, int momElemSink[][3]){
memset(res,0,G_localL[3]*NmomSink*4*4*2*sizeof(double));
for(int it = 0 ; it < G_localL[3] ; it++)
for(int imom1 = 0 ; imom1 < NmomSink ; imom1++){
for(int gamma = 0 ; gamma < 4 ; gamma++)
for(int gamma1 = 0 ; gamma1 < 4 ; gamma1++){
for(int color = 0 ; color < 3 ; color++)
for(int spin = 0 ; spin < 4 ; spin++){
res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] = res[it*NmomSink*4*4 + imom1*4*4 + gamma*4 + gamma1] +
Iins[it*4*3 + spin*3 + color] * lvl3[gamma*4*NmomSink*4*3 + gamma1*NmomSink*4*3 + imom1*4*3 + spin*3 + color];
}
}
}
}
static void finalize_contract3pf_Pion_SinkMom(Complex *res,Complex *Iins, Complex *lvl, int NmomSink, int momElemSink[][3]){
memset(res,0,G_localL[3]*NmomSink*2*sizeof(double));
for(int it = 0 ; it < G_localL[3] ; it++)
for(int imom1 = 0 ; imom1 < NmomSink ; imom1++){
for(int color = 0 ; color < 3 ; color++)
for(int spin = 0 ; spin < 4 ; spin++)
res[it*NmomSink + imom1] = res[it*NmomSink + imom1] + Iins[it*4*3 + spin*3 + color] * lvl[imom1*4*3 + spin*3 + color];
}
}
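// Added note (sketch, not original code): the finalize_* routines above fill `res` as an array of
// Complex values laid out as [t][mom][gamma][gamma1] (just [t][mom] for the pion case). Assuming
// Complex is layout-compatible with a pair of doubles {re, im}, the same buffer can be read back
// as plain doubles, which is what the write_* functions do:
//   ((double*)res)[ (it*Nmom*4*4 + imom*4*4 + gamma*4 + gamma1)*2 + 0 ]   // real part
//   ((double*)res)[ (it*Nmom*4*4 + imom*4*4 + gamma*4 + gamma1)*2 + 1 ]   // imaginary part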
//#define NEW_VERSION
void quda::threepStochUpart( QKXTM_Vector &phi , QKXTM_Vector3D &xi ,QKXTM_Propagator &uprop , QKXTM_Propagator3D &uprop3D , QKXTM_Propagator3D &dprop3D, QKXTM_Gauge &gauge, int fixTime , char *filename ,int Nmom , int momElem[][3]){
// ! fix time is the absolute sink time
int NmomSink = 1;
int momElemSink[1][3];
momElemSink[0][0] = 0;
momElemSink[0][1] = 0;
momElemSink[0][2] = 0;
char particles_filename[MAX_PARTICLES][257];
char particles_filename_noether[MAX_PARTICLES][257];
char particles_filename_oneD[MAX_PARTICLES][257];
FILE *file_local[MAX_PARTICLES];
FILE *file_noether[MAX_PARTICLES];
FILE *file_oneD[MAX_PARTICLES];
sprintf(particles_filename[0],"%s_%s",filename,"proton_local.dat");
sprintf(particles_filename[1],"%s_%s",filename,"neutron_local.dat");
sprintf(particles_filename_noether[0],"%s_%s",filename,"proton_noether.dat");
sprintf(particles_filename_noether[1],"%s_%s",filename,"neutron_noether.dat");
sprintf(particles_filename_oneD[0],"%s_%s",filename,"proton_oneD.dat");
sprintf(particles_filename_oneD[1],"%s_%s",filename,"neutron_oneD.dat");
if(comm_rank() == 0){
#ifdef WRITE_BINARY
file_local[0] = fopen(particles_filename[0],"ab");
file_local[1] = fopen(particles_filename[1],"ab");
file_noether[0] = fopen(particles_filename_noether[0],"ab");
file_noether[1] = fopen(particles_filename_noether[1],"ab");
file_oneD[0] = fopen(particles_filename_oneD[0],"ab");
file_oneD[1] = fopen(particles_filename_oneD[1],"ab");
#else
file_local[0] = fopen(particles_filename[0],"a");
file_local[1] = fopen(particles_filename[1],"a");
file_noether[0] = fopen(particles_filename_noether[0],"a");
file_noether[1] = fopen(particles_filename_noether[1],"a");
file_oneD[0] = fopen(particles_filename_oneD[0],"a");
file_oneD[1] = fopen(particles_filename_oneD[1],"a");
#endif
if(file_local[0] == NULL || file_local[1] == NULL || file_oneD[0] == NULL || file_oneD[1] == NULL || file_noether[0] == NULL || file_noether[1] == NULL){
fprintf(stderr,"Error open files for writting : %s\n",strerror(errno));
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
// here we will calculate part of the contraction ----------------
printfQuda("Start partial contraction\n");
QKXTM_Vector3D *levelVec = new QKXTM_Vector3D();
// QKXTM_Vector3D *levelVec = new QKXTM_Vector3D[8];
// QKXTM_Vector3D *levelVec = malloc(8*sizeof(QKXTM_Vector3D));
//levelVec[0]->QKXTM_Vector3D();
cudaBindTexture(0, uprop3DStochTex,uprop3D.D_elem(), uprop3D.Bytes());
cudaBindTexture(0, dprop3DStochTex,dprop3D.D_elem(), dprop3D.Bytes());
cudaBindTexture(0, xiVector3DStochTex, xi.D_elem(), xi.Bytes());
double *proton_level1 = (double*) malloc(NmomSink*4*3*2*sizeof(double));
double *proton_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
double *neutron_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
if(proton_level1 == NULL || proton_level3 == NULL || neutron_level3 == NULL){
fprintf(stderr,"Error allocate host memory for partial contraction\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
partialContract3pf_upart_proton(proton_level1,proton_level3,*levelVec,NmomSink,momElemSink);
partialContract3pf_upart_neutron(neutron_level3,*levelVec,NmomSink,momElemSink);
cudaUnbindTexture(xiVector3DStochTex);
cudaUnbindTexture(uprop3DStochTex);
cudaUnbindTexture(dprop3DStochTex);
delete levelVec;
printfQuda("Finish partial contraction\n");
// ---------------------------------------------------------------
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *insLineMom = (double*) malloc(10*G_localL[3]*Nmom*4*3*2*sizeof(double));
double *insLineNoetherMom = (double*) malloc(4*G_localL[3]*Nmom*4*3*2*sizeof(double));
double *insLineOneDMom = (double*) malloc(8*4*G_localL[3]*Nmom*4*3*2*sizeof(double));
if(insLineMom == NULL || insLineOneDMom == NULL || insLineNoetherMom == NULL){
fprintf(stderr,"Error allocate host memory for insLineMom\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
// +++++++++++++++ local operators +++++++++++++++++++// (10 local operators)
// for local operators we use 1 , g1 , g2 , g3 , g4 , g5 , g5g1 , g5g2 , g5g3 , g5g4
// so we map operators to integers 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9
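// Illustrative sketch only (hypothetical names, not defined in this code base): the iflag argument
// of insLine_local_kernel could be read through an enum such as
//   enum LocalInsertion { INS_1 = 0, INS_G1, INS_G2, INS_G3, INS_G4,
//                         INS_G5, INS_G5G1, INS_G5G2, INS_G5G3, INS_G5G4 };
// so that, e.g., iflag = 5 selects the pseudoscalar (g5) insertion.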
cudaBindTexture(0, propStochTex, uprop.D_elem(), uprop.Bytes());
cudaBindTexture(0, phiVectorStochTex, phi.D_elem(), phi.Bytes());
QKXTM_Vector *insLine = new QKXTM_Vector();
printfQuda("Start Insertion line\n");
for(int iflag = 0 ; iflag < 10 ; iflag++){
insLine_local_kernel<<<gridDim,blockDim>>>((double2*) insLine->D_elem() , iflag , 1); // (1,2,3) (upart,dpart,spart)
cudaDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
cudaDeviceSynchronize();
checkCudaError();
}
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// communication
cudaBindTexture(0, gaugeDerivativeTex, gauge.D_elem(), gauge.Bytes());
gauge.ghostToHost();
gauge.cpuExchangeGhost(); // communicate gauge
gauge.ghostToDevice();
comm_barrier(); // just in case
uprop.ghostToHost();
uprop.cpuExchangeGhost(); // communicate propagator
uprop.ghostToDevice();
comm_barrier(); // just in case
phi.ghostToHost();
phi.cpuExchangeGhost(); // communicate stochastic vector
phi.ghostToDevice();
comm_barrier(); // just in case
// +++++++++++++++++++ conserved current ++++++++++++++++++++++++++++++++//
// mapping gamma
// g1 , g2 , g3 , g4
// 0 , 1 , 2 , 3
for(int idir = 0 ; idir < 4 ; idir++){
insLine_noether_kernel<<<gridDim,blockDim>>>((double2*) insLine->D_elem() , idir);
cudaDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
cudaDeviceSynchronize();
checkCudaError();
}
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
// +++++++++++++++++++ derivative operators ++++++++++++++++++++++++++++++++//
// for derivative operators we have for gamma matrices g1,g2,g3,g4 ,g5g1,g5g2,g5g3,g5g4 => 4+4 combinations
// for derivative index we have 4 index D^0 , D^1 , D^2 , D^3
// for total we have 8*4=32 combinations
// mapping gamma indices (the derivative will have a separate index)
// g1 , g2 , g3 , g4 , g5g1 , g5g2 , g5g3 , g5g4
// 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7
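// Added sketch (not original code): with the mapping above a (gamma, derivative) pair flattens to
// comb = iflag*4 + dir, comb = 0..31, and the corresponding block of the Fourier-transformed
// insertion line starts at
//   insLineOneDMom + (iflag*4 + dir) * G_localL[3]*Nmom*4*3*2
// which is exactly the offset used in the insLineFourier calls below.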
//#ifdef NEW_VERSION
QKXTM_VectorX8 *insLineX8 = new QKXTM_VectorX8();
for(int dir = 0 ; dir < 4 ; dir++){
insLine_oneD_kernel_new<<<gridDim,blockDim>>>((double2*) insLineX8->D_elem(), dir);
cudaDeviceSynchronize();
checkCudaError();
for(int iflag = 0 ; iflag < 8 ; iflag++){
insLineFourier(insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2, insLineX8->D_elem() + iflag*G_nSpin*G_nColor*G_localVolume*2, Nmom, momElem);
cudaDeviceSynchronize();
checkCudaError();
}
}
delete insLineX8;
//#else
/*
for(int iflag = 0 ; iflag < 8 ; iflag++) // iflag perform loop over gammas
for(int dir = 0 ; dir < 4 ; dir++){
// need to find a way to improve it
insLine_oneD_kernel<<<gridDim,blockDim>>>((double2*) insLine->D_elem() , iflag ,dir); // we dont need part here because operators are vector , axial
cudaDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
cudaDeviceSynchronize();
checkCudaError();
}
*/
delete insLine;
cudaUnbindTexture(gaugeDerivativeTex);
printfQuda("Finish insertion line\n");
cudaUnbindTexture(phiVectorStochTex);
cudaUnbindTexture(propStochTex);
//+++++++++++++++++++++++++++++++++++ finish insertion line
double *res = (double*) malloc(G_localL[3]*Nmom*4*4*2*sizeof(double));
// write local
for(int iflag = 0 ; iflag < 10 ; iflag++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3,(Complex*) proton_level1, Nmom, momElem);
write_3pf_local_zeroMomSink(file_local[0],res,iflag,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3, Nmom, momElem);
write_3pf_local_zeroMomSink(file_local[1],res,iflag,Nmom,momElem);
}
// write noether
for(int idir = 0 ; idir < 4 ; idir++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3,(Complex*) proton_level1, Nmom, momElem);
write_3pf_local_zeroMomSink(file_noether[0],res,idir,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3, Nmom, momElem);
write_3pf_local_zeroMomSink(file_noether[1],res,idir,Nmom,momElem);
}
// write derivatives
for(int iflag = 0 ; iflag < 8 ; iflag++)
for(int dir = 0 ; dir < 4 ; dir++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3,(Complex*) proton_level1, Nmom, momElem);
write_3pf_oneD_zeroMomSink(file_oneD[0],res,iflag,dir,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3, Nmom, momElem);
write_3pf_oneD_zeroMomSink(file_oneD[1],res,iflag,dir,Nmom,momElem);
}
free(res);
if(comm_rank()==0){
fclose(file_local[0]);
fclose(file_local[1]);
fclose(file_noether[0]);
fclose(file_noether[1]);
fclose(file_oneD[0]);
fclose(file_oneD[1]);
}
free(insLineMom);
free(insLineNoetherMom);
free(insLineOneDMom);
free(proton_level1);
free(proton_level3);
free(neutron_level3);
checkCudaError();
}
void quda::threepStochDpart( QKXTM_Vector &phi , QKXTM_Vector3D &xi ,QKXTM_Propagator &dprop , QKXTM_Propagator3D &uprop3D , QKXTM_Propagator3D &dprop3D, QKXTM_Gauge &gauge, int fixTime , char *filename ,int Nmom , int momElem[][3]){
int NmomSink = 1;
int momElemSink[1][3];
momElemSink[0][0] = 0;
momElemSink[0][1] = 0;
momElemSink[0][2] = 0;
// ! fix time is the absolute sink time
char particles_filename[MAX_PARTICLES][257];
char particles_filename_noether[MAX_PARTICLES][257];
char particles_filename_oneD[MAX_PARTICLES][257];
FILE *file_local[MAX_PARTICLES];
FILE *file_noether[MAX_PARTICLES];
FILE *file_oneD[MAX_PARTICLES];
// FILE *file_oneD[MAX_PARTICLES];
sprintf(particles_filename[0],"%s_%s",filename,"proton_local.dat");
sprintf(particles_filename[1],"%s_%s",filename,"neutron_local.dat");
sprintf(particles_filename_noether[0],"%s_%s",filename,"proton_noether.dat");
sprintf(particles_filename_noether[1],"%s_%s",filename,"neutron_noether.dat");
sprintf(particles_filename_oneD[0],"%s_%s",filename,"proton_oneD.dat");
sprintf(particles_filename_oneD[1],"%s_%s",filename,"neutron_oneD.dat");
if(comm_rank() == 0){
#ifdef WRITE_BINARY
file_local[0] = fopen(particles_filename[0],"ab");
file_local[1] = fopen(particles_filename[1],"ab");
file_noether[0] = fopen(particles_filename_noether[0],"ab");
file_noether[1] = fopen(particles_filename_noether[1],"ab");
file_oneD[0] = fopen(particles_filename_oneD[0],"ab");
file_oneD[1] = fopen(particles_filename_oneD[1],"ab");
#else
file_local[0] = fopen(particles_filename[0],"a");
file_local[1] = fopen(particles_filename[1],"a");
file_noether[0] = fopen(particles_filename_noether[0],"a");
file_noether[1] = fopen(particles_filename_noether[1],"a");
file_oneD[0] = fopen(particles_filename_oneD[0],"a");
file_oneD[1] = fopen(particles_filename_oneD[1],"a");
#endif
if(file_local[0] == NULL || file_local[1] == NULL || file_oneD[0] == NULL || file_oneD[1] == NULL || file_noether[0] == NULL || file_noether[1] == NULL){
fprintf(stderr,"Error open files for writting : %s\n",strerror(errno));
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
// here we will calculate part of the contraction ----------------
printfQuda("Start partial contraction\n");
QKXTM_Vector3D *levelVec = new QKXTM_Vector3D();
cudaBindTexture(0, uprop3DStochTex,uprop3D.D_elem(), uprop3D.Bytes());
cudaBindTexture(0, dprop3DStochTex,dprop3D.D_elem(), dprop3D.Bytes());
cudaBindTexture(0, xiVector3DStochTex, xi.D_elem(), xi.Bytes());
double *neutron_level1 = (double*) malloc(NmomSink*4*3*2*sizeof(double));
double *neutron_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
double *proton_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
if(neutron_level1 == NULL || neutron_level3 == NULL || proton_level3 == NULL){
fprintf(stderr,"Error allocate host memory for partial contraction\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
partialContract3pf_dpart_neutron(neutron_level1,neutron_level3,*levelVec,NmomSink,momElemSink);
partialContract3pf_dpart_proton(proton_level3,*levelVec,NmomSink,momElemSink);
cudaUnbindTexture(xiVector3DStochTex);
cudaUnbindTexture(uprop3DStochTex);
cudaUnbindTexture(dprop3DStochTex);
delete levelVec;
printfQuda("Finish partial contraction\n");
// ---------------------------------------------------------------
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *insLineMom = (double*) malloc(10*G_localL[3]*Nmom*4*3*2*sizeof(double));
double *insLineNoetherMom = (double*) malloc(4*G_localL[3]*Nmom*4*3*2*sizeof(double));
double *insLineOneDMom = (double*) malloc(8*4*G_localL[3]*Nmom*4*3*2*sizeof(double));
if(insLineMom == NULL || insLineOneDMom == NULL || insLineNoetherMom == NULL){
fprintf(stderr,"Error allocate host memory for insLineMom\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
// +++++++++++++++ local operators +++++++++++++++++++// (10 local operators)
// for local operators we use 1 , g1 , g2 , g3 , g4 , g5 , g5g1 , g5g2 , g5g3 , g5g4
// so we map operators to integers 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9
cudaBindTexture(0, propStochTex, dprop.D_elem(), dprop.Bytes());
cudaBindTexture(0, phiVectorStochTex, phi.D_elem(), phi.Bytes());
QKXTM_Vector *insLine = new QKXTM_Vector();
printfQuda("Start Insertion line\n");
for(int iflag = 0 ; iflag < 10 ; iflag++){
insLine_local_kernel<<<gridDim,blockDim>>>((double2*) insLine->D_elem() , iflag , 2); // (1,2,3) (upart,dpart,spart)
cudaDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
cudaDeviceSynchronize();
checkCudaError();
}
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ //
// communication
cudaBindTexture(0, gaugeDerivativeTex, gauge.D_elem(), gauge.Bytes());
gauge.ghostToHost();
gauge.cpuExchangeGhost(); // communicate gauge
gauge.ghostToDevice();
comm_barrier(); // just in case
dprop.ghostToHost();
dprop.cpuExchangeGhost(); // communicate propagator
dprop.ghostToDevice();
comm_barrier(); // just in case
phi.ghostToHost();
phi.cpuExchangeGhost(); // communicate stochastic vector
phi.ghostToDevice();
comm_barrier(); // just in case
// +++++++++++++++++++ conserved current +++++++++++++++++++++++++++++++++++//
// mapping gamma
// g1 , g2 , g3 , g4
// 0 , 1 , 2 , 3
for(int idir = 0 ; idir < 4 ; idir++){
insLine_noether_kernel<<<gridDim,blockDim>>>((double2*) insLine->D_elem() , idir);
cudaDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
cudaDeviceSynchronize();
checkCudaError();
}
// +++++++++++++++++++ derivative operators ++++++++++++++++++++++++++++++++//
// for derivative operators we have for gamma matrices g1,g2,g3,g4 ,g5g1,g5g2,g5g3,g5g4 => 4+4 combinations
// for derivative index we have 4 index D^0 , D^1 , D^2 , D^3
// for total we have 8*4=32 combinations
// mapping gamma indices (the derivative will have a separate index)
// g1 , g2 , g3 , g4 , g5g1 , g5g2 , g5g3 , g5g4
// 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7
//#ifdef NEW_VERSION
QKXTM_VectorX8 *insLineX8 = new QKXTM_VectorX8();
for(int dir = 0 ; dir < 4 ; dir++){
insLine_oneD_kernel_new<<<gridDim,blockDim>>>((double2*) insLineX8->D_elem(), dir);
cudaDeviceSynchronize();
checkCudaError();
for(int iflag = 0 ; iflag < 8 ; iflag++){
insLineFourier(insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2, insLineX8->D_elem() + iflag*G_nSpin*G_nColor*G_localVolume*2, Nmom, momElem);
cudaDeviceSynchronize();
checkCudaError();
}
}
delete insLineX8;
/*
//#else
for(int iflag = 0 ; iflag < 8 ; iflag++) // iflag perform loop over gammas
for(int dir = 0 ; dir < 4 ; dir++){
// need to find a way to improve it
insLine_oneD_kernel<<<gridDim,blockDim>>>((double2*) insLine->D_elem() , iflag ,dir); // we dont need part here because operators are vector , axial
cudaDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
cudaDeviceSynchronize();
checkCudaError();
}
*/
//#endif
cudaUnbindTexture(gaugeDerivativeTex);
printfQuda("Finish insertion line\n");
delete insLine;
cudaUnbindTexture(phiVectorStochTex);
cudaUnbindTexture(propStochTex);
//+++++++++++++++++++++++++++++++++++ finish insertion line
double *res = (double*) malloc(G_localL[3]*Nmom*4*4*2*sizeof(double));
// write local
for(int iflag = 0 ; iflag < 10 ; iflag++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3,(Complex*) neutron_level1, Nmom, momElem);
write_3pf_local_zeroMomSink(file_local[1],res,iflag,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineMom + iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3, Nmom, momElem);
write_3pf_local_zeroMomSink(file_local[0],res,iflag,Nmom,momElem);
}
// write conserved
for(int idir = 0 ; idir < 4 ; idir++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3,(Complex*) neutron_level1, Nmom, momElem);
write_3pf_local_zeroMomSink(file_noether[1],res,idir,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineNoetherMom + idir*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3, Nmom, momElem);
write_3pf_local_zeroMomSink(file_noether[0],res,idir,Nmom,momElem);
}
// write derivatives
for(int iflag = 0 ; iflag < 8 ; iflag++)
for(int dir = 0 ; dir < 4 ; dir++){
finalize_contract3pf_mixLevel((Complex*) res,(Complex*) (insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3,(Complex*) neutron_level1, Nmom, momElem);
write_3pf_oneD_zeroMomSink(file_oneD[1],res,iflag,dir,Nmom,momElem);
finalize_contract3pf_oneLevel((Complex*) res,(Complex*) (insLineOneDMom + iflag*4*G_localL[3]*Nmom*4*3*2 + dir*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3, Nmom, momElem);
write_3pf_oneD_zeroMomSink(file_oneD[0],res,iflag,dir,Nmom,momElem);
}
free(res);
if(comm_rank()==0){
fclose(file_local[0]);
fclose(file_local[1]);
fclose(file_noether[0]);
fclose(file_noether[1]);
fclose(file_oneD[0]);
fclose(file_oneD[1]);
}
free(insLineMom);
free(insLineNoetherMom);
free(insLineOneDMom);
free(neutron_level1);
free(neutron_level3);
free(proton_level3);
checkCudaError();
}
//
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void quda::threepStochUpart_WilsonLinks( QKXTM_Vector &phi , QKXTM_Vector3D &xi ,QKXTM_Propagator &uprop , QKXTM_Propagator3D &uprop3D , QKXTM_Propagator3D &dprop3D,double* deviceWilsonLinks, int fixTime , char *filename ,int Nmom , int momElem[][3], int NmomSink, int momSink[][3]){
// ! fix time is the absolute sink time
char particles_filename[MAX_PARTICLES][257];
FILE *file_Nonlocal[MAX_PARTICLES];
sprintf(particles_filename[0],"%s_%s",filename,"proton_Nonlocal.dat");
sprintf(particles_filename[1],"%s_%s",filename,"neutron_Nonlocal.dat");
if(comm_rank() == 0){
file_Nonlocal[0] = fopen(particles_filename[0],"a");
file_Nonlocal[1] = fopen(particles_filename[1],"a");
if(file_Nonlocal[0] == NULL || file_Nonlocal[1] == NULL ){
fprintf(stderr,"Error open files for writting : %s\n",strerror(errno));
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
// here we will calculate part of the contraction ----------------
printfQuda("Start partial contraction\n");
QKXTM_Vector3D *levelVec = new QKXTM_Vector3D();
// QKXTM_Vector3D *levelVec = new QKXTM_Vector3D[8];
// QKXTM_Vector3D *levelVec = malloc(8*sizeof(QKXTM_Vector3D));
//levelVec[0]->QKXTM_Vector3D();
cudaBindTexture(0, uprop3DStochTex,uprop3D.D_elem(), uprop3D.Bytes());
cudaBindTexture(0, dprop3DStochTex,dprop3D.D_elem(), dprop3D.Bytes());
cudaBindTexture(0, xiVector3DStochTex, xi.D_elem(), xi.Bytes());
double *proton_level1 = (double*) malloc(NmomSink*4*3*2*sizeof(double));
double *proton_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
double *neutron_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
if(proton_level1 == NULL || proton_level3 == NULL || neutron_level3 == NULL){
fprintf(stderr,"Error allocate host memory for partial contraction\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
partialContract3pf_upart_proton(proton_level1,proton_level3,*levelVec,NmomSink,momSink);
partialContract3pf_upart_neutron(neutron_level3,*levelVec,NmomSink,momSink);
cudaUnbindTexture(xiVector3DStochTex);
cudaUnbindTexture(uprop3DStochTex);
cudaUnbindTexture(dprop3DStochTex);
delete levelVec;
printfQuda("Finish partial contraction\n");
// ---------------------------------------------------------------
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *insLineMom = (double*) malloc(3*(G_totalL[0]/2)*G_localL[3]*Nmom*4*3*2*sizeof(double));
if(insLineMom == NULL){
fprintf(stderr,"Error allocate host memory for insLineMom\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
// +++++++++++++++ Non local operators +++++++++++++++++++//
cudaBindTexture(0, propStochTex, uprop.D_elem(), uprop.Bytes());
cudaBindTexture(0, phiVectorStochTex, phi.D_elem(), phi.Bytes());
QKXTM_Vector *insLine = new QKXTM_Vector();
printfQuda("Start Insertion line\n");
for(int dir = 0 ; dir < 3 ; dir++)
for(int dl = 0 ; dl < G_totalL[dir]/2 ; dl++){
insLine_Nonlocal_kernel<<<gridDim,blockDim>>>((double2*) insLine->D_elem() ,(double2*) deviceWilsonLinks ,dl,dir); // Wilson line of length dl in spatial direction dir
cudaDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2 + dl*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
cudaDeviceSynchronize();
checkCudaError();
}
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
printfQuda("Finish insertion line\n");
delete insLine;
cudaUnbindTexture(phiVectorStochTex);
cudaUnbindTexture(propStochTex);
//+++++++++++++++++++++++++++++++++++ finish insertion line
double *res = (double*) malloc(G_localL[3]*NmomSink*4*4*2*sizeof(double));
// write nonlocal
for(int dir = 0 ; dir < 3 ; dir++)
for(int iflag = 0 ; iflag < G_totalL[dir]/2 ; iflag++){
finalize_contract3pf_mixLevel_SinkMom((Complex*) res,(Complex*) (insLineMom +dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3,(Complex*) proton_level1, NmomSink, momSink);
write_3pf_Nonlocal_zeroMomIns(file_Nonlocal[0],res,dir,iflag,NmomSink,momSink);
finalize_contract3pf_oneLevel_SinkMom((Complex*) res,(Complex*) (insLineMom +dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3, NmomSink, momSink);
write_3pf_Nonlocal_zeroMomIns(file_Nonlocal[1],res,dir,iflag,NmomSink,momSink);
}
free(res);
if(comm_rank()==0){
fclose(file_Nonlocal[0]);
fclose(file_Nonlocal[1]);
}
free(insLineMom);
free(proton_level1);
free(proton_level3);
free(neutron_level3);
checkCudaError();
}
void quda::threepStochDpart_WilsonLinks( QKXTM_Vector &phi , QKXTM_Vector3D &xi ,QKXTM_Propagator &dprop , QKXTM_Propagator3D &uprop3D , QKXTM_Propagator3D &dprop3D, double* deviceWilsonLinks, int fixTime , char *filename ,int Nmom , int momElem[][3], int NmomSink, int momSink[][3]){
// ! fix time is the absolute sink time
char particles_filename[MAX_PARTICLES][257];
FILE *file_Nonlocal[MAX_PARTICLES];
// FILE *file_oneD[MAX_PARTICLES];
sprintf(particles_filename[0],"%s_%s",filename,"proton_Nonlocal.dat");
sprintf(particles_filename[1],"%s_%s",filename,"neutron_Nonlocal.dat");
if(comm_rank() == 0){
file_Nonlocal[0] = fopen(particles_filename[0],"a");
file_Nonlocal[1] = fopen(particles_filename[1],"a");
if(file_Nonlocal[0] == NULL || file_Nonlocal[1] == NULL){
fprintf(stderr,"Error open files for writting : %s\n",strerror(errno));
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
// here we will calculate part of the contraction ----------------
printfQuda("Start partial contraction\n");
QKXTM_Vector3D *levelVec = new QKXTM_Vector3D();
cudaBindTexture(0, uprop3DStochTex,uprop3D.D_elem(), uprop3D.Bytes());
cudaBindTexture(0, dprop3DStochTex,dprop3D.D_elem(), dprop3D.Bytes());
cudaBindTexture(0, xiVector3DStochTex, xi.D_elem(), xi.Bytes());
double *neutron_level1 = (double*) malloc(NmomSink*4*3*2*sizeof(double));
double *neutron_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
double *proton_level3 = (double*) malloc(4*4*NmomSink*4*3*2*sizeof(double));
if(neutron_level1 == NULL || neutron_level3 == NULL || proton_level3 == NULL){
fprintf(stderr,"Error allocate host memory for partial contraction\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
partialContract3pf_dpart_neutron(neutron_level1,neutron_level3,*levelVec,NmomSink,momSink);
partialContract3pf_dpart_proton(proton_level3,*levelVec,NmomSink,momSink);
cudaUnbindTexture(xiVector3DStochTex);
cudaUnbindTexture(uprop3DStochTex);
cudaUnbindTexture(dprop3DStochTex);
delete levelVec;
printfQuda("Finish partial contraction\n");
// ---------------------------------------------------------------
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *insLineMom = (double*) malloc(3*(G_totalL[0]/2)*G_localL[3]*Nmom*4*3*2*sizeof(double));
if(insLineMom == NULL){
fprintf(stderr,"Error allocate host memory for insLineMom\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
// +++++++++++++++ Non local operators +++++++++++++++++++//
cudaBindTexture(0, propStochTex, dprop.D_elem(), dprop.Bytes());
cudaBindTexture(0, phiVectorStochTex, phi.D_elem(), phi.Bytes());
QKXTM_Vector *insLine = new QKXTM_Vector();
printfQuda("Start Insertion line\n");
for(int dir = 0 ; dir < 3 ; dir++)
for(int dl = 0 ; dl < G_totalL[dir]/2 ; dl++){
insLine_Nonlocal_kernel<<<gridDim,blockDim>>>((double2*) insLine->D_elem() , (double2*) deviceWilsonLinks ,dl,dir); // Wilson line of length dl in spatial direction dir
cudaDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2 + dl*G_localL[3]*Nmom*4*3*2 ,insLine->D_elem() , Nmom , momElem );
cudaDeviceSynchronize();
checkCudaError();
}
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ //
printfQuda("Finish insertion line\n");
delete insLine;
cudaUnbindTexture(phiVectorStochTex);
cudaUnbindTexture(propStochTex);
//+++++++++++++++++++++++++++++++++++ finish insertion line
double *res = (double*) malloc(G_localL[3]*NmomSink*4*4*2*sizeof(double));
// write nonlocal
for(int dir = 0 ; dir < 3 ; dir++)
for(int iflag = 0 ; iflag < G_totalL[dir]/2 ; iflag++){ // use G_totalL[dir] to match the insertion-line loop above
finalize_contract3pf_mixLevel_SinkMom((Complex*) res,(Complex*) (insLineMom + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) neutron_level3,(Complex*) neutron_level1, NmomSink, momSink);
write_3pf_Nonlocal_zeroMomIns(file_Nonlocal[1],res,dir,iflag,NmomSink,momSink);
finalize_contract3pf_oneLevel_SinkMom((Complex*) res,(Complex*) (insLineMom + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) proton_level3, NmomSink, momSink);
write_3pf_Nonlocal_zeroMomIns(file_Nonlocal[0],res,dir,iflag,NmomSink,momSink);
}
free(res);
if(comm_rank()==0){
fclose(file_Nonlocal[0]);
fclose(file_Nonlocal[1]);
}
free(insLineMom);
free(neutron_level1);
free(neutron_level3);
free(proton_level3);
checkCudaError();
}
//#define TEST_PION
void quda::threepStochPion_WilsonLinks( QKXTM_Vector &dphi , QKXTM_Vector3D &xi ,QKXTM_Propagator &uprop , QKXTM_Propagator3D &uprop3D , double* deviceWilsonLinks, int fixTime , char *filename ,int Nmom , int momElem[][3], int NmomSink, int momSink[][3]){
char pion_filename_up[257],pion_filename_down[257];
FILE *filePion_up;
FILE *filePion_down;
sprintf(pion_filename_up,"%s_%s",filename,"pion_Nonlocal_up.dat");
sprintf(pion_filename_down,"%s_%s",filename,"pion_Nonlocal_down.dat");
if( comm_rank() == 0){
filePion_up = fopen(pion_filename_up,"a");
filePion_down = fopen(pion_filename_down,"a");
if(filePion_up == NULL || filePion_down == NULL){
fprintf(stderr,"Error open files for writting : %s\n",strerror(errno));
MPI_Abort(MPI_COMM_WORLD,-1);
}
}
printfQuda("Start partial contraction\n");
QKXTM_Vector3D *levelVec_up = new QKXTM_Vector3D();
QKXTM_Vector3D *levelVec_down = new QKXTM_Vector3D();
cudaBindTexture(0, uprop3DStochTex,uprop3D.D_elem(), uprop3D.Bytes());
cudaBindTexture(0, xiVector3DStochTex, xi.D_elem(), xi.Bytes());
double *pion_level_up = (double*) malloc(NmomSink*4*3*2*sizeof(double));
double *pion_level_down = (double*) malloc(NmomSink*4*3*2*sizeof(double));
if(pion_level_up == NULL || pion_level_down == NULL){
fprintf(stderr,"Error allocate host memory for partial contraction\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
partialContract3pf_upart_pion(pion_level_up,*levelVec_up,NmomSink,momSink);
partialContract3pf_dpart_pion(pion_level_down,*levelVec_down,NmomSink,momSink);
#ifdef TEST_PION
FILE *ptr_Vx;
ptr_Vx = fopen("VX_quda.dat","w");
levelVec_up->download();
for(int mu = 0 ; mu < 4 ; mu++)
for(int c = 0 ; c < 3 ; c++)
fprintf(ptr_Vx,"%d %d %+e %+e\n",mu,c,pion_level_up[mu*3*2+c*2+0],pion_level_up[mu*3*2+c*2+1]);
// for(int iv3 = 0 ; iv3 < G_localVolume/G_localL[3] ; iv3++)
// for(int mu = 0 ; mu < 4 ; mu++)
// for(int c = 0 ; c < 3 ; c++)
// fprintf(ptr_Vx,"%d %d %+e %+e\n",mu,c,levelVec_up->H_elem()[iv3*4*3*2+mu*3*2+c*2+0],levelVec_up->H_elem()[iv3*4*3*2+mu*3*2+c*2+1]);
fclose(ptr_Vx);
#endif
cudaUnbindTexture(uprop3DStochTex);
cudaUnbindTexture(xiVector3DStochTex);
delete levelVec_up;
delete levelVec_down;
printfQuda("Finish partial contraction\n");
// ---------------------------------------------------------------
// execution domain
dim3 blockDim( THREADS_PER_BLOCK , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
double *insLineMom_up = (double*) malloc(3*(G_totalL[0]/2)*G_localL[3]*Nmom*4*3*2*sizeof(double));
double *insLineMom_down = (double*) malloc(3*(G_totalL[0]/2)*G_localL[3]*Nmom*4*3*2*sizeof(double));
if(insLineMom_up == NULL || insLineMom_down == NULL){
fprintf(stderr,"Error allocate host memory for insLineMom\n");
MPI_Abort(MPI_COMM_WORLD,-1);
}
cudaBindTexture(0, propStochTex, uprop.D_elem(), uprop.Bytes());
cudaBindTexture(0, phiVectorStochTex, dphi.D_elem(), dphi.Bytes());
QKXTM_Vector *insLine_up = new QKXTM_Vector();
QKXTM_Vector *insLine_down = new QKXTM_Vector();
printfQuda("Start Insertion line\n");
for(int dir = 0 ; dir < 3 ; dir++)
for(int dl = 0 ; dl < G_totalL[dir]/2 ; dl++){
// upart
insLine_Nonlocal_pion_kernel<<<gridDim,blockDim>>>((double2*) insLine_up->D_elem() , (double2*) deviceWilsonLinks ,dl,dir, 0);
cudaDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom_up + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2 + dl*G_localL[3]*Nmom*4*3*2 ,insLine_up->D_elem() , Nmom , momElem );
cudaDeviceSynchronize();
checkCudaError();
//dpart
insLine_Nonlocal_pion_kernel<<<gridDim,blockDim>>>((double2*) insLine_down->D_elem() , (double2*) deviceWilsonLinks ,dl,dir, 1);
cudaDeviceSynchronize(); // to make sure that we have the data in corr
checkCudaError();
insLineFourier(insLineMom_down + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2 + dl*G_localL[3]*Nmom*4*3*2 ,insLine_down->D_elem() , Nmom , momElem );
cudaDeviceSynchronize();
checkCudaError();
}
#ifdef TEST_PION
FILE *ptr_Vt;
ptr_Vt = fopen("VT_quda.dat","w");
for(int it = 0 ; it < G_localL[3] ; it++)
for(int mu = 0 ; mu < 4 ; mu++)
for(int c = 0 ; c < 3 ; c++)
fprintf(ptr_Vt,"%d %d %d %+e %+e\n",it,mu,c,insLineMom_up[it*4*3*2+mu*3*2+c*2+0],insLineMom_up[it*4*3*2+mu*3*2+c*2+1]);
fclose(ptr_Vt);
FILE *ptr_Vt_full;
ptr_Vt_full = fopen("VT_quda_full.dat","w");
insLine_up->download();
for(int iv = 0 ; iv < G_localVolume ; iv++)
for(int mu = 0 ; mu < 4 ; mu++)
for(int c = 0 ; c < 3 ; c++)
fprintf(ptr_Vt_full,"%d %d %d %+e %+e\n",iv,mu,c,insLine_up->H_elem()[iv*4*3*2 + mu*3*2 + c*2 + 0],insLine_up->H_elem()[iv*4*3*2 + mu*3*2 + c*2 + 1]);
fclose(ptr_Vt_full);
#endif
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ //
printfQuda("Finish insertion line\n");
delete insLine_up;
delete insLine_down;
cudaUnbindTexture(phiVectorStochTex);
cudaUnbindTexture(propStochTex);
//+++++++++++++++++++++++++++++++++++ finish insertion line
double *res_up = (double*) malloc(G_localL[3]*NmomSink*2*sizeof(double));
double *res_down = (double*) malloc(G_localL[3]*NmomSink*2*sizeof(double));
for(int dir = 0 ; dir < 3 ; dir++)
for(int iflag = 0 ; iflag < G_totalL[dir]/2 ; iflag++){ // use G_totalL[dir] to match the insertion-line loop above
finalize_contract3pf_Pion_SinkMom((Complex*) res_up,(Complex*) (insLineMom_up + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) pion_level_up, NmomSink, momSink);
finalize_contract3pf_Pion_SinkMom((Complex*) res_down,(Complex*) (insLineMom_down + dir*(G_totalL[dir]/2)*G_localL[3]*Nmom*4*3*2+ iflag*G_localL[3]*Nmom*4*3*2 ),(Complex*) pion_level_down, NmomSink, momSink);
write_3pf_Nonlocal_Pion_zeroMomIns(filePion_up,res_up,dir,iflag,NmomSink,momSink);
write_3pf_Nonlocal_Pion_zeroMomIns(filePion_down,res_down,dir,iflag,NmomSink,momSink);
}
free(res_up);
free(res_down);
if(comm_rank()==0){
fclose(filePion_up);
fclose(filePion_down);
}
free(insLineMom_up);
free(insLineMom_down);
free(pion_level_up);
free(pion_level_down);
checkCudaError();
}
#undef MAX_PARTICLES
#define THREADS_PER_BLOCK_ARNOLDI 64
void Arnoldi::uploadToCuda(cudaColorSpinorField &cudaVector, int offset){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK_ARNOLDI , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
uploadToCuda_kernel<<<gridDim,blockDim>>>( (double2*) (d_mNxM + offset*G_localVolume*G_nSpin*G_nColor) , (double2*) pointEven, (double2*) pointOdd);
cudaDeviceSynchronize();
checkCudaError();
}
void Arnoldi::downloadFromCuda(cudaColorSpinorField &cudaVector, int offset){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK_ARNOLDI , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
downloadFromCuda_kernel<<<gridDim,blockDim>>>( (double2*) (d_mNxM + offset*G_localVolume*G_nSpin*G_nColor) , (double2*) pointEven, (double2*) pointOdd);
cudaDeviceSynchronize();
checkCudaError();
}
/*
void Arnoldi::matrixNxMmatrixMxL(Arnoldi &V,int NL, int M,int L,bool transpose){
dim3 blockDim( THREADS_PER_BLOCK_ARNOLDI , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
cudaMemcpyToSymbol(c_matrixQ , d_mMxM , M*M*sizeof(double2),0,cudaMemcpyDeviceToDevice );
checkCudaError();
matrixNxMmatrixMxL_kernel<<<gridDim,blockDim,blockDim.x*M*sizeof(Complex)>>>( (double2*) V.D_mNxM(), NL , M, L ,transpose);
cudaDeviceSynchronize();
checkCudaError();
}
*/
void Arnoldi::matrixNxMmatrixMxLReal(Arnoldi &V,int NL, int M,int L,bool transpose){
dim3 blockDim( THREADS_PER_BLOCK_ARNOLDI , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
Complex *h_Q = (Complex*) calloc(M*M,sizeof(Complex));
double *h_Qr = (double*) calloc(M*M,sizeof(double));
cudaMemcpy((void*)h_Q,(void*) d_mMxM, M*M*sizeof(Complex), cudaMemcpyDeviceToHost);
for(int i = 0 ; i < M ; i++)
for(int j = 0 ; j < M ; j++)
h_Qr[i*M+j] = h_Q[i*M+j].real();
cudaMemcpyToSymbol(c_matrixQ , h_Qr , M*M*sizeof(double) );
checkCudaError();
free(h_Q);  // release host staging buffers once the matrix is in constant memory
free(h_Qr);
matrixNxMmatrixMxLReal_kernel<<<gridDim,blockDim,blockDim.x*M*sizeof(Complex)>>>( (double2*) V.D_mNxM(), NL , M, L ,transpose);
cudaDeviceSynchronize();
checkCudaError();
}
void quda::clearNoiseCuda(Complex *A, int L, double tolerance){
cudaMemcpyToSymbol(c_tolArnoldi , &tolerance , sizeof(double));
checkCudaError();
dim3 blockDim( L , 1, 1);
dim3 gridDim( 1 , 1 , 1);
noiseCleaner_kernel<<<gridDim,blockDim,blockDim.x*sizeof(double)>>>((double2*)A);
cudaDeviceSynchronize();
checkCudaError();
}
#define THREADS_PER_BLOCK_LANCZOS 64
void Lanczos::uploadToCuda(cudaColorSpinorField &cudaVector, int offset){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK_LANCZOS , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
uploadToCuda_kernel<<<gridDim,blockDim>>>( (double2*) (d_mNxM + offset*G_localVolume*G_nSpin*G_nColor) , (double2*) pointEven, (double2*) pointOdd);
cudaDeviceSynchronize();
checkCudaError();
}
void Lanczos::downloadFromCuda(cudaColorSpinorField &cudaVector){
double *pointEven = (double*) cudaVector.Even().V(); // take the pointer to even and odd memory location
double *pointOdd = (double*) cudaVector.Odd().V();
dim3 blockDim( THREADS_PER_BLOCK_LANCZOS , 1, 1);
dim3 gridDim( (G_localVolume/2 + blockDim.x -1)/blockDim.x , 1 , 1); // half G_localVolume threads now
downloadFromCuda_kernel<<<gridDim,blockDim>>>( (double2*) d_vN , (double2*) pointEven, (double2*) pointOdd);
cudaDeviceSynchronize();
checkCudaError();
}
void Lanczos::matrixNxMmatrixMxLReal(Lanczos &V,int NL, int M,int L,bool transpose){
dim3 blockDim( THREADS_PER_BLOCK_LANCZOS , 1, 1);
dim3 gridDim( (G_localVolume + blockDim.x -1)/blockDim.x , 1 , 1);
#ifdef __MATRIX_IN_CONSTANT_MEMORY__
double *h_Q = (double*) calloc(M*M,sizeof(double));
cudaMemcpy((void*)h_Q,(void*) d_mMxM, M*M*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpyToSymbol(c_matrixQ , h_Q , M*M*sizeof(double) );
checkCudaError();
free(h_Q);
matrixNxMmatrixMxLReal_kernel<<<gridDim,blockDim,blockDim.x*M*sizeof(Complex)>>>( (double2*) V.D_mNxM(), NL , M, L ,transpose);
#else
printfQuda("ChechPoint 1\n");
fflush(stdout);
cudaBindTexture(0,matrixTex,d_mMxM,size);
checkCudaError();
printfQuda("ChechPoint 2\n");
fflush(stdout);
matrixNxMmatrixMxLRealTexture_kernel<<<gridDim,blockDim,blockDim.x*M*sizeof(Complex)>>>( (double2*) V.D_mNxM(), NL , M, L ,transpose);
checkCudaError();
printfQuda("ChechPoint 3\n");
fflush(stdout);
cudaUnbindTexture(matrixTex);
printfQuda("ChechPoint 4\n");
fflush(stdout);
#endif
cudaDeviceSynchronize();
checkCudaError();
}
void Lanczos::makeTridiagonal(int m,int l){
dim3 blockDim( m , 1, 1);
dim3 gridDim( l , 1 , 1);
makeTridiagonal_kernel<<<gridDim,blockDim>>>((double*) this->D_mMxM());
cudaDeviceSynchronize();
checkCudaError();
}
|
9f9dcccf699b0516b94d13e16c990ed92cd609f5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <iostream>
#include <cassert>
#include "KNearestNeighbors.cuh"
#include "ComputeNeighbors.cuh"
//// CPU Version of Neighbors algorithm ////
void GetKNearestNeighborsCPU(const size_t p, const std::vector<Point>& points, std::vector<size_t>& neighbors, unsigned int k)
{
neighbors.resize(k);
std::vector<double> distance(k, 1000);
for (size_t q = 0; q < points.size(); q++)
{
//check that we're not calculating the distance between p and itself
if (q != p)
{
//calculate the distance between p and q
double d = sqrt(pow(points[p].x - points[q].x, 2) + pow(points[p].y - points[q].y, 2));
//check if q is nearer than the farthest of the current nearest points
auto max = std::max_element(distance.begin(), distance.end());
if (d < *max)
{
// store the distance and index of q
distance[std::distance(distance.begin(), max)] = d;
neighbors[std::distance(distance.begin(), max)] = q;
}
}
}
}
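// Minimal usage sketch (hypothetical driver code, assuming Point exposes public double x, y
// members as the distance computation above suggests):
//
//   std::vector<Point> pts = { {0.0, 0.0}, {1.0, 0.0}, {0.0, 1.0}, {5.0, 5.0} };
//   std::vector<size_t> nn;
//   GetKNearestNeighborsCPU(0, pts, nn, 2);
//   // nn now holds the indices of the 2 points closest to pts[0] (here 1 and 2, in some order).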
void GetKNearestNeighborsGPU(const std::vector<Point>& points, std::vector< std::vector<size_t> >& AllNeighbors, unsigned int k)
{
std::vector<size_t> neighbors;
Point* CPUpoints = (Point*)malloc(points.size() * sizeof(Point));
size_t* CPUneighbors = (size_t*)malloc(points.size() * k * sizeof(size_t));
// copy point coordinates into a raw host array
for(int i = 0; i < points.size(); i++)
{
CPUpoints[i] = points[i];
}
// GPU variables
Point* GPUpoints;
size_t* GPUneighbors;
double* GPUdistance;
assert(hipMalloc((void**)&GPUpoints, points.size() * sizeof(Point)) == hipSuccess);
assert(hipMalloc((void**)&GPUneighbors, points.size() * k * sizeof(size_t)) == hipSuccess);
assert(hipMalloc((void**)&GPUdistance, points.size()*k * sizeof(double)) == hipSuccess);
// send points coordinates to GPU memory
assert(hipMemcpy(GPUpoints, CPUpoints, points.size() * sizeof(Point), hipMemcpyHostToDevice) == hipSuccess);
std::cout << "Computing neighbors..." << std::endl;
hipLaunchKernelGGL(( ComputeNeighbors), dim3((points.size()/512)+1), dim3(512) , 0, 0, GPUpoints, GPUneighbors, GPUdistance, points.size(), k);
hipDeviceSynchronize();
// recover the neighbors indexes from GPU memory
assert(hipMemcpy(CPUneighbors, GPUneighbors, points.size() * k * sizeof(size_t), hipMemcpyDeviceToHost) == hipSuccess);
// make sure that the neighbors vector has the right size
neighbors.resize(k);
// make sure that AllNeighbors vector is empty
AllNeighbors.clear();
for(int i = 0; i < points.size(); i++)
{
for(int j = 0; j < k; j++)
{
neighbors[j] = CPUneighbors[i*k + j];
}
// add the vector of neighbors to the vector of all neighbors
AllNeighbors.push_back(neighbors);
}
free(CPUpoints);
free(CPUneighbors);
hipFree(GPUpoints);
hipFree(GPUneighbors);
hipFree(GPUdistance);
}
|
9f9dcccf699b0516b94d13e16c990ed92cd609f5.cu
|
#include <algorithm>
#include <iostream>
#include <cassert>
#include "KNearestNeighbors.cuh"
#include "ComputeNeighbors.cuh"
//// CPU Version of Neighbors algorithm ////
void GetKNearestNeighborsCPU(const size_t p, const std::vector<Point>& points, std::vector<size_t>& neighbors, unsigned int k)
{
neighbors.resize(k);
std::vector<double> distance(k, 1000);
for (size_t q = 0; q < points.size(); q++)
{
//check that we're not calculating the distance between p and itself
if (q != p)
{
//calculate the distance between p and q
double d = sqrt(pow(points[p].x - points[q].x, 2) + pow(points[p].y - points[q].y, 2));
//check if q is nearer than the farthest of the current nearest points
auto max = std::max_element(distance.begin(), distance.end());
if (d < *max)
{
// store the distance and index of q
distance[std::distance(distance.begin(), max)] = d;
neighbors[std::distance(distance.begin(), max)] = q;
}
}
}
}
void GetKNearestNeighborsGPU(const std::vector<Point>& points, std::vector< std::vector<size_t> >& AllNeighbors, unsigned int k)
{
std::vector<size_t> neighbors;
Point* CPUpoints = (Point*)malloc(points.size() * sizeof(Point));
size_t* CPUneighbors = (size_t*)malloc(points.size() * k * sizeof(size_t));
// copy the point coordinates into a plain host array
for(int i = 0; i < points.size(); i++)
{
CPUpoints[i] = points[i];
}
// GPU variables
Point* GPUpoints;
size_t* GPUneighbors;
double* GPUdistance;
assert(cudaMalloc((void**)&GPUpoints, points.size() * sizeof(Point)) == cudaSuccess);
assert(cudaMalloc((void**)&GPUneighbors, points.size() * k * sizeof(size_t)) == cudaSuccess);
assert(cudaMalloc((void**)&GPUdistance, points.size()*k * sizeof(double)) == cudaSuccess);
// send points coordinates to GPU memory
assert(cudaMemcpy(GPUpoints, CPUpoints, points.size() * sizeof(Point), cudaMemcpyHostToDevice) == cudaSuccess);
std::cout << "Computing neighbors..." << std::endl;
ComputeNeighbors<<< (points.size()/512)+1, 512 >>>(GPUpoints, GPUneighbors, GPUdistance, points.size(), k);
cudaDeviceSynchronize();
// copy the neighbor indices back from GPU memory
assert(cudaMemcpy(CPUneighbors, GPUneighbors, points.size() * k * sizeof(size_t), cudaMemcpyDeviceToHost) == cudaSuccess);
// make sure the neighbors vector has the right size
neighbors.resize(k);
// make sure that AllNeighbors vector is empty
AllNeighbors.clear();
for(int i = 0; i < points.size(); i++)
{
for(int j = 0; j < k; j++)
{
neighbors[j] = CPUneighbors[i*k + j];
}
// add the vector of neighbors to the vector of all neighbors
AllNeighbors.push_back(neighbors);
}
free(CPUpoints);
free(CPUneighbors);
cudaFree(GPUpoints);
cudaFree(GPUneighbors);
cudaFree(GPUdistance);
}
|
d547cd12611e1a01e07dfce739917fb6fb3b8928.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define VECT_SIZE 10000000
#define THRESHOLD 1.e-7
__global__ void VectorAdd(float *w, float *u, float *v, int n) {
int index = threadIdx.x;
int step = blockDim.x;
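// block-stride loop: thread t handles elements t, t + blockDim.x, t + 2*blockDim.x, ...
// so a single block of threads still covers all n elements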
for (int i = index; i < n; i += step) {
w[i] = u[i] + v[i];
}
}
void fill(float *v) {
for (int i = 0; i < VECT_SIZE; i++) {
v[i] = (float)i;
}
}
int main() {
// Host pointers
float *u, *v, *w;
// Device pointers
float *u_device, *v_device, *w_device;
// Allocate host memory
u = (float *) malloc(sizeof(float)*VECT_SIZE);
v = (float *) malloc(sizeof(float)*VECT_SIZE);
w = (float *) malloc(sizeof(float)*VECT_SIZE);
fill(u);
fill(v);
// Allocate device memory
hipMalloc((void **) &u_device, sizeof(float)*VECT_SIZE);
hipMalloc((void **) &v_device, sizeof(float)*VECT_SIZE);
hipMalloc((void **) &w_device, sizeof(float)*VECT_SIZE);
// H --> D
hipMemcpy(u_device, u, sizeof(float)*VECT_SIZE, hipMemcpyHostToDevice);
hipMemcpy(v_device, v, sizeof(float)*VECT_SIZE, hipMemcpyHostToDevice);
// Kernel call
hipLaunchKernelGGL(( VectorAdd), dim3(1),dim3(256), 0, 0, w_device, u_device, v_device, VECT_SIZE);
// D --> H
hipMemcpy(w, w_device, sizeof(float)*VECT_SIZE, hipMemcpyDeviceToHost);
// check the result on the host
for (int i = 0; i < VECT_SIZE; i++) {
if (fabsf(w[i] - u[i] - v[i]) > THRESHOLD) {
fprintf(stderr,"Got mistake!\n");
}
}
hipFree(u_device);
hipFree(v_device);
hipFree(w_device);
free(u);
free(v);
free(w);
}
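// A possible grid-wide variant of the kernel above, sketched here for comparison:
// striding by the whole grid (blocks * threads) would let the launch use many
// blocks instead of the single block used in main().
//
//   __global__ void VectorAddGrid(float *w, float *u, float *v, int n) {
//     int index = blockIdx.x * blockDim.x + threadIdx.x; // global thread id
//     int step = gridDim.x * blockDim.x;                 // total number of threads in the grid
//     for (int i = index; i < n; i += step) {
//       w[i] = u[i] + v[i];
//     }
//   }
//
//   // e.g. hipLaunchKernelGGL(VectorAddGrid, dim3((VECT_SIZE + 255) / 256), dim3(256), 0, 0,
//   //                         w_device, u_device, v_device, VECT_SIZE);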
|
d547cd12611e1a01e07dfce739917fb6fb3b8928.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define VECT_SIZE 10000000
#define THRESHOLD 1.e-7
__global__ void VectorAdd(float *w, float *u, float *v, int n) {
int index = threadIdx.x;
int step = blockDim.x;
for (int i = index; i < n; i += step) {
w[i] = u[i] + v[i];
}
}
void fill(float *v) {
for (int i = 0; i < VECT_SIZE; i++) {
v[i] = (float)i;
}
}
int main() {
// Host pointers
float *u, *v, *w;
// Device pointers
float *u_device, *v_device, *w_device;
// Allocate host memory
u = (float *) malloc(sizeof(float)*VECT_SIZE);
v = (float *) malloc(sizeof(float)*VECT_SIZE);
w = (float *) malloc(sizeof(float)*VECT_SIZE);
fill(u);
fill(v);
// Allocate device memory
cudaMalloc((void **) &u_device, sizeof(float)*VECT_SIZE);
cudaMalloc((void **) &v_device, sizeof(float)*VECT_SIZE);
cudaMalloc((void **) &w_device, sizeof(float)*VECT_SIZE);
// H --> D
cudaMemcpy(u_device, u, sizeof(float)*VECT_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(v_device, v, sizeof(float)*VECT_SIZE, cudaMemcpyHostToDevice);
// Kernel call
VectorAdd<<<1,256>>>(w_device, u_device, v_device, VECT_SIZE);
// D --> H
cudaMemcpy(w, w_device, sizeof(float)*VECT_SIZE, cudaMemcpyDeviceToHost);
// check the result on the host
for (int i = 0; i < VECT_SIZE; i++) {
if (fabsf(w[i] - u[i] - v[i]) > THRESHOLD) {
fprintf(stderr,"Got mistake!\n");
}
}
cudaFree(u_device);
cudaFree(v_device);
cudaFree(w_device);
free(u);
free(v);
free(w);
}
|
7e1e19ac2b2119e212b742fee91c13ffc2420bb6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "conv_ops_2d.h"
namespace nnet {
namespace nnet_internal {
//kernel function for forward pass convolutions
__global__ void d_filter_convolve_2d(
float* input,
float* filter,
float* output,
shape input_shape,
shape filter_shape,
shape filter_chunks,
shape output_shape,
shape padding,
int filter_no,
int n_filters,
size_t batch_size) {
//declare shared memory for the filter and the loaded block. Copying to shared memory reduces overall data loads
__shared__ float s_filter[FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M * FILTER_BLOCK_SIZE_K];
__shared__ float s_load_block[(CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * CONV_BLOCK_SIZE_K * CONV_BATCH_DEPTH];
//3d thread ids. Each element is cached in the registers to make accesses quicker
int n_id = threadIdx.x;
int m_id = threadIdx.y;
int k_id = threadIdx.z;
//3d filter tile ids. Each filter tile is loaded in a different thread block.
int f_tile_n = blockIdx.x % filter_chunks.width * FILTER_BLOCK_SIZE_N;
int f_tile_m = blockIdx.y % filter_chunks.height * FILTER_BLOCK_SIZE_M;
int f_tile_k = blockIdx.z % filter_chunks.depth * FILTER_BLOCK_SIZE_K;
//3d block tile ids. Each block tile is loaded in a different thread block
int b_tile_n = (blockIdx.x / filter_chunks.width) * CONV_BLOCK_SIZE_N;
int b_tile_m = (blockIdx.y / filter_chunks.height) * CONV_BLOCK_SIZE_M;
int b_tile_k = f_tile_k;
//the current element in the batch
int b_elem = (blockIdx.z / filter_chunks.depth) * CONV_BATCH_DEPTH;
//load filter chunk into shared memory
#pragma unroll
for (int f_load_k = 0; f_load_k < FILTER_BLOCK_SIZE_K; f_load_k += CONV_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = f_load_k + k_id;
int g_load_k_id = f_tile_k + load_k_id;
#pragma unroll
for (int f_load_m = 0; f_load_m < FILTER_BLOCK_SIZE_M; f_load_m += CONV_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = f_load_m + m_id;
int g_load_m_id = f_tile_m + load_m_id;
#pragma unroll
for (int f_load_n = 0; f_load_n < FILTER_BLOCK_SIZE_N; f_load_n += CONV_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = f_load_n + n_id;
int g_load_n_id = f_tile_n + load_n_id;
//the index in shared memory for the current load element
int s_index = load_k_id * FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M +
load_m_id * FILTER_BLOCK_SIZE_N +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (g_load_n_id < filter_shape.width &&
g_load_m_id < filter_shape.height &&
g_load_k_id < filter_shape.depth &&
b_elem < batch_size) {
//the index in global memory for the current load element
int g_index = g_load_k_id * filter_shape.width * filter_shape.height +
g_load_m_id * filter_shape.width +
g_load_n_id;
//load the element into shared memory
s_filter[s_index] = filter[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_filter[s_index] = 0.0;
}
}
}
}
//synchronise the threads in this block (to complete filter loading)
cuda_syncthreads();
//load input chunk
#pragma unroll
for (int batch = 0; batch < CONV_BATCH_DEPTH; batch++) {
#pragma unroll
for (int b_load_k = 0; b_load_k < CONV_BLOCK_SIZE_K; b_load_k += CONV_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = b_load_k + k_id;
int g_load_k_id = b_tile_k + load_k_id;
#pragma unroll
for (int b_load_m = 0; b_load_m < CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M; b_load_m += CONV_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides. The global element is padded
int load_m_id = b_load_m + m_id;
int g_load_m_id = f_tile_m + b_tile_m + load_m_id - padding.height;
#pragma unroll
for (int b_load_n = 0; b_load_n < CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N; b_load_n += CONV_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides. The global element is padded
int load_n_id = b_load_n + n_id;
int g_load_n_id = f_tile_n + b_tile_n + load_n_id - padding.width;
//the index in shared memory for the current load element
int s_index = batch * (CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * CONV_BLOCK_SIZE_K +
load_k_id * (CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) +
load_m_id * (CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (0 <= g_load_n_id &&
g_load_n_id < input_shape.width &&
0 <= g_load_m_id &&
g_load_m_id < input_shape.height &&
g_load_k_id < input_shape.depth &&
b_elem + batch < batch_size) {
//the index in global memory for the current load element
int g_index = (b_elem + batch) * input_shape.width * input_shape.height * input_shape.depth +
g_load_k_id * input_shape.width * input_shape.height +
g_load_m_id * input_shape.width +
g_load_n_id;
//load the element into shared memory
s_load_block[s_index] = input[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_load_block[s_index] = 0.0;
}
}
}
}
}
//synchronise the threads in this block (to complete block loading)
cuda_syncthreads();
//convolve and write
#pragma unroll
for (int batch = 0; batch < CONV_BATCH_DEPTH; batch++) {
if (k_id == 0) {
#pragma unroll
for (int stride_m = 0; stride_m < CONV_THREAD_BLOCK_M; stride_m++) {
//stride index for the m direction
int start_m = m_id * CONV_THREAD_BLOCK_M + stride_m;
#pragma unroll
for (int stride_n = 0; stride_n < CONV_THREAD_BLOCK_N; stride_n++) {
//stride index for the n direction
int start_n = n_id * CONV_THREAD_BLOCK_N + stride_n;
//get the output indices for writing this element
int out_n_id = b_tile_n + start_n;
int out_m_id = b_tile_m + start_m;
int out_k_id = (b_elem + batch) * n_filters + filter_no;
//check that this element is within the range of the output shape
//to avoid illegal addressing
if (out_n_id < output_shape.width &&
out_m_id < output_shape.height &&
b_elem + batch < batch_size) {
//the writing address
int out_index = out_k_id * output_shape.width * output_shape.height +
out_m_id * output_shape.width +
out_n_id;
//calculate one dot product and cache it
float inc = calculate_conv2d_dot<CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N, CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M, CONV_THREAD_BLOCK_K>(
s_filter,
&s_load_block[batch * (CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * CONV_BLOCK_SIZE_K],
start_n,
start_m
);
//accumulate the dot product to the output memory at the writing address
//this is an atomic operation to avoid memory races
atomic_add(&output[out_index], inc);
}
}
}
}
}
}
//kernel function for backward pass convolutions
__global__ void d_filter_outer_convolve_2d(
float* input,
float* filter,
float* output,
shape input_shape,
shape filter_shape,
shape filter_chunks,
shape output_shape,
shape padding,
int filter_no,
size_t batch_size) {
//declare shared memory for the filter and the loaded block. Copying to shared memory reduces overall data loads
__shared__ float s_filter[FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M * FILTER_BLOCK_SIZE_K];
__shared__ float s_load_block[(CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_OUTER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * CONV_OUTER_BLOCK_SIZE_K];
//3d thread ids. Each element is cached in the registers to make accesses quicker
int n_id = threadIdx.x;
int m_id = threadIdx.y;
int k_id = threadIdx.z;
//3d filter tile ids. Each filter tile is loaded in a different thread block.
int f_tile_n = blockIdx.x % filter_chunks.width * FILTER_BLOCK_SIZE_N;
int f_tile_m = blockIdx.y % filter_chunks.height * FILTER_BLOCK_SIZE_M;
int f_tile_k = blockIdx.z % filter_chunks.depth * FILTER_BLOCK_SIZE_K;
//3d block tile ids. Each block tile is loaded in a different thread block
int b_tile_n = blockIdx.x / filter_chunks.width * CONV_OUTER_BLOCK_SIZE_N;
int b_tile_m = blockIdx.y / filter_chunks.height * CONV_OUTER_BLOCK_SIZE_M;
//the current element in the batch
int b_elem = (blockIdx.z / filter_chunks.depth) * CONV_OUTER_BLOCK_SIZE_K;
//load filter chunk
#pragma unroll
for (int f_load_k = 0; f_load_k < FILTER_BLOCK_SIZE_K; f_load_k += CONV_OUTER_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = f_load_k + k_id;
int g_load_k_id = f_tile_k + load_k_id;
#pragma unroll
for (int f_load_m = 0; f_load_m < FILTER_BLOCK_SIZE_M; f_load_m += CONV_OUTER_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = f_load_m + m_id;
int g_load_m_id = f_tile_m + load_m_id;
#pragma unroll
for (int f_load_n = 0; f_load_n < FILTER_BLOCK_SIZE_N; f_load_n += CONV_OUTER_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = f_load_n + n_id;
int g_load_n_id = f_tile_n + load_n_id;
//the index in shared memory for the current load element
int s_index = load_k_id * FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M +
load_m_id * FILTER_BLOCK_SIZE_N +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (g_load_n_id < filter_shape.width &&
g_load_m_id < filter_shape.height &&
g_load_k_id < filter_shape.depth) {
//the index in global memory for the current load element
int g_index = g_load_k_id * filter_shape.width * filter_shape.height +
(filter_shape.height - 1 - g_load_m_id) * filter_shape.width +
(filter_shape.width - 1 - g_load_n_id);
//load the element into shared memory
s_filter[s_index] = filter[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_filter[s_index] = 0.0;
}
}
}
}
//synchronise the threads in this block (to complete filter loading)
cuda_syncthreads();
//load input block + edge
#pragma unroll
for (int b_load_k = 0; b_load_k < CONV_OUTER_BLOCK_SIZE_K; b_load_k += CONV_OUTER_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = b_load_k + k_id;
int g_load_k_id = b_elem + load_k_id;
#pragma unroll
for (int b_load_m = 0; b_load_m < CONV_OUTER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M; b_load_m += CONV_OUTER_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = b_load_m + m_id;
int g_load_m_id = b_tile_m + load_m_id - filter_shape.height + f_tile_m + 1 + padding.height;
#pragma unroll
for (int b_load_n = 0; b_load_n < CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N; b_load_n += CONV_OUTER_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = b_load_n + n_id;
int g_load_n_id = b_tile_n + load_n_id - filter_shape.width + f_tile_n + 1 + padding.width;
//the index in shared memory for the current load element
int s_index = load_k_id * (CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_OUTER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) +
load_m_id * (CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (0 <= g_load_n_id &&
g_load_n_id < input_shape.width &&
0 <= g_load_m_id &&
g_load_m_id < input_shape.height &&
g_load_k_id < batch_size) {
//the index in global memory for the current load element
int g_index = g_load_k_id * input_shape.width * input_shape.height * input_shape.depth +
filter_no * input_shape.width * input_shape.height +
g_load_m_id * input_shape.width +
g_load_n_id;
//load the element into shared memory
s_load_block[s_index] = input[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_load_block[s_index] = 0.0;
}
}
}
}
//synchronise the threads in this block (to complete block loading)
cuda_syncthreads();
//convolve and accumulate to output
#pragma unroll
for (int f_k = 0; f_k < FILTER_BLOCK_SIZE_K; f_k++) {
//stride index for the filter
int out_k_id = f_tile_k + f_k;
#pragma unroll
for (int b_k = 0; b_k < CONV_OUTER_THREAD_BLOCK_K; b_k++) {
//stride index for the k direction
int b_k_id = k_id * CONV_OUTER_THREAD_BLOCK_K + b_k;
#pragma unroll
for (int b_m = 0; b_m < CONV_OUTER_THREAD_BLOCK_M; b_m++) {
//stride index for the m direction
int b_m_id = m_id * CONV_OUTER_THREAD_BLOCK_M + b_m;
int out_m_id = b_tile_m + b_m_id;
#pragma unroll
for (int b_n = 0; b_n < CONV_OUTER_THREAD_BLOCK_N; b_n++) {
//stride index for the n direction
int b_n_id = n_id * CONV_OUTER_THREAD_BLOCK_N + b_n;
int out_n_id = b_tile_n + b_n_id;
//check that this element is within the range of the output shape
//to avoid illegal addressing
if (out_n_id < output_shape.width &&
out_m_id < output_shape.height &&
out_k_id < output_shape.depth &&
b_elem < batch_size) {
//the writing address
int out_index = (b_elem + b_k_id) * output_shape.width * output_shape.height * output_shape.depth +
out_k_id * output_shape.width * output_shape.height +
out_m_id * output_shape.width +
out_n_id;
//calculate one dot product and cache it
float inc = calculate_conv2d_dot<CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N, CONV_OUTER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M, 1>(
&s_filter[FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M * f_k],
&s_load_block[(CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_OUTER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * b_k],
b_n_id,
b_m_id
);
//accumulate the dot product to the output memory at the writing address
//this is an atomic operation to avoid memory races
atomic_add(&output[out_index], inc);
}
}
}
}
}
}
//kernel function for training convolutional filters
__global__ void d_filter_convolve_2d_derivative(
float* input,
float* filter,
float* output,
shape input_shape,
shape filter_shape,
shape filter_chunks,
shape output_shape,
shape padding,
int input_depth,
size_t batch_size) {
//declare shared memory for the filter and the loaded block. Copying to shared memory reduces overall data loads
__shared__ float s_filter[FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M * FILTER_BLOCK_SIZE_K];
__shared__ float s_load_block[(CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_DER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * CONV_BLOCK_SIZE_K];
//3d thread ids. Each element is cached in the registers to make accesses quicker
int n_id = threadIdx.x;
int m_id = threadIdx.y;
int k_id = threadIdx.z;
//3d filter tile ids. Each filter tile is loaded in a different thread block.
int f_tile_n = blockIdx.x % filter_chunks.width * FILTER_BLOCK_SIZE_N;
int f_tile_m = blockIdx.y % filter_chunks.height * FILTER_BLOCK_SIZE_M;
int f_tile_k = blockIdx.z % filter_chunks.depth * FILTER_BLOCK_SIZE_K;
//3d block tile ids. Each block tile is loaded in a different thread block
int b_tile_n = (blockIdx.x / filter_chunks.width) * CONV_DER_BLOCK_SIZE_N;
int b_tile_m = (blockIdx.y / filter_chunks.height) * CONV_DER_BLOCK_SIZE_M;
int b_tile_k = (blockIdx.z / filter_chunks.depth) % input_depth * CONV_DER_BLOCK_SIZE_K;
//the current element in the batch
int b_elem = blockIdx.z / (filter_chunks.depth * input_depth);
//load filter chunk
#pragma unroll
for (int f_load_k = 0; f_load_k < FILTER_BLOCK_SIZE_K; f_load_k += CONV_DER_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = f_load_k + k_id;
int g_load_k_id = f_tile_k + load_k_id;
#pragma unroll
for (int f_load_m = 0; f_load_m < FILTER_BLOCK_SIZE_M; f_load_m += CONV_DER_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = f_load_m + m_id;
int g_load_m_id = f_tile_m + load_m_id;
#pragma unroll
for (int f_load_n = 0; f_load_n < FILTER_BLOCK_SIZE_N; f_load_n += CONV_DER_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = f_load_n + n_id;
int g_load_n_id = f_tile_n + load_n_id;
//the index in shared memory for the current load element
int s_index = load_k_id * FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M +
load_m_id * FILTER_BLOCK_SIZE_N +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (g_load_n_id < filter_shape.width &&
g_load_m_id < filter_shape.height &&
g_load_k_id < filter_shape.depth) {
//the index in global memory for the current load element
int g_index = b_elem * filter_shape.width * filter_shape.height * filter_shape.depth +
g_load_k_id * filter_shape.width * filter_shape.height +
g_load_m_id * filter_shape.width +
g_load_n_id;
//load the element into shared memory
s_filter[s_index] = filter[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_filter[s_index] = 0.0;
}
}
}
}
//synchronise the threads in this block (to complete filter loading)
cuda_syncthreads();
//load input chunk
#pragma unroll
for (int b_load_k = 0; b_load_k < CONV_DER_BLOCK_SIZE_K; b_load_k += CONV_DER_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = b_load_k + k_id;
int g_load_k_id = b_tile_k + load_k_id;
#pragma unroll
for (int b_load_m = 0; b_load_m < CONV_DER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M; b_load_m += CONV_DER_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = b_load_m + m_id;
int g_load_m_id = f_tile_m + b_tile_m + load_m_id - padding.height;
#pragma unroll
for (int b_load_n = 0; b_load_n < CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N; b_load_n += CONV_DER_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = b_load_n + n_id;
int g_load_n_id = f_tile_n + b_tile_n + load_n_id - padding.width;
//the index in shared memory for the current load element
int s_index = load_k_id * (CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_DER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) +
load_m_id * (CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (0 <= g_load_n_id &&
g_load_n_id < input_shape.width &&
0 <= g_load_m_id &&
g_load_m_id < input_shape.height &&
g_load_k_id < input_shape.depth) {
//the index in global memory for the current load element
int g_index = b_elem * input_shape.width * input_shape.height * input_shape.depth +
g_load_k_id * input_shape.width * input_shape.height +
g_load_m_id * input_shape.width +
g_load_n_id;
//load the element into shared memory
s_load_block[s_index] = input[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_load_block[s_index] = 0.0;
}
}
}
}
//synchronise the threads in this block (to complete block loading)
cuda_syncthreads();
//convolve and write
#pragma unroll
for (int filter_k = 0; filter_k < FILTER_BLOCK_SIZE_K; filter_k++) {
#pragma unroll
for (int stride_k = 0; stride_k < CONV_DER_THREAD_BLOCK_K; stride_k++) {
//stride index for the k direction
int start_k = k_id * CONV_DER_THREAD_BLOCK_K + stride_k;
#pragma unroll
for (int stride_m = 0; stride_m < CONV_DER_THREAD_BLOCK_M; stride_m++) {
//stride index for the m direction
int start_m = m_id * CONV_DER_THREAD_BLOCK_M + stride_m;
#pragma unroll
for (int stride_n = 0; stride_n < CONV_DER_THREAD_BLOCK_N; stride_n++) {
//stride index for the n direction
int start_n = n_id * CONV_DER_THREAD_BLOCK_N + stride_n;
//writing indices
int out_n_id = b_tile_n + start_n;
int out_m_id = b_tile_m + start_m;
int out_layer_id = b_tile_k + start_k;
int out_filter_id = f_tile_k + filter_k;
int out_k_id = out_filter_id * output_shape.depth + out_layer_id;
//check that the output element is within range to avoid illegal
//addressing
if (out_n_id < output_shape.width &&
out_m_id < output_shape.height &&
out_filter_id < filter_shape.depth &&
out_layer_id < output_shape.depth) {
//the output index
int out_index = out_k_id * output_shape.width * output_shape.height +
out_m_id * output_shape.width +
out_n_id;
//calculate one dot product and cache it
float inc = calculate_conv2d_dot<CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N, CONV_DER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M, 1>(
&s_filter[filter_k * FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M],
&s_load_block[start_k * (CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_DER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M)],
start_n,
start_m
);
//accumulate the dot product to the output memory at the writing address
//this is an atomic operation to avoid memory races
atomic_add(&output[out_index], inc);
}
}
}
}
}
cuda_syncthreads();
}
//kernel function for forward pass pooling
__global__ void d_pool_2d(
float* input,
int* mask,
float* output,
shape input_shape,
shape pool_size,
shape stride,
shape output_shape,
shape padding,
size_t batch_size) {
//declare shared block to convolve over taking the max at each point
__shared__ float s_block[(POOL_BLOCK_SIZE_N + MAX_POOL_SIZE) * (POOL_BLOCK_SIZE_M + MAX_POOL_SIZE) * POOL_BLOCK_DEPTH];
//3d thread ids. Each element is cached in the registers to make accesses quicker
int n_id = threadIdx.x;
int m_id = threadIdx.y;
int k_id = threadIdx.z;
//3d tile ids for the pooled image. Each image tile is processed by a different thread block.
int n_tile = blockIdx.x * POOL_BLOCK_SIZE_N;
int m_tile = blockIdx.y * POOL_BLOCK_SIZE_M;
int k_tile = blockIdx.z * POOL_BLOCK_DEPTH;
//load image block into s_block
//each block convolves one region of the image
#pragma unroll
for (int load_k = 0; load_k < POOL_BLOCK_DEPTH; load_k += POOL_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = load_k + k_id;
int g_load_k_id = (load_k_id + k_tile) % input_shape.depth;
int g_load_elem_id = ((load_k_id + k_tile) / input_shape.depth);
int g_in_elem_id = g_load_elem_id * (input_shape.width * input_shape.height * input_shape.depth);
int g_out_elem_id = g_load_elem_id * (output_shape.width * output_shape.height * output_shape.depth);
#pragma unroll
for (int load_m = 0; load_m < POOL_BLOCK_SIZE_M + MAX_POOL_SIZE; load_m += POOL_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = load_m + m_id;
int g_load_m_id = load_m_id + m_tile;
#pragma unroll
for (int load_n = 0; load_n < POOL_BLOCK_SIZE_N + MAX_POOL_SIZE; load_n += POOL_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = load_n + n_id;
int g_load_n_id = load_n_id + n_tile;
//the shared memory index for this specific element
int s_load_id = load_k_id * (POOL_BLOCK_SIZE_N + MAX_POOL_SIZE) * (POOL_BLOCK_SIZE_M + MAX_POOL_SIZE) + load_m_id * (POOL_BLOCK_SIZE_N + MAX_POOL_SIZE) + load_n_id;
//check that the element is within range to avoid illegal addressing
if (g_load_n_id < input_shape.width &&
g_load_m_id < input_shape.height &&
g_load_elem_id < batch_size) {
//the global memory index for this specific element
int g_load_id = g_in_elem_id + g_load_k_id * input_shape.width * input_shape.height + g_load_m_id * input_shape.width + g_load_n_id;
//load the element from global memory into shared memory
s_block[s_load_id] = input[g_load_id];
}
else {
//if the element is outside of range load the minimum negative number instead
//so that it doesn't get chosen by the max pool
s_block[s_load_id] = FLOAT_MIN;
}
}
}
//synchronise the threads to complete block loading
cuda_syncthreads();
//loop through each stride with even divisions among threads
//each thread iterates through every pool with the top left corner in the thread's allocated block
//calculate offsets so that the different blocks start at the right point (as it will not always be perfectly left aligned)
int n_offset = ((input_shape.width + padding.width - pool_size.width) - (n_tile + n_id * POOL_THREAD_BLOCK_N)) % stride.width;
int m_offset = ((input_shape.height + padding.height - pool_size.height) - (m_tile + m_id * POOL_THREAD_BLOCK_M)) % stride.height;
//stride over the image and calculate the max values for each pool region
for (int stride_m = m_offset; stride_m < POOL_THREAD_BLOCK_M; stride_m += stride.height) {
for (int stride_n = n_offset; stride_n < POOL_THREAD_BLOCK_N; stride_n += stride.width) {
//indices of the current stride for this thread
int stride_index_n = n_id * POOL_THREAD_BLOCK_N + stride_n;
int stride_index_m = m_id * POOL_THREAD_BLOCK_M + stride_m;
//initialise the max value to the minimum negative number (such that each pool value should be larger)
float tmp_max = FLOAT_MIN;
//cache indices of the greatest element for masking
int n_index = stride_index_n;
int m_index = stride_index_m;
for (int pool_m = 0; pool_m < pool_size.height; pool_m++) {
for (int pool_n = 0; pool_n < pool_size.width; pool_n++) {
//cache the index to compare without double loading
float tmp_read = s_block[load_k_id * (POOL_BLOCK_SIZE_N + MAX_POOL_SIZE) * (POOL_BLOCK_SIZE_M + MAX_POOL_SIZE) +
(stride_index_m + pool_m) * (POOL_BLOCK_SIZE_N + MAX_POOL_SIZE) +
stride_index_n + pool_n];
//if this element is greater than the current largest
if (tmp_read > tmp_max) {
//update the current largest
tmp_max = tmp_read;
//set the indices to this element
n_index = stride_index_n + pool_n;
m_index = stride_index_m + pool_m;
}
}
}
//write tmp_max to output
//out indices for writing
int g_out_n = (n_tile + stride_index_n) / stride.width;
int g_out_m = (m_tile + stride_index_m) / stride.height;
//the index of the largest element for the mask
int mask_out_index = (m_tile + m_index) * input_shape.width + (n_tile + n_index);
//check that the element is within the range of the output to avoid illegal addressing
if (g_out_n < output_shape.width && g_out_m < output_shape.height && g_load_elem_id < batch_size) {
//write output element to output array
output[g_out_elem_id + g_load_k_id * output_shape.width * output_shape.height + g_out_m * output_shape.width + g_out_n] = tmp_max;
//write mask element to mask array
mask[g_out_elem_id + g_load_k_id * output_shape.width * output_shape.height + g_out_m * output_shape.width + g_out_n] = mask_out_index;
}
}
}
}
}
//kernel function for backward pass pooling
__global__ void d_pool_2d_derivative(
float* input,
int* mask,
float* output,
size_t input_size,
size_t output_size,
size_t batch_size) {
//the thread index for this specific thread
int t_id = threadIdx.x + blockIdx.x * blockDim.x;
//the batch index for this block
int batch_index = blockIdx.y;
//check that the index is within range to avoid illegal addressing
if (t_id < input_size && batch_index < batch_size) {
//get the index to write out to which we stored in the mask during forward propagation
int out_id = mask[batch_index * input_size + t_id];
//accumulate the partial derivative value to the output index from the mask atomically
atomic_add(&output[batch_index * output_size + out_id], input[batch_index * input_size + t_id]);
}
}
template <int BLOCK_N, int BLOCK_M, int DEPTH>
__device__ float calculate_conv2d_dot(volatile float* s_filter, volatile float* s_load_block, int start_n, int start_m)
{
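// the returned value is the dot product
//   accum = sum_{k < DEPTH} sum_{m < FILTER_BLOCK_SIZE_M} sum_{n < FILTER_BLOCK_SIZE_N}
//           s_filter[k][m][n] * s_load_block[k][m + start_m][n + start_n]
// with both arrays interpreted as row-major 3d blocks of widths FILTER_BLOCK_SIZE_N and BLOCK_N respectively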
//register accumulator variable
float accum = 0;
//stride through the region of interest
#pragma unroll
for (int dot_k = 0; dot_k < DEPTH; dot_k++) {
#pragma unroll
for (int dot_m = 0; dot_m < FILTER_BLOCK_SIZE_M; dot_m++) {
#pragma unroll
for (int dot_n = 0; dot_n < FILTER_BLOCK_SIZE_N; dot_n++) {
//get the indices of the filter and the block
int f_index = dot_k * FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M + dot_m * FILTER_BLOCK_SIZE_N + dot_n;
int b_index = dot_k * BLOCK_N * BLOCK_M + (dot_m + start_m) * BLOCK_N + (dot_n + start_n);
//increment the accumulator by the product of the filter and block indices
accum += s_filter[f_index] * s_load_block[b_index];
}
}
}
//return the accumulated sum (the dot product)
return accum;
}
void filter_convolve_2d(
float* d_input,
float* d_filter,
float* d_output,
shape input_shape,
shape output_shape,
shape filter_shape,
shape padding,
size_t batch_size)
{
//divide the filter into chunks
shape filter_chunks = shape(
ceil_div(FILTER_BLOCK_SIZE_N, filter_shape.width),
ceil_div(FILTER_BLOCK_SIZE_M, filter_shape.height),
ceil_div(FILTER_BLOCK_SIZE_K, filter_shape.depth)
);
//declare the thread size of each block
dim3 threads_per_block(CONV_THREAD_SIZE_N, CONV_THREAD_SIZE_M, CONV_THREAD_SIZE_K);
//declare the number of blocks in each dimension
dim3 blocks_per_grid(
ceil_div(CONV_BLOCK_SIZE_N, input_shape.width + padding.width) * filter_chunks.width,
ceil_div(CONV_BLOCK_SIZE_M, input_shape.height + padding.height) * filter_chunks.height,
ceil_div(CONV_BATCH_DEPTH, batch_size) * filter_chunks.depth
);
//loop through each filter and launch the convolution kernel for each
for (int filter = 0; filter < output_shape.depth; filter++)
{
d_filter_convolve_2d << <blocks_per_grid, threads_per_block >> > (
d_input,
&d_filter[filter_shape.size() * filter],
d_output,
input_shape,
filter_shape,
filter_chunks,
output_shape,
padding,
filter,
output_shape.depth,
batch_size
);
}
}
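// A minimal host-side sketch of calling the wrapper above (dimensions are
// illustrative; shape is assumed to expose the (width, height, depth)
// constructor and size() used elsewhere in this file). The output buffer must
// be zeroed first because the kernel accumulates into it with atomic adds.
//
//   shape in_s(32, 32, 3), f_s(3, 3, 3), out_s(32, 32, 8), pad(1, 1, 0);
//   size_t batch = 16;
//   float *d_in, *d_filt, *d_out;
//   hipMalloc((void**)&d_in, in_s.size() * batch * sizeof(float));
//   hipMalloc((void**)&d_filt, f_s.size() * out_s.depth * sizeof(float));
//   hipMalloc((void**)&d_out, out_s.size() * batch * sizeof(float));
//   hipMemset(d_out, 0, out_s.size() * batch * sizeof(float));
//   // ...copy real input and filter data into d_in and d_filt...
//   filter_convolve_2d(d_in, d_filt, d_out, in_s, out_s, f_s, pad, batch);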
void max_pool_2d(
float* d_input,
int* d_mask,
float* d_output,
shape input_shape,
shape pool_size,
shape stride,
shape output_shape,
shape padding,
size_t batch_size)
{
//declare the thread size of each block
dim3 threads_per_block(POOL_THREAD_SIZE_N, POOL_THREAD_SIZE_M, POOL_THREAD_SIZE_K);
//declare the number of blocks in each dimension
dim3 blocks_per_grid(
ceil_div(POOL_BLOCK_SIZE_N, input_shape.width + padding.width),
ceil_div(POOL_BLOCK_SIZE_M, input_shape.height + padding.height),
ceil_div(POOL_BLOCK_DEPTH, input_shape.depth * batch_size)
);
//launch the pooling kernel
d_pool_2d << <blocks_per_grid, threads_per_block >> > (d_input, d_mask, d_output, input_shape, pool_size, stride, output_shape, padding, batch_size);
}
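// A minimal host-side sketch for the pooling wrapper above (dimensions are
// illustrative; output_shape is assumed to have been derived by the caller
// from the pool size, stride and padding):
//
//   shape in_s(32, 32, 8), pool(2, 2, 1), stride_s(2, 2, 1), out_s(16, 16, 8), pad(0, 0, 0);
//   size_t batch = 16;
//   float *d_in, *d_out;
//   int *d_mask;
//   hipMalloc((void**)&d_in, in_s.size() * batch * sizeof(float));
//   hipMalloc((void**)&d_out, out_s.size() * batch * sizeof(float));
//   hipMalloc((void**)&d_mask, out_s.size() * batch * sizeof(int));
//   max_pool_2d(d_in, d_mask, d_out, in_s, pool, stride_s, out_s, pad, batch);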
void filter_outer_convolve_2d(
float* d_input,
float* d_filter,
float* d_output,
shape input_shape,
shape output_shape,
shape filter_shape,
shape padding,
size_t batch_size)
{
//divide the filter into chunks
shape filter_chunks = shape(
ceil_div(FILTER_BLOCK_SIZE_N, filter_shape.width),
ceil_div(FILTER_BLOCK_SIZE_M, filter_shape.height),
ceil_div(FILTER_BLOCK_SIZE_K, filter_shape.depth)
);
//declare the thread size of each block
dim3 threads_per_block(CONV_OUTER_THREAD_SIZE_N, CONV_OUTER_THREAD_SIZE_M, CONV_OUTER_THREAD_SIZE_K);
//declare the number of blocks in each dimension
dim3 blocks_per_grid(
ceil_div(CONV_OUTER_BLOCK_SIZE_N, input_shape.width) * filter_chunks.width,
ceil_div(CONV_OUTER_BLOCK_SIZE_M, input_shape.height) * filter_chunks.height,
ceil_div(CONV_OUTER_BLOCK_SIZE_K, batch_size) * filter_chunks.depth
);
//loop through each filter and launch the outer convolution kernel for each
for (int filter = 0; filter < input_shape.depth; filter++) {
d_filter_outer_convolve_2d << <blocks_per_grid, threads_per_block >> > (
d_input,
&d_filter[filter_shape.size() * filter],
d_output,
input_shape,
filter_shape,
filter_chunks,
output_shape,
padding,
filter,
batch_size
);
}
}
void filter_convolve_2d_derivative(
float* d_input,
float* d_pds,
float* d_output,
shape input_shape,
shape pd_shape,
shape output_shape,
shape padding,
size_t batch_size)
{
//divide the filter into chunks
shape filter_chunks = shape(
ceil_div(FILTER_BLOCK_SIZE_N, pd_shape.width),
ceil_div(FILTER_BLOCK_SIZE_M, pd_shape.height),
ceil_div(FILTER_BLOCK_SIZE_K, pd_shape.depth)
);
//declare the thread size of each block
dim3 threads_per_block(CONV_DER_THREAD_SIZE_N, CONV_DER_THREAD_SIZE_M, CONV_DER_THREAD_SIZE_K);
//declare the number of blocks in each dimension
dim3 blocks_per_grid(
ceil_div(CONV_DER_BLOCK_SIZE_N, input_shape.width) * filter_chunks.width,
ceil_div(CONV_DER_BLOCK_SIZE_M, input_shape.height) * filter_chunks.height,
ceil_div(CONV_DER_BLOCK_SIZE_K, input_shape.depth) * filter_chunks.depth * batch_size
);
//launch the convolutional derivative kernel
d_filter_convolve_2d_derivative << <blocks_per_grid, threads_per_block >> > (
d_input,
d_pds,
d_output,
input_shape,
pd_shape,
filter_chunks,
output_shape,
padding,
ceil_div(CONV_DER_BLOCK_SIZE_K, input_shape.depth),
batch_size
);
}
void max_pool_2d_derivative(
float* d_input,
int* d_mask,
float* d_output,
shape input_shape,
shape output_shape,
size_t batch_size)
{
//work out the total size of each input layer
size_t in_size = input_shape.width * input_shape.height;
//work out the total size of each output layer
size_t out_size = output_shape.width * output_shape.height;
//declare the number of threads per block
dim3 threads_per_block(in_size, 1);
//declare the number of blocks
dim3 blocks_per_grid(1, batch_size * output_shape.depth);
//if there are too many threads, split the threads among different blocks
if (in_size > BLOCK_SIZE) {
threads_per_block.x = BLOCK_SIZE;
blocks_per_grid.x = ceil_div(BLOCK_SIZE, in_size);
}
//launch the pool derivative kernel
d_pool_2d_derivative << <blocks_per_grid, threads_per_block >> > (d_input, d_mask, d_output, in_size, out_size, batch_size * output_shape.depth);
}
}
}
|
7e1e19ac2b2119e212b742fee91c13ffc2420bb6.cu
|
#include "conv_ops_2d.h"
namespace nnet {
namespace nnet_internal {
//kernel function for forward pass convolutions
__global__ void d_filter_convolve_2d(
float* input,
float* filter,
float* output,
shape input_shape,
shape filter_shape,
shape filter_chunks,
shape output_shape,
shape padding,
int filter_no,
int n_filters,
size_t batch_size) {
//declare shared memory for the filter and the loaded block. Copying to shared memory reduces overall data loads
__shared__ float s_filter[FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M * FILTER_BLOCK_SIZE_K];
__shared__ float s_load_block[(CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * CONV_BLOCK_SIZE_K * CONV_BATCH_DEPTH];
//3d thread ids. Each element is cached in the registers to make accesses quicker
int n_id = threadIdx.x;
int m_id = threadIdx.y;
int k_id = threadIdx.z;
//3d filter tile ids. Each filter tile is loaded in a different thread block.
int f_tile_n = blockIdx.x % filter_chunks.width * FILTER_BLOCK_SIZE_N;
int f_tile_m = blockIdx.y % filter_chunks.height * FILTER_BLOCK_SIZE_M;
int f_tile_k = blockIdx.z % filter_chunks.depth * FILTER_BLOCK_SIZE_K;
//3d block tile ids. Each block tile is loaded in a different thread block
int b_tile_n = (blockIdx.x / filter_chunks.width) * CONV_BLOCK_SIZE_N;
int b_tile_m = (blockIdx.y / filter_chunks.height) * CONV_BLOCK_SIZE_M;
int b_tile_k = f_tile_k;
//the current element in the batch
int b_elem = (blockIdx.z / filter_chunks.depth) * CONV_BATCH_DEPTH;
//load filter chunk into shared memory
#pragma unroll
for (int f_load_k = 0; f_load_k < FILTER_BLOCK_SIZE_K; f_load_k += CONV_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = f_load_k + k_id;
int g_load_k_id = f_tile_k + load_k_id;
#pragma unroll
for (int f_load_m = 0; f_load_m < FILTER_BLOCK_SIZE_M; f_load_m += CONV_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = f_load_m + m_id;
int g_load_m_id = f_tile_m + load_m_id;
#pragma unroll
for (int f_load_n = 0; f_load_n < FILTER_BLOCK_SIZE_N; f_load_n += CONV_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = f_load_n + n_id;
int g_load_n_id = f_tile_n + load_n_id;
//the index in shared memory for the current load element
int s_index = load_k_id * FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M +
load_m_id * FILTER_BLOCK_SIZE_N +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (g_load_n_id < filter_shape.width &&
g_load_m_id < filter_shape.height &&
g_load_k_id < filter_shape.depth &&
b_elem < batch_size) {
//the index in global memory for the current load element
int g_index = g_load_k_id * filter_shape.width * filter_shape.height +
g_load_m_id * filter_shape.width +
g_load_n_id;
//load the element into shared memory
s_filter[s_index] = filter[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_filter[s_index] = 0.0;
}
}
}
}
//synchronise the threads in this block (to complete filter loading)
cuda_syncthreads();
//load input chunk
#pragma unroll
for (int batch = 0; batch < CONV_BATCH_DEPTH; batch++) {
#pragma unroll
for (int b_load_k = 0; b_load_k < CONV_BLOCK_SIZE_K; b_load_k += CONV_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = b_load_k + k_id;
int g_load_k_id = b_tile_k + load_k_id;
#pragma unroll
for (int b_load_m = 0; b_load_m < CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M; b_load_m += CONV_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides. The global element is padded
int load_m_id = b_load_m + m_id;
int g_load_m_id = f_tile_m + b_tile_m + load_m_id - padding.height;
#pragma unroll
for (int b_load_n = 0; b_load_n < CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N; b_load_n += CONV_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides. The global element is padded
int load_n_id = b_load_n + n_id;
int g_load_n_id = f_tile_n + b_tile_n + load_n_id - padding.width;
//the index in shared memory for the current load element
int s_index = batch * (CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * CONV_BLOCK_SIZE_K +
load_k_id * (CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) +
load_m_id * (CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (0 <= g_load_n_id &&
g_load_n_id < input_shape.width &&
0 <= g_load_m_id &&
g_load_m_id < input_shape.height &&
g_load_k_id < input_shape.depth &&
b_elem + batch < batch_size) {
//the index in global memory for the current load element
int g_index = (b_elem + batch) * input_shape.width * input_shape.height * input_shape.depth +
g_load_k_id * input_shape.width * input_shape.height +
g_load_m_id * input_shape.width +
g_load_n_id;
//load the element into shared memory
s_load_block[s_index] = input[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_load_block[s_index] = 0.0;
}
}
}
}
}
//synchronise the threads in this block (to complete block loading)
cuda_syncthreads();
//convolve and write
#pragma unroll
for (int batch = 0; batch < CONV_BATCH_DEPTH; batch++) {
if (k_id == 0) {
#pragma unroll
for (int stride_m = 0; stride_m < CONV_THREAD_BLOCK_M; stride_m++) {
//stride index for the m direction
int start_m = m_id * CONV_THREAD_BLOCK_M + stride_m;
#pragma unroll
for (int stride_n = 0; stride_n < CONV_THREAD_BLOCK_N; stride_n++) {
//stride index for the n direction
int start_n = n_id * CONV_THREAD_BLOCK_N + stride_n;
//get the output indices for writing this element
int out_n_id = b_tile_n + start_n;
int out_m_id = b_tile_m + start_m;
int out_k_id = (b_elem + batch) * n_filters + filter_no;
//check that this element is within the range of the output shape
//to avoid illegal addressing
if (out_n_id < output_shape.width &&
out_m_id < output_shape.height &&
b_elem + batch < batch_size) {
//the writing address
int out_index = out_k_id * output_shape.width * output_shape.height +
out_m_id * output_shape.width +
out_n_id;
//calculate one dot product and cache it
float inc = calculate_conv2d_dot<CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N, CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M, CONV_THREAD_BLOCK_K>(
s_filter,
&s_load_block[batch * (CONV_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * CONV_BLOCK_SIZE_K],
start_n,
start_m
);
//accumulate the dot product to the output memory at the writing address
//this is an atomic operation to avoid memory races
atomic_add(&output[out_index], inc);
}
}
}
}
}
}
//kernel function for backward pass convolutions
__global__ void d_filter_outer_convolve_2d(
float* input,
float* filter,
float* output,
shape input_shape,
shape filter_shape,
shape filter_chunks,
shape output_shape,
shape padding,
int filter_no,
size_t batch_size) {
//declare shared memory for the filter and the loaded block. Copying to shared memory reduces overall data loads
__shared__ float s_filter[FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M * FILTER_BLOCK_SIZE_K];
__shared__ float s_load_block[(CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_OUTER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * CONV_OUTER_BLOCK_SIZE_K];
//3d thread ids. Each element is cached in the registers to make accesses quicker
int n_id = threadIdx.x;
int m_id = threadIdx.y;
int k_id = threadIdx.z;
//3d filter tile ids. Each filter tile is loaded in a different thread block.
int f_tile_n = blockIdx.x % filter_chunks.width * FILTER_BLOCK_SIZE_N;
int f_tile_m = blockIdx.y % filter_chunks.height * FILTER_BLOCK_SIZE_M;
int f_tile_k = blockIdx.z % filter_chunks.depth * FILTER_BLOCK_SIZE_K;
//3d block tile ids. Each block tile is loaded in a different thread block
int b_tile_n = blockIdx.x / filter_chunks.width * CONV_OUTER_BLOCK_SIZE_N;
int b_tile_m = blockIdx.y / filter_chunks.height * CONV_OUTER_BLOCK_SIZE_M;
//the current element in the batch
int b_elem = (blockIdx.z / filter_chunks.depth) * CONV_OUTER_BLOCK_SIZE_K;
//load filter chunk
#pragma unroll
for (int f_load_k = 0; f_load_k < FILTER_BLOCK_SIZE_K; f_load_k += CONV_OUTER_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = f_load_k + k_id;
int g_load_k_id = f_tile_k + load_k_id;
#pragma unroll
for (int f_load_m = 0; f_load_m < FILTER_BLOCK_SIZE_M; f_load_m += CONV_OUTER_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = f_load_m + m_id;
int g_load_m_id = f_tile_m + load_m_id;
#pragma unroll
for (int f_load_n = 0; f_load_n < FILTER_BLOCK_SIZE_N; f_load_n += CONV_OUTER_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = f_load_n + n_id;
int g_load_n_id = f_tile_n + load_n_id;
//the index in shared memory for the current load element
int s_index = load_k_id * FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M +
load_m_id * FILTER_BLOCK_SIZE_N +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (g_load_n_id < filter_shape.width &&
g_load_m_id < filter_shape.height &&
g_load_k_id < filter_shape.depth) {
//the index in global memory for the current load element
int g_index = g_load_k_id * filter_shape.width * filter_shape.height +
(filter_shape.height - 1 - g_load_m_id) * filter_shape.width +
(filter_shape.width - 1 - g_load_n_id);
//load the element into shared memory
s_filter[s_index] = filter[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_filter[s_index] = 0.0;
}
}
}
}
//synchronise the threads in this block (to complete filter loading)
cuda_syncthreads();
//load input block + edge
#pragma unroll
for (int b_load_k = 0; b_load_k < CONV_OUTER_BLOCK_SIZE_K; b_load_k += CONV_OUTER_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = b_load_k + k_id;
int g_load_k_id = b_elem + load_k_id;
#pragma unroll
for (int b_load_m = 0; b_load_m < CONV_OUTER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M; b_load_m += CONV_OUTER_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = b_load_m + m_id;
int g_load_m_id = b_tile_m + load_m_id - filter_shape.height + f_tile_m + 1 + padding.height;
#pragma unroll
for (int b_load_n = 0; b_load_n < CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N; b_load_n += CONV_OUTER_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = b_load_n + n_id;
int g_load_n_id = b_tile_n + load_n_id - filter_shape.width + f_tile_n + 1 + padding.width;
//the index in shared memory for the current load element
int s_index = load_k_id * (CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_OUTER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) +
load_m_id * (CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (0 <= g_load_n_id &&
g_load_n_id < input_shape.width &&
0 <= g_load_m_id &&
g_load_m_id < input_shape.height &&
g_load_k_id < batch_size) {
//the index in global memory for the current load element
int g_index = g_load_k_id * input_shape.width * input_shape.height * input_shape.depth +
filter_no * input_shape.width * input_shape.height +
g_load_m_id * input_shape.width +
g_load_n_id;
//load the element into shared memory
s_load_block[s_index] = input[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_load_block[s_index] = 0.0;
}
}
}
}
//synchronise the threads in this block (to complete block loading)
cuda_syncthreads();
//convolve and accumulate to output
#pragma unroll
for (int f_k = 0; f_k < FILTER_BLOCK_SIZE_K; f_k++) {
//stride index for the filter
int out_k_id = f_tile_k + f_k;
#pragma unroll
for (int b_k = 0; b_k < CONV_OUTER_THREAD_BLOCK_K; b_k++) {
//stride index for the k direction
int b_k_id = k_id * CONV_OUTER_THREAD_BLOCK_K + b_k;
#pragma unroll
for (int b_m = 0; b_m < CONV_OUTER_THREAD_BLOCK_M; b_m++) {
//stride index for the m direction
int b_m_id = m_id * CONV_OUTER_THREAD_BLOCK_M + b_m;
int out_m_id = b_tile_m + b_m_id;
#pragma unroll
for (int b_n = 0; b_n < CONV_OUTER_THREAD_BLOCK_N; b_n++) {
//stride index for the n direction
int b_n_id = n_id * CONV_OUTER_THREAD_BLOCK_N + b_n;
int out_n_id = b_tile_n + b_n_id;
//check that this element is within the range of the output shape
//to avoid illegal addressing
if (out_n_id < output_shape.width &&
out_m_id < output_shape.height &&
out_k_id < output_shape.depth &&
b_elem < batch_size) {
//the writing address
int out_index = (b_elem + b_k_id) * output_shape.width * output_shape.height * output_shape.depth +
out_k_id * output_shape.width * output_shape.height +
out_m_id * output_shape.width +
out_n_id;
//calculate one dot product and cache it
float inc = calculate_conv2d_dot<CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N, CONV_OUTER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M, 1>(
&s_filter[FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M * f_k],
&s_load_block[(CONV_OUTER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_OUTER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * b_k],
b_n_id,
b_m_id
);
//accumulate the dot product to the output memory at the writing address
//this is an atomic operation to avoid memory races
atomic_add(&output[out_index], inc);
}
}
}
}
}
}
//kernel function for training convolutional filters
__global__ void d_filter_convolve_2d_derivative(
float* input,
float* filter,
float* output,
shape input_shape,
shape filter_shape,
shape filter_chunks,
shape output_shape,
shape padding,
int input_depth,
size_t batch_size) {
//declare shared memory for the filter and the loaded block. Copying to shared memory reduces overall data loads
__shared__ float s_filter[FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M * FILTER_BLOCK_SIZE_K];
__shared__ float s_load_block[(CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_DER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) * CONV_BLOCK_SIZE_K];
//3d thread ids. Each element is cached in the registers to make accesses quicker
int n_id = threadIdx.x;
int m_id = threadIdx.y;
int k_id = threadIdx.z;
//3d filter tile ids. Each filter tile is loaded in a different thread block.
int f_tile_n = blockIdx.x % filter_chunks.width * FILTER_BLOCK_SIZE_N;
int f_tile_m = blockIdx.y % filter_chunks.height * FILTER_BLOCK_SIZE_M;
int f_tile_k = blockIdx.z % filter_chunks.depth * FILTER_BLOCK_SIZE_K;
//3d block tile ids. Each block tile is loaded in a different thread block
int b_tile_n = (blockIdx.x / filter_chunks.width) * CONV_DER_BLOCK_SIZE_N;
int b_tile_m = (blockIdx.y / filter_chunks.height) * CONV_DER_BLOCK_SIZE_M;
int b_tile_k = (blockIdx.z / filter_chunks.depth) % input_depth * CONV_DER_BLOCK_SIZE_K;
//the current element in the batch
int b_elem = blockIdx.z / (filter_chunks.depth * input_depth);
//load filter chunk
#pragma unroll
for (int f_load_k = 0; f_load_k < FILTER_BLOCK_SIZE_K; f_load_k += CONV_DER_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = f_load_k + k_id;
int g_load_k_id = f_tile_k + load_k_id;
#pragma unroll
for (int f_load_m = 0; f_load_m < FILTER_BLOCK_SIZE_M; f_load_m += CONV_DER_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = f_load_m + m_id;
int g_load_m_id = f_tile_m + load_m_id;
#pragma unroll
for (int f_load_n = 0; f_load_n < FILTER_BLOCK_SIZE_N; f_load_n += CONV_DER_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = f_load_n + n_id;
int g_load_n_id = f_tile_n + load_n_id;
//the index in shared memory for the current load element
int s_index = load_k_id * FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M +
load_m_id * FILTER_BLOCK_SIZE_N +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (g_load_n_id < filter_shape.width &&
g_load_m_id < filter_shape.height &&
g_load_k_id < filter_shape.depth) {
//the index in global memory for the current load element
int g_index = b_elem * filter_shape.width * filter_shape.height * filter_shape.depth +
g_load_k_id * filter_shape.width * filter_shape.height +
g_load_m_id * filter_shape.width +
g_load_n_id;
//load the element into shared memory
s_filter[s_index] = filter[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_filter[s_index] = 0.0;
}
}
}
}
//synchronise the threads in this block (to complete filter loading)
cuda_syncthreads();
//load input chunk
#pragma unroll
for (int b_load_k = 0; b_load_k < CONV_DER_BLOCK_SIZE_K; b_load_k += CONV_DER_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = b_load_k + k_id;
int g_load_k_id = b_tile_k + load_k_id;
#pragma unroll
for (int b_load_m = 0; b_load_m < CONV_DER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M; b_load_m += CONV_DER_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = b_load_m + m_id;
int g_load_m_id = f_tile_m + b_tile_m + load_m_id - padding.height;
#pragma unroll
for (int b_load_n = 0; b_load_n < CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N; b_load_n += CONV_DER_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = b_load_n + n_id;
int g_load_n_id = f_tile_n + b_tile_n + load_n_id - padding.width;
//the index in shared memory for the current load element
int s_index = load_k_id * (CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_DER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M) +
load_m_id * (CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) +
load_n_id;
//check that the global element is within the right range (to avoid illegal addressing)
//check that the batch element is within range
if (0 <= g_load_n_id &&
g_load_n_id < input_shape.width &&
0 <= g_load_m_id &&
g_load_m_id < input_shape.height &&
g_load_k_id < input_shape.depth) {
//the index in global memory for the current load element
int g_index = b_elem * input_shape.width * input_shape.height * input_shape.depth +
g_load_k_id * input_shape.width * input_shape.height +
g_load_m_id * input_shape.width +
g_load_n_id;
//load the element into shared memory
s_load_block[s_index] = input[g_index];
}
else {
//if the global element is outside the range set the shared element to 0
s_load_block[s_index] = 0.0;
}
}
}
}
//synchronise the threads in this block (to complete block loading)
cuda_syncthreads();
//convolve and write
#pragma unroll
for (int filter_k = 0; filter_k < FILTER_BLOCK_SIZE_K; filter_k++) {
#pragma unroll
for (int stride_k = 0; stride_k < CONV_DER_THREAD_BLOCK_K; stride_k++) {
//stride index for the k direction
int start_k = k_id * CONV_DER_THREAD_BLOCK_K + stride_k;
#pragma unroll
for (int stride_m = 0; stride_m < CONV_DER_THREAD_BLOCK_M; stride_m++) {
//stride index for the m direction
int start_m = m_id * CONV_DER_THREAD_BLOCK_M + stride_m;
#pragma unroll
for (int stride_n = 0; stride_n < CONV_DER_THREAD_BLOCK_N; stride_n++) {
//stride index for the n direction
int start_n = n_id * CONV_DER_THREAD_BLOCK_N + stride_n;
//writing indices
int out_n_id = b_tile_n + start_n;
int out_m_id = b_tile_m + start_m;
int out_layer_id = b_tile_k + start_k;
int out_filter_id = f_tile_k + filter_k;
int out_k_id = out_filter_id * output_shape.depth + out_layer_id;
//check that the output element is within range to avoid illegal
//addressing
if (out_n_id < output_shape.width &&
out_m_id < output_shape.height &&
out_filter_id < filter_shape.depth &&
out_layer_id < output_shape.depth) {
//the output index
int out_index = out_k_id * output_shape.width * output_shape.height +
out_m_id * output_shape.width +
out_n_id;
//calculate one dot product and cache it
float inc = calculate_conv2d_dot<CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N, CONV_DER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M, 1>(
&s_filter[filter_k * FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M],
&s_load_block[start_k * (CONV_DER_BLOCK_SIZE_N + FILTER_BLOCK_SIZE_N) * (CONV_DER_BLOCK_SIZE_M + FILTER_BLOCK_SIZE_M)],
start_n,
start_m
);
//accumulate the dot product to the output memory at the writing address
//this is an atomic operation to avoid memory races
atomic_add(&output[out_index], inc);
}
}
}
}
}
cuda_syncthreads();
}
//kernel function for forward pass pooling
__global__ void d_pool_2d(
float* input,
int* mask,
float* output,
shape input_shape,
shape pool_size,
shape stride,
shape output_shape,
shape padding,
size_t batch_size) {
//declare shared block to convolve over taking the max at each point
__shared__ float s_block[(POOL_BLOCK_SIZE_N + MAX_POOL_SIZE) * (POOL_BLOCK_SIZE_M + MAX_POOL_SIZE) * POOL_BLOCK_DEPTH];
//3d thread ids. Each element is cached in the registers to make accesses quicker
int n_id = threadIdx.x;
int m_id = threadIdx.y;
int k_id = threadIdx.z;
//3d tile ids. Each thread block pools a different tile of the input.
int n_tile = blockIdx.x * POOL_BLOCK_SIZE_N;
int m_tile = blockIdx.y * POOL_BLOCK_SIZE_M;
int k_tile = blockIdx.z * POOL_BLOCK_DEPTH;
//load image block into s_block
//each block pools one region of the image
#pragma unroll
for (int load_k = 0; load_k < POOL_BLOCK_DEPTH; load_k += POOL_THREAD_SIZE_K) {
//indices for the k dimension for the shared and global strides
int load_k_id = load_k + k_id;
int g_load_k_id = (load_k_id + k_tile) % input_shape.depth;
int g_load_elem_id = ((load_k_id + k_tile) / input_shape.depth);
int g_in_elem_id = g_load_elem_id * (input_shape.width * input_shape.height * input_shape.depth);
int g_out_elem_id = g_load_elem_id * (output_shape.width * output_shape.height * output_shape.depth);
#pragma unroll
for (int load_m = 0; load_m < POOL_BLOCK_SIZE_M + MAX_POOL_SIZE; load_m += POOL_THREAD_SIZE_M) {
//indices for the m dimension for the shared and global strides
int load_m_id = load_m + m_id;
int g_load_m_id = load_m_id + m_tile;
#pragma unroll
for (int load_n = 0; load_n < POOL_BLOCK_SIZE_N + MAX_POOL_SIZE; load_n += POOL_THREAD_SIZE_N) {
//indices for the n dimension for the shared and global strides
int load_n_id = load_n + n_id;
int g_load_n_id = load_n_id + n_tile;
//the shared memory index for this specific element
int s_load_id = load_k_id * (POOL_BLOCK_SIZE_N + MAX_POOL_SIZE) * (POOL_BLOCK_SIZE_M + MAX_POOL_SIZE) + load_m_id * (POOL_BLOCK_SIZE_N + MAX_POOL_SIZE) + load_n_id;
//check that the element is within range to avoid illegal addressing
if (g_load_n_id < input_shape.width &&
g_load_m_id < input_shape.height &&
g_load_elem_id < batch_size) {
//the global memory index for this specific element
int g_load_id = g_in_elem_id + g_load_k_id * input_shape.width * input_shape.height + g_load_m_id * input_shape.width + g_load_n_id;
//load the element from global memory into shared memory
s_block[s_load_id] = input[g_load_id];
}
else {
//if the element is out of range, load FLOAT_MIN instead
//so that it is never selected by the max pool
s_block[s_load_id] = FLOAT_MIN;
}
}
}
//synchronise the threads to complete block loading
cuda_syncthreads();
//loop through the strides, divided evenly among the threads
//each thread iterates over every pooling window whose top-left corner lies in its allocated block
//calculate offsets so that each block starts its strides at the right point (the grid is not always perfectly left aligned)
int n_offset = ((input_shape.width + padding.width - pool_size.width) - (n_tile + n_id * POOL_THREAD_BLOCK_N)) % stride.width;
int m_offset = ((input_shape.height + padding.height - pool_size.height) - (m_tile + m_id * POOL_THREAD_BLOCK_M)) % stride.height;
//stride over the image and calculate the max values for each pool region
for (int stride_m = m_offset; stride_m < POOL_THREAD_BLOCK_M; stride_m += stride.height) {
for (int stride_n = n_offset; stride_n < POOL_THREAD_BLOCK_N; stride_n += stride.width) {
//indices of the current stride for this thread
int stride_index_n = n_id * POOL_THREAD_BLOCK_N + stride_n;
int stride_index_m = m_id * POOL_THREAD_BLOCK_M + stride_m;
//initialise the running max to FLOAT_MIN so that any in-range pool value is larger
float tmp_max = FLOAT_MIN;
//cache indices of the greatest element for masking
int n_index = stride_index_n;
int m_index = stride_index_m;
for (int pool_m = 0; pool_m < pool_size.height; pool_m++) {
for (int pool_n = 0; pool_n < pool_size.width; pool_n++) {
//cache the index to compare without double loading
float tmp_read = s_block[load_k_id * (POOL_BLOCK_SIZE_N + MAX_POOL_SIZE) * (POOL_BLOCK_SIZE_M + MAX_POOL_SIZE) +
(stride_index_m + pool_m) * (POOL_BLOCK_SIZE_N + MAX_POOL_SIZE) +
stride_index_n + pool_n];
//if this element is greater than the current largest
if (tmp_read > tmp_max) {
//update the current largest
tmp_max = tmp_read;
//set the indices to this element
n_index = stride_index_n + pool_n;
m_index = stride_index_m + pool_m;
}
}
}
//write tmp_max to output
//out indices for writing
int g_out_n = (n_tile + stride_index_n) / stride.width;
int g_out_m = (m_tile + stride_index_m) / stride.height;
//the index of the largest element for the mask
int mask_out_index = (m_tile + m_index) * input_shape.width + (n_tile + n_index);
//check that the element is within the range of the output to avoid illegal addressing
if (g_out_n < output_shape.width && g_out_m < output_shape.height && g_load_elem_id < batch_size) {
//write output element to output array
output[g_out_elem_id + g_load_k_id * output_shape.width * output_shape.height + g_out_m * output_shape.width + g_out_n] = tmp_max;
//write mask element to mask array
mask[g_out_elem_id + g_load_k_id * output_shape.width * output_shape.height + g_out_m * output_shape.width + g_out_n] = mask_out_index;
}
}
}
}
}
//kernel function for backward pass pooling
__global__ void d_pool_2d_derivative(
float* input,
int* mask,
float* output,
size_t input_size,
size_t output_size,
size_t batch_size) {
//the thread index for this specific thread
int t_id = threadIdx.x + blockIdx.x * blockDim.x;
//the batch index for this block
int batch_index = blockIdx.y;
//check that the index is within range to avoid illegal addressing
if (t_id < input_size && batch_index < batch_size) {
//get the index to write out to which we stored in the mask during forward propagation
int out_id = mask[batch_index * input_size + t_id];
//accumulate the partial derivative value to the output index from the mask atomically
atomic_add(&output[batch_index * output_size + out_id], input[batch_index * input_size + t_id]);
}
}
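//illustrative note on the kernel above (not part of the original source): during forward
//pooling the mask stores, for every pooled output value, the flat offset of the input
//element that produced it, so if mask[batch_index * input_size + t_id] == 7 the incoming
//partial derivative input[batch_index * input_size + t_id] is atomically accumulated
//into output[batch_index * output_size + 7]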
template <int BLOCK_N, int BLOCK_M, int DEPTH>
__device__ float calculate_conv2d_dot(volatile float* s_filter, volatile float* s_load_block, int start_n, int start_m)
{
//register accumulator variable
float accum = 0;
//stride through the region of interest
#pragma unroll
for (int dot_k = 0; dot_k < DEPTH; dot_k++) {
#pragma unroll
for (int dot_m = 0; dot_m < FILTER_BLOCK_SIZE_M; dot_m++) {
#pragma unroll
for (int dot_n = 0; dot_n < FILTER_BLOCK_SIZE_N; dot_n++) {
//get the indices of the filter and the block
int f_index = dot_k * FILTER_BLOCK_SIZE_N * FILTER_BLOCK_SIZE_M + dot_m * FILTER_BLOCK_SIZE_N + dot_n;
int b_index = dot_k * BLOCK_N * BLOCK_M + (dot_m + start_m) * BLOCK_N + (dot_n + start_n);
//increment the accumulator by the product of the filter and block indices
accum += s_filter[f_index] * s_load_block[b_index];
}
}
}
//return the accumulated sum (the dot product)
return accum;
}
void filter_convolve_2d(
float* d_input,
float* d_filter,
float* d_output,
shape input_shape,
shape output_shape,
shape filter_shape,
shape padding,
size_t batch_size)
{
//divide the filter into chunks
shape filter_chunks = shape(
ceil_div(FILTER_BLOCK_SIZE_N, filter_shape.width),
ceil_div(FILTER_BLOCK_SIZE_M, filter_shape.height),
ceil_div(FILTER_BLOCK_SIZE_K, filter_shape.depth)
);
//declare the thread size of each block
dim3 threads_per_block(CONV_THREAD_SIZE_N, CONV_THREAD_SIZE_M, CONV_THREAD_SIZE_K);
//declare the number of blocks in each dimension
dim3 blocks_per_grid(
ceil_div(CONV_BLOCK_SIZE_N, input_shape.width + padding.width) * filter_chunks.width,
ceil_div(CONV_BLOCK_SIZE_M, input_shape.height + padding.height) * filter_chunks.height,
ceil_div(CONV_BATCH_DEPTH, batch_size) * filter_chunks.depth
);
//loop through each filter and launch the convolution kernel for each
for (int filter = 0; filter < output_shape.depth; filter++)
{
d_filter_convolve_2d << <blocks_per_grid, threads_per_block >> > (
d_input,
&d_filter[filter_shape.size() * filter],
d_output,
input_shape,
filter_shape,
filter_chunks,
output_shape,
padding,
filter,
output_shape.depth,
batch_size
);
}
}
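//usage sketch (illustrative only, not part of the original source): the buffer names,
//shapes and padding below are assumptions chosen to show how the wrapper above is driven;
//here a 28x28x3 input is convolved with 16 5x5x3 filters and no padding, giving a
//24x24x16 output per batch element
inline void example_filter_convolve_2d(float* d_in, float* d_filters, float* d_out, size_t batch_size)
{
	//assumed shapes (width, height, depth)
	shape input_shape(28, 28, 3);
	shape filter_shape(5, 5, 3);
	shape padding(0, 0, 0);
	shape output_shape(24, 24, 16);
	filter_convolve_2d(d_in, d_filters, d_out, input_shape, output_shape, filter_shape, padding, batch_size);
}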
void max_pool_2d(
float* d_input,
int* d_mask,
float* d_output,
shape input_shape,
shape pool_size,
shape stride,
shape output_shape,
shape padding,
size_t batch_size)
{
//declare the thread size of each block
dim3 threads_per_block(POOL_THREAD_SIZE_N, POOL_THREAD_SIZE_M, POOL_THREAD_SIZE_K);
//declare the number of blocks in each dimension
dim3 blocks_per_grid(
ceil_div(POOL_BLOCK_SIZE_N, input_shape.width + padding.width),
ceil_div(POOL_BLOCK_SIZE_M, input_shape.height + padding.height),
ceil_div(POOL_BLOCK_DEPTH, input_shape.depth * batch_size)
);
//launch the pooling kernel
d_pool_2d << <blocks_per_grid, threads_per_block >> > (d_input, d_mask, d_output, input_shape, pool_size, stride, output_shape, padding, batch_size);
}
void filter_outer_convolve_2d(
float* d_input,
float* d_filter,
float* d_output,
shape input_shape,
shape output_shape,
shape filter_shape,
shape padding,
size_t batch_size)
{
//divide the filter into chunks
shape filter_chunks = shape(
ceil_div(FILTER_BLOCK_SIZE_N, filter_shape.width),
ceil_div(FILTER_BLOCK_SIZE_M, filter_shape.height),
ceil_div(FILTER_BLOCK_SIZE_K, filter_shape.depth)
);
//declare the thread size of each block
dim3 threads_per_block(CONV_OUTER_THREAD_SIZE_N, CONV_OUTER_THREAD_SIZE_M, CONV_OUTER_THREAD_SIZE_K);
//declare the number of blocks in each dimension
dim3 blocks_per_grid(
ceil_div(CONV_OUTER_BLOCK_SIZE_N, input_shape.width) * filter_chunks.width,
ceil_div(CONV_OUTER_BLOCK_SIZE_M, input_shape.height) * filter_chunks.height,
ceil_div(CONV_OUTER_BLOCK_SIZE_K, batch_size) * filter_chunks.depth
);
//loop through each filter and launch the outer convolution kernel for each
for (int filter = 0; filter < input_shape.depth; filter++) {
d_filter_outer_convolve_2d << <blocks_per_grid, threads_per_block >> > (
d_input,
&d_filter[filter_shape.size() * filter],
d_output,
input_shape,
filter_shape,
filter_chunks,
output_shape,
padding,
filter,
batch_size
);
}
}
void filter_convolve_2d_derivative(
float* d_input,
float* d_pds,
float* d_output,
shape input_shape,
shape pd_shape,
shape output_shape,
shape padding,
size_t batch_size)
{
//divide the filter into chunks
shape filter_chunks = shape(
ceil_div(FILTER_BLOCK_SIZE_N, pd_shape.width),
ceil_div(FILTER_BLOCK_SIZE_M, pd_shape.height),
ceil_div(FILTER_BLOCK_SIZE_K, pd_shape.depth)
);
//declare the thread size of each block
dim3 threads_per_block(CONV_DER_THREAD_SIZE_N, CONV_DER_THREAD_SIZE_M, CONV_DER_THREAD_SIZE_K);
//declare the number of blocks in each dimension
dim3 blocks_per_grid(
ceil_div(CONV_DER_BLOCK_SIZE_N, input_shape.width) * filter_chunks.width,
ceil_div(CONV_DER_BLOCK_SIZE_M, input_shape.height) * filter_chunks.height,
ceil_div(CONV_DER_BLOCK_SIZE_K, input_shape.depth) * filter_chunks.depth * batch_size
);
//launch the convolutional derivative kernel
d_filter_convolve_2d_derivative << <blocks_per_grid, threads_per_block >> > (
d_input,
d_pds,
d_output,
input_shape,
pd_shape,
filter_chunks,
output_shape,
padding,
ceil_div(CONV_DER_BLOCK_SIZE_K, input_shape.depth),
batch_size
);
}
void max_pool_2d_derivative(
float* d_input,
int* d_mask,
float* d_output,
shape input_shape,
shape output_shape,
size_t batch_size)
{
//work out the total size of each input layer
size_t in_size = input_shape.width * input_shape.height;
//work out the total size of each output layer
size_t out_size = output_shape.width * output_shape.height;
//declare the number of threads per block
dim3 threads_per_block(in_size, 1);
//declare the number of blocks
dim3 blocks_per_grid(1, batch_size * output_shape.depth);
//if there are too many threads, split the threads among different blocks
if (in_size > BLOCK_SIZE) {
threads_per_block.x = BLOCK_SIZE;
blocks_per_grid.x = ceil_div(BLOCK_SIZE, in_size);
}
//launch the pool derivative kernel
d_pool_2d_derivative << <blocks_per_grid, threads_per_block >> > (d_input, d_mask, d_output, in_size, out_size, batch_size * output_shape.depth);
}
}
}
|
63cf002d150776987604caf0869fc5878457dfaa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
//replaces every interior element of the matrix with the bitwise complement of its
//binary representation written out as a base-10 digit string; border elements
//(first/last row or column) are copied through unchanged
__global__ void transpose(int *a,int *t)
{
    //one thread per matrix element: block index selects the row, thread index the column
    int id=blockIdx.x*blockDim.x+threadIdx.x;
    int flag=0,comp,j=1;
    //flag border elements so they are left untouched
    if(blockIdx.x==0 || (blockIdx.x+1)%gridDim.x == 0 || threadIdx.x==0 || (threadIdx.x+1)%blockDim.x==0)
        flag=1;
    if(!flag)
    {
        t[id]=0;
        //peel off the binary digits of a[id], invert each one and
        //accumulate it as the next decimal digit of t[id]
        while(a[id]!=0){
            comp=a[id]%2;
            if(comp)
                comp=0;
            else
                comp=1;
            t[id]+=(comp*j);
            j*=10;
            a[id]/=2;
        }
    }
    else
    {
        //border element: copy through unchanged
        t[id]=a[id];
    }
}
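//illustrative note (not part of the original source): for the 4x4 input defined in main()
//below, the interior ids are 5, 6, 9 and 10; e.g. a[5] = 5 = binary 101, whose inverted
//digits 010 are emitted as the decimal value 10, so t[5] ends up as 10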
int main(void)
{
int *t,m,n,i,j;
int *d_a,*d_t;
m=4;
n=4;
int a[]={1,2,3,4,5,5,8,8,9,4,10,12,13,14,15,16};
int size=sizeof(int)*m*n;
t=(int*)malloc(m*n*sizeof(int));
hipMalloc((void**)&d_a,size);
hipMalloc((void**)&d_t,size);
hipMemcpy(d_a,a,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( transpose), dim3(m),dim3(n), 0, 0, d_a,d_t);
hipMemcpy(t,d_t,size,hipMemcpyDeviceToHost);
printf("result vector is:\n");
for(i=0;i<m;i++)
{
for(j=0;j<n;j++)
{
printf("%d\t",t[i*n+j] );
}
printf("\n");
}
hipFree(d_a);
hipFree(d_t);
return 0;
}
|
63cf002d150776987604caf0869fc5878457dfaa.cu
|
#include<stdio.h>
#include<stdlib.h>
//replaces every interior element of the matrix with the bitwise complement of its
//binary representation written out as a base-10 digit string; border elements
//(first/last row or column) are copied through unchanged
__global__ void transpose(int *a,int *t)
{
    //one thread per matrix element: block index selects the row, thread index the column
    int id=blockIdx.x*blockDim.x+threadIdx.x;
    int flag=0,comp,j=1;
    //flag border elements so they are left untouched
    if(blockIdx.x==0 || (blockIdx.x+1)%gridDim.x == 0 || threadIdx.x==0 || (threadIdx.x+1)%blockDim.x==0)
        flag=1;
    if(!flag)
    {
        t[id]=0;
        //peel off the binary digits of a[id], invert each one and
        //accumulate it as the next decimal digit of t[id]
        while(a[id]!=0){
            comp=a[id]%2;
            if(comp)
                comp=0;
            else
                comp=1;
            t[id]+=(comp*j);
            j*=10;
            a[id]/=2;
        }
    }
    else
    {
        //border element: copy through unchanged
        t[id]=a[id];
    }
}
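//illustrative note (not part of the original source): for the 4x4 input defined in main()
//below, the interior ids are 5, 6, 9 and 10; e.g. a[5] = 5 = binary 101, whose inverted
//digits 010 are emitted as the decimal value 10, so t[5] ends up as 10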
int main(void)
{
int *t,m,n,i,j;
int *d_a,*d_t;
m=4;
n=4;
int a[]={1,2,3,4,5,5,8,8,9,4,10,12,13,14,15,16};
int size=sizeof(int)*m*n;
t=(int*)malloc(m*n*sizeof(int));
cudaMalloc((void**)&d_a,size);
cudaMalloc((void**)&d_t,size);
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
transpose<<<m,n>>>(d_a,d_t);
cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost);
printf("result vector is:\n");
for(i=0;i<m;i++)
{
for(j=0;j<n;j++)
{
printf("%d\t",t[i*n+j] );
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_t);
return 0;
}
|
2ea4efee3227f73eebfe0e4774e48ebee1a8bd7c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determining the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
double _t_86_ = 2.0 * mu[i+2][j][k];
_t_86_ += la[i+2][j][k];
double _t_83_ = met1[i+2][j][k] * _t_86_ * met2[i+2][j][k];
double _v_38_ = c2 * u1[i+2][j][k+2];
double _v_0_ = c2 * u1[i+2][j][k+2];
double _v_76_ = c2 * u1[i][j+2][k+2];
_v_76_ -= c2 * u1[i][j-2][k+2];
double _v_79_ = c2 * u2[i][j+2][k+2];
_v_79_ -= c2 * u2[i][j-2][k+2];
double _v_82_ = c2 * u1[i][j+2][k-2];
_v_82_ -= c2 * u1[i][j-2][k-2];
double _v_85_ = c2 * u2[i][j+2][k-2];
_v_85_ -= c2 * u2[i][j-2][k-2];
double _v_89_ = c2 * u1[i][j+2][k+1];
_v_89_ -= c2 * u1[i][j-2][k+1];
double _v_92_ = c2 * u2[i][j+2][k+1];
_v_92_ -= c2 * u2[i][j-2][k+1];
double _v_95_ = c2 * u1[i][j+2][k-1];
_v_95_ -= c2 * u1[i][j-2][k-1];
double _v_98_ = c2 * u2[i][j+2][k-1];
_v_98_ -= c2 * u2[i][j-2][k-1];
_v_38_ -= c2 * u1[i+2][j][k-2];
double _v_9_ = c2 * u1[i+2][j][k-2];
double _t_84_ = _v_38_;
double _v_39_ = c1 * u1[i+2][j][k+1];
double _v_77_ = c1 * u1[i][j+1][k+2];
_v_77_ -= c1 * u1[i][j-1][k+2];
double _v_80_ = c1 * u2[i][j+1][k+2];
_v_80_ -= c1 * u2[i][j-1][k+2];
double _v_83_ = c1 * u1[i][j+1][k-2];
_v_83_ -= c1 * u1[i][j-1][k-2];
double _v_86_ = c1 * u2[i][j+1][k-2];
_v_86_ -= c1 * u2[i][j-1][k-2];
double _v_90_ = c1 * u1[i][j+1][k+1];
_v_90_ -= c1 * u1[i][j-1][k+1];
double _v_93_ = c1 * u2[i][j+1][k+1];
_v_93_ -= c1 * u2[i][j-1][k+1];
double _v_96_ = c1 * u1[i][j+1][k-1];
_v_96_ -= c1 * u1[i][j-1][k-1];
double _v_99_ = c1 * u2[i][j+1][k-1];
_v_99_ -= c1 * u2[i][j-1][k-1];
_v_39_ -= c1 * u1[i+2][j][k-1];
_t_84_ += _v_39_;
double _v_40_ = strx[i] * _t_83_ * _t_84_;
double _v_19_ = c2 * u1[i+2][j][k+1];
double _v_28_ = c2 * u1[i+2][j][k-1];
double _v_56_ = c2 * _v_40_;
double _v_41_ = c2 * u2[i+2][j][k+2];
double _v_3_ = c2 * u2[i+2][j][k+2];
_v_41_ -= c2 * u2[i+2][j][k-2];
double _v_12_ = c2 * u2[i+2][j][k-2];
double _t_91_ = _v_41_;
double _v_42_ = c1 * u2[i+2][j][k+1];
_v_42_ -= c1 * u2[i+2][j][k-1];
_t_91_ += _v_42_;
double _t_90_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
double _v_43_ = stry[j] * _t_90_ * _t_91_;
double _v_22_ = c2 * u2[i+2][j][k+1];
double _v_31_ = c2 * u2[i+2][j][k-1];
_v_56_ += c2 * _v_43_;
double _t_95_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
double _v_44_ = c2 * u3[i+2][j][k+2];
double _v_6_ = c2 * u3[i+2][j][k+2];
_v_44_ -= c2 * u3[i+2][j][k-2];
double _v_15_ = c2 * u3[i+2][j][k-2];
double _t_96_ = _v_44_;
double _v_45_ = c1 * u3[i+2][j][k+1];
_v_45_ -= c1 * u3[i+2][j][k-1];
_t_96_ += _v_45_;
double _v_46_ = _t_95_ * _t_96_;
double _v_25_ = c2 * u3[i+2][j][k+1];
double _v_34_ = c2 * u3[i+2][j][k-1];
_v_56_ += c2 * _v_46_;
double _t_104_ = 2.0 * mu[i-2][j][k];
_t_104_ += la[i-2][j][k];
double _t_101_ = met1[i-2][j][k] * _t_104_ * met2[i-2][j][k];
double _v_47_ = c2 * u1[i-2][j][k+2];
_v_0_ -= c2 * u1[i-2][j][k+2];
_v_47_ -= c2 * u1[i-2][j][k-2];
_v_9_ -= c2 * u1[i-2][j][k-2];
double _t_102_ = _v_47_;
double _v_48_ = c1 * u1[i-2][j][k+1];
_v_48_ -= c1 * u1[i-2][j][k-1];
_t_102_ += _v_48_;
double _v_49_ = strx[i] * _t_101_ * _t_102_;
_v_19_ -= c2 * u1[i-2][j][k+1];
_v_28_ -= c2 * u1[i-2][j][k-1];
_v_56_ += c2 * _v_49_;
double _v_50_ = c2 * u2[i-2][j][k+2];
_v_3_ -= c2 * u2[i-2][j][k+2];
_v_50_ -= c2 * u2[i-2][j][k-2];
_v_12_ -= c2 * u2[i-2][j][k-2];
double _t_109_ = _v_50_;
double _v_51_ = c1 * u2[i-2][j][k+1];
_v_51_ -= c1 * u2[i-2][j][k-1];
_t_109_ += _v_51_;
double _t_108_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
double _v_52_ = stry[j] * _t_108_ * _t_109_;
_v_22_ -= c2 * u2[i-2][j][k+1];
_v_31_ -= c2 * u2[i-2][j][k-1];
_v_56_ += c2 * _v_52_;
double _t_113_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
double _v_53_ = c2 * u3[i-2][j][k+2];
_v_6_ -= c2 * u3[i-2][j][k+2];
_v_53_ -= c2 * u3[i-2][j][k-2];
_v_15_ -= c2 * u3[i-2][j][k-2];
double _t_114_ = _v_53_;
double _v_54_ = c1 * u3[i-2][j][k+1];
_v_54_ -= c1 * u3[i-2][j][k-1];
_t_114_ += _v_54_;
double _v_55_ = _t_113_ * _t_114_;
_v_25_ -= c2 * u3[i-2][j][k+1];
_v_34_ -= c2 * u3[i-2][j][k-1];
_v_56_ += c2 * _v_55_;
double _t_79_ = stry[j] * _v_56_;
double _t_123_ = 2.0 * mu[i+1][j][k];
_t_123_ += la[i+1][j][k];
double _t_120_ = met1[i+1][j][k] * _t_123_ * met2[i+1][j][k];
double _v_57_ = c2 * u1[i+1][j][k+2];
_v_57_ -= c2 * u1[i+1][j][k-2];
double _t_121_ = _v_57_;
double _v_58_ = c1 * u1[i+1][j][k+1];
double _v_20_ = c1 * u1[i+1][j][k+1];
_v_58_ -= c1 * u1[i+1][j][k-1];
double _v_29_ = c1 * u1[i+1][j][k-1];
_t_121_ += _v_58_;
double _v_59_ = strx[i] * _t_120_ * _t_121_;
double _v_1_ = c1 * u1[i+1][j][k+2];
double _v_10_ = c1 * u1[i+1][j][k-2];
double _v_75_ = c1 * _v_59_;
double _v_60_ = c2 * u2[i+1][j][k+2];
_v_60_ -= c2 * u2[i+1][j][k-2];
double _t_128_ = _v_60_;
double _v_61_ = c1 * u2[i+1][j][k+1];
double _v_23_ = c1 * u2[i+1][j][k+1];
_v_61_ -= c1 * u2[i+1][j][k-1];
double _v_32_ = c1 * u2[i+1][j][k-1];
_t_128_ += _v_61_;
double _t_127_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
double _v_62_ = stry[j] * _t_127_ * _t_128_;
double _v_4_ = c1 * u2[i+1][j][k+2];
double _v_13_ = c1 * u2[i+1][j][k-2];
_v_75_ += c1 * _v_62_;
double _t_132_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
double _v_63_ = c2 * u3[i+1][j][k+2];
_v_63_ -= c2 * u3[i+1][j][k-2];
double _t_133_ = _v_63_;
double _v_64_ = c1 * u3[i+1][j][k+1];
double _v_26_ = c1 * u3[i+1][j][k+1];
_v_64_ -= c1 * u3[i+1][j][k-1];
double _v_35_ = c1 * u3[i+1][j][k-1];
_t_133_ += _v_64_;
double _v_65_ = _t_132_ * _t_133_;
double _v_7_ = c1 * u3[i+1][j][k+2];
double _v_16_ = c1 * u3[i+1][j][k-2];
_v_75_ += c1 * _v_65_;
double _t_141_ = 2.0 * mu[i-1][j][k];
_t_141_ += la[i-1][j][k];
double _t_138_ = met1[i-1][j][k] * _t_141_ * met2[i-1][j][k];
double _v_66_ = c2 * u1[i-1][j][k+2];
_v_66_ -= c2 * u1[i-1][j][k-2];
double _t_139_ = _v_66_;
double _v_67_ = c1 * u1[i-1][j][k+1];
_v_20_ -= c1 * u1[i-1][j][k+1];
_v_67_ -= c1 * u1[i-1][j][k-1];
_v_29_ -= c1 * u1[i-1][j][k-1];
_t_139_ += _v_67_;
double _v_68_ = strx[i] * _t_138_ * _t_139_;
_v_1_ -= c1 * u1[i-1][j][k+2];
_v_10_ -= c1 * u1[i-1][j][k-2];
_v_75_ += c1 * _v_68_;
double _v_69_ = c2 * u2[i-1][j][k+2];
_v_69_ -= c2 * u2[i-1][j][k-2];
double _t_146_ = _v_69_;
double _v_70_ = c1 * u2[i-1][j][k+1];
_v_23_ -= c1 * u2[i-1][j][k+1];
_v_70_ -= c1 * u2[i-1][j][k-1];
_v_32_ -= c1 * u2[i-1][j][k-1];
_t_146_ += _v_70_;
double _t_145_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
double _v_71_ = stry[j] * _t_145_ * _t_146_;
_v_4_ -= c1 * u2[i-1][j][k+2];
_v_13_ -= c1 * u2[i-1][j][k-2];
_v_75_ += c1 * _v_71_;
double _t_150_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
double _v_72_ = c2 * u3[i-1][j][k+2];
_v_72_ -= c2 * u3[i-1][j][k-2];
double _t_151_ = _v_72_;
double _v_73_ = c1 * u3[i-1][j][k+1];
_v_26_ -= c1 * u3[i-1][j][k+1];
_v_73_ -= c1 * u3[i-1][j][k-1];
_v_35_ -= c1 * u3[i-1][j][k-1];
_t_151_ += _v_73_;
double _v_74_ = _t_150_ * _t_151_;
_v_7_ -= c1 * u3[i-1][j][k+2];
_v_16_ -= c1 * u3[i-1][j][k-2];
_v_75_ += c1 * _v_74_;
_t_79_ += stry[j] * _v_75_;
double r1ic0jc0kc0 = r1[i][j][k];
r1ic0jc0kc0 += _t_79_;
double _t_17_ = _v_6_;
_t_17_ += _v_7_;
double _t_16_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
double _v_8_ = stry[j] * _t_16_ * _t_17_;
double _v_18_ = c2 * _v_8_;
double _t_5_ = _v_0_;
_t_5_ += _v_1_;
double _t_7_ = 2.0 * mu[i][j][k+2];
double _t_10_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_7_ += la[i][j][k+2];
double _t_4_ = met1[i][j][k+2] * _t_7_ * met2[i][j][k+2];
double _t_164_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
double _t_3_ = _t_4_ * _t_5_;
double _v_2_ = stry[j] * _t_3_ * strx[i];
_v_18_ += c2 * _v_2_;
double _t_11_ = _v_3_;
_t_11_ += _v_4_;
double _v_5_ = _t_10_ * _t_11_;
_v_18_ += c2 * _v_5_;
double _t_24_ = _v_9_;
_t_24_ += _v_10_;
double _t_26_ = 2.0 * mu[i][j][k-2];
_t_26_ += la[i][j][k-2];
double _t_23_ = met1[i][j][k-2] * _t_26_ * met2[i][j][k-2];
double _t_176_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
double _t_22_ = _t_23_ * _t_24_;
double _v_11_ = stry[j] * _t_22_ * strx[i];
_v_18_ += c2 * _v_11_;
double _t_30_ = _v_12_;
_t_30_ += _v_13_;
double _t_29_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
double _t_35_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
double _v_14_ = _t_29_ * _t_30_;
_v_18_ += c2 * _v_14_;
double _t_36_ = _v_15_;
_t_36_ += _v_16_;
double _v_17_ = stry[j] * _t_35_ * _t_36_;
_v_18_ += c2 * _v_17_;
double _t_0_ = _v_18_;
double _t_56_ = _v_25_;
_t_56_ += _v_26_;
double _t_55_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
double _v_27_ = stry[j] * _t_55_ * _t_56_;
double _v_37_ = c1 * _v_27_;
double _t_44_ = _v_19_;
_t_44_ += _v_20_;
double _t_46_ = 2.0 * mu[i][j][k+1];
double _t_49_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_46_ += la[i][j][k+1];
double _t_43_ = met1[i][j][k+1] * _t_46_ * met2[i][j][k+1];
double _t_189_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
double _t_42_ = _t_43_ * _t_44_;
double _v_21_ = stry[j] * _t_42_ * strx[i+2];
_v_37_ += c1 * _v_21_;
double _t_50_ = _v_22_;
_t_50_ += _v_23_;
double _v_24_ = _t_49_ * _t_50_;
_v_37_ += c1 * _v_24_;
double _t_63_ = _v_28_;
_t_63_ += _v_29_;
double _t_65_ = 2.0 * mu[i][j][k-1];
_t_65_ += la[i][j][k-1];
double _t_62_ = met1[i][j][k-1] * _t_65_ * met2[i][j][k-1];
double _t_201_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
double _t_61_ = _t_62_ * _t_63_;
double _v_30_ = stry[j] * _t_61_ * strx[i-2];
_v_37_ += c1 * _v_30_;
double _t_69_ = _v_31_;
_t_69_ += _v_32_;
double _t_68_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
double _t_74_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
double _v_33_ = _t_68_ * _t_69_;
_v_37_ += c1 * _v_33_;
double _t_75_ = _v_34_;
_t_75_ += _v_35_;
double _v_36_ = stry[j] * _t_74_ * _t_75_;
_v_37_ += c1 * _v_36_;
_t_0_ += _v_37_;
r1ic0jc0kc0 += _t_0_;
double _t_159_ = _t_10_;
double _t_160_ = _v_76_;
_t_160_ += _v_77_;
double _t_158_ = _t_159_ * _t_160_;
double _v_78_ = strx[i] * _t_158_ * stry[j+2];
double _v_88_ = c2 * _v_78_;
double _t_165_ = _v_79_;
_t_165_ += _v_80_;
double _v_81_ = _t_164_ * _t_165_;
_v_88_ += c2 * _v_81_;
double _t_171_ = _t_29_;
double _t_172_ = _v_82_;
_t_172_ += _v_83_;
double _t_170_ = _t_171_ * _t_172_;
double _v_84_ = strx[i] * _t_170_ * stry[j];
_v_88_ += c2 * _v_84_;
double _t_177_ = _v_85_;
_t_177_ += _v_86_;
double _v_87_ = _t_176_ * _t_177_;
_v_88_ += c2 * _v_87_;
double _t_155_ = _v_88_;
double _t_184_ = _t_49_;
double _t_185_ = _v_89_;
_t_185_ += _v_90_;
double _t_183_ = _t_184_ * _t_185_;
double _v_91_ = strx[i] * _t_183_ * stry[j-2];
double _v_101_ = c1 * _v_91_;
double _t_190_ = _v_92_;
_t_190_ += _v_93_;
double _v_94_ = _t_189_ * _t_190_;
_v_101_ += c1 * _v_94_;
double _t_196_ = _t_68_;
double _t_197_ = _v_95_;
_t_197_ += _v_96_;
double _t_195_ = _t_196_ * _t_197_;
double _v_97_ = strx[i] * _t_195_ * stry[j];
_v_101_ += c1 * _v_97_;
double _t_202_ = _v_98_;
_t_202_ += _v_99_;
double _v_100_ = _t_201_ * _t_202_;
_v_101_ += c1 * _v_100_;
_t_155_ += _v_101_;
r1ic0jc0kc0 += _t_155_;
r1[i][j][k] = r1ic0jc0kc0;
r1[i][j][k] += c2*(
mu[i][j+2][k]*met3[i][j+2][k]*met1[i][j+2][k]*(
c2*(u1[i][j+2][k+2]-u1[i][j+2][k-2]) +
c1*(u1[i][j+2][k+1]-u1[i][j+2][k-1]) )*stry[j+1]*strx[i]
+ mu[i][j+2][k]*met2[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i][j+2][k+2]-u2[i][j+2][k-2]) +
c1*(u2[i][j+2][k+1]-u2[i][j+2][k-1]) )
+ ( mu[i][j-2][k]*met3[i][j-2][k]*met1[i][j-2][k]*(
c2*(u1[i][j-2][k+2]-u1[i][j-2][k-2]) +
c1*(u1[i][j-2][k+1]-u1[i][j-2][k-1]) )*stry[j]*strx[i]
+ mu[i][j-2][k]*met2[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i][j-2][k+2]-u2[i][j-2][k-2]) +
c1*(u2[i][j-2][k+1]-u2[i][j-2][k-1]) ) )
) + c1*(
mu[i][j+1][k]*met3[i][j+1][k]*met1[i][j+1][k]*(
c2*(u1[i][j+1][k+2]-u1[i][j+1][k-2]) +
c1*(u1[i][j+1][k+1]-u1[i][j+1][k-1]) )*stry[j-1]*strx[i]
+ mu[i][j+1][k]*met2[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i][j+1][k+2]-u2[i][j+1][k-2]) +
c1*(u2[i][j+1][k+1]-u2[i][j+1][k-1]) )
+ ( mu[i][j-1][k]*met3[i][j-1][k]*met1[i][j-1][k]*(
c2*(u1[i][j-1][k+2]-u1[i][j-1][k-2]) +
c1*(u1[i][j-1][k+1]-u1[i][j-1][k-1]) )*stry[j]*strx[i]
+ mu[i][j-1][k]*met2[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i][j-1][k+2]-u2[i][j-1][k-2]) +
c1*(u2[i][j-1][k+1]-u2[i][j-1][k-1]) ) ) );
r1[i][j][k] +=
c2*( mu[i][j+2][k]*met1[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i+2][j+2][k]-u2[i-2][j+2][k]) +
c1*(u2[i+1][j+2][k]-u2[i-1][j+2][k]) )
+ mu[i][j-2][k]*met1[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i+2][j-2][k]-u2[i-2][j-2][k])+
c1*(u2[i+1][j-2][k]-u2[i-1][j-2][k]) )
) +
c1*( mu[i][j+1][k]*met1[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i+2][j+1][k]-u2[i-2][j+1][k]) +
c1*(u2[i+1][j+1][k]-u2[i-1][j+1][k]) )
+ mu[i][j-1][k]*met1[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i+2][j-1][k]-u2[i-2][j-1][k]) +
c1*(u2[i+1][j-1][k]-u2[i-1][j-1][k])))
+
c2*( la[i+2][j][k]*met1[i+2][j][k]*met1[i+2][j][k]*(
c2*(u2[i+2][j+2][k]-u2[i+2][j-2][k]) +
c1*(u2[i+2][j+1][k]-u2[i+2][j-1][k]) )
+ la[i-2][j][k]*met1[i-2][j][k]*met1[i-2][j][k]*(
c2*(u2[i-2][j+2][k]-u2[i-2][j-2][k])+
c1*(u2[i-2][j+1][k]-u2[i-2][j-1][k]) )
) +
c1*( la[i+1][j][k]*met1[i+1][j][k]*met1[i+1][j][k]*(
c2*(u2[i+1][j+2][k]-u2[i+1][j-2][k]) +
c1*(u2[i+1][j+1][k]-u2[i+1][j-1][k]) )
+ la[i-1][j][k]*met1[i-1][j][k]*met1[i-1][j][k]*(
c2*(u2[i-1][j+2][k]-u2[i-1][j-2][k]) +
c1*(u2[i-1][j+1][k]-u2[i-1][j-1][k])));
}
}
}
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
hipMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
hipMemcpy (r1, h_r1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u1;
hipMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
hipMemcpy (u1, h_u1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u2;
hipMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
hipMemcpy (u2, h_u2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u3;
hipMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
hipMemcpy (u3, h_u3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *mu;
hipMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *la;
hipMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met1;
hipMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
hipMemcpy (met1, h_met1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met2;
hipMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
hipMemcpy (met2, h_met2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met3;
hipMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
hipMemcpy (met3, h_met3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met4;
hipMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
hipMemcpy (met4, h_met4, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *strx;
hipMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice);
double *stry;
hipMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
hipLaunchKernelGGL(( curvi) , dim3(gridconfig), dim3(blockconfig), 0, 0, r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
hipMemcpy (h_r1, r1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
}
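/* usage sketch (illustrative only, not part of the original source): the kernel casts
   every field to double (*)[304][304], so host_code as written is only consistent for
   N == 304. A minimal driver under that assumption could allocate each volume field as
       double *h_r1 = (double*) calloc((size_t)N * N * N, sizeof(double));
   (h_strx and h_stry need N entries each), then call
       host_code(h_r1, h_u1, h_u2, h_u3, h_mu, h_la, h_met1, h_met2, h_met3, h_met4,
                 h_strx, h_stry, c1, c2, 304);
   with c1 and c2 set to the caller's finite-difference coefficients. */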
|
2ea4efee3227f73eebfe0e4774e48ebee1a8bd7c.cu
|
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determining the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
double _t_86_ = 2.0 * mu[i+2][j][k];
_t_86_ += la[i+2][j][k];
double _t_83_ = met1[i+2][j][k] * _t_86_ * met2[i+2][j][k];
double _v_38_ = c2 * u1[i+2][j][k+2];
double _v_0_ = c2 * u1[i+2][j][k+2];
double _v_76_ = c2 * u1[i][j+2][k+2];
_v_76_ -= c2 * u1[i][j-2][k+2];
double _v_79_ = c2 * u2[i][j+2][k+2];
_v_79_ -= c2 * u2[i][j-2][k+2];
double _v_82_ = c2 * u1[i][j+2][k-2];
_v_82_ -= c2 * u1[i][j-2][k-2];
double _v_85_ = c2 * u2[i][j+2][k-2];
_v_85_ -= c2 * u2[i][j-2][k-2];
double _v_89_ = c2 * u1[i][j+2][k+1];
_v_89_ -= c2 * u1[i][j-2][k+1];
double _v_92_ = c2 * u2[i][j+2][k+1];
_v_92_ -= c2 * u2[i][j-2][k+1];
double _v_95_ = c2 * u1[i][j+2][k-1];
_v_95_ -= c2 * u1[i][j-2][k-1];
double _v_98_ = c2 * u2[i][j+2][k-1];
_v_98_ -= c2 * u2[i][j-2][k-1];
_v_38_ -= c2 * u1[i+2][j][k-2];
double _v_9_ = c2 * u1[i+2][j][k-2];
double _t_84_ = _v_38_;
double _v_39_ = c1 * u1[i+2][j][k+1];
double _v_77_ = c1 * u1[i][j+1][k+2];
_v_77_ -= c1 * u1[i][j-1][k+2];
double _v_80_ = c1 * u2[i][j+1][k+2];
_v_80_ -= c1 * u2[i][j-1][k+2];
double _v_83_ = c1 * u1[i][j+1][k-2];
_v_83_ -= c1 * u1[i][j-1][k-2];
double _v_86_ = c1 * u2[i][j+1][k-2];
_v_86_ -= c1 * u2[i][j-1][k-2];
double _v_90_ = c1 * u1[i][j+1][k+1];
_v_90_ -= c1 * u1[i][j-1][k+1];
double _v_93_ = c1 * u2[i][j+1][k+1];
_v_93_ -= c1 * u2[i][j-1][k+1];
double _v_96_ = c1 * u1[i][j+1][k-1];
_v_96_ -= c1 * u1[i][j-1][k-1];
double _v_99_ = c1 * u2[i][j+1][k-1];
_v_99_ -= c1 * u2[i][j-1][k-1];
_v_39_ -= c1 * u1[i+2][j][k-1];
_t_84_ += _v_39_;
double _v_40_ = strx[i] * _t_83_ * _t_84_;
double _v_19_ = c2 * u1[i+2][j][k+1];
double _v_28_ = c2 * u1[i+2][j][k-1];
double _v_56_ = c2 * _v_40_;
double _v_41_ = c2 * u2[i+2][j][k+2];
double _v_3_ = c2 * u2[i+2][j][k+2];
_v_41_ -= c2 * u2[i+2][j][k-2];
double _v_12_ = c2 * u2[i+2][j][k-2];
double _t_91_ = _v_41_;
double _v_42_ = c1 * u2[i+2][j][k+1];
_v_42_ -= c1 * u2[i+2][j][k-1];
_t_91_ += _v_42_;
double _t_90_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
double _v_43_ = stry[j] * _t_90_ * _t_91_;
double _v_22_ = c2 * u2[i+2][j][k+1];
double _v_31_ = c2 * u2[i+2][j][k-1];
_v_56_ += c2 * _v_43_;
double _t_95_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
double _v_44_ = c2 * u3[i+2][j][k+2];
double _v_6_ = c2 * u3[i+2][j][k+2];
_v_44_ -= c2 * u3[i+2][j][k-2];
double _v_15_ = c2 * u3[i+2][j][k-2];
double _t_96_ = _v_44_;
double _v_45_ = c1 * u3[i+2][j][k+1];
_v_45_ -= c1 * u3[i+2][j][k-1];
_t_96_ += _v_45_;
double _v_46_ = _t_95_ * _t_96_;
double _v_25_ = c2 * u3[i+2][j][k+1];
double _v_34_ = c2 * u3[i+2][j][k-1];
_v_56_ += c2 * _v_46_;
double _t_104_ = 2.0 * mu[i-2][j][k];
_t_104_ += la[i-2][j][k];
double _t_101_ = met1[i-2][j][k] * _t_104_ * met2[i-2][j][k];
double _v_47_ = c2 * u1[i-2][j][k+2];
_v_0_ -= c2 * u1[i-2][j][k+2];
_v_47_ -= c2 * u1[i-2][j][k-2];
_v_9_ -= c2 * u1[i-2][j][k-2];
double _t_102_ = _v_47_;
double _v_48_ = c1 * u1[i-2][j][k+1];
_v_48_ -= c1 * u1[i-2][j][k-1];
_t_102_ += _v_48_;
double _v_49_ = strx[i] * _t_101_ * _t_102_;
_v_19_ -= c2 * u1[i-2][j][k+1];
_v_28_ -= c2 * u1[i-2][j][k-1];
_v_56_ += c2 * _v_49_;
double _v_50_ = c2 * u2[i-2][j][k+2];
_v_3_ -= c2 * u2[i-2][j][k+2];
_v_50_ -= c2 * u2[i-2][j][k-2];
_v_12_ -= c2 * u2[i-2][j][k-2];
double _t_109_ = _v_50_;
double _v_51_ = c1 * u2[i-2][j][k+1];
_v_51_ -= c1 * u2[i-2][j][k-1];
_t_109_ += _v_51_;
double _t_108_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
double _v_52_ = stry[j] * _t_108_ * _t_109_;
_v_22_ -= c2 * u2[i-2][j][k+1];
_v_31_ -= c2 * u2[i-2][j][k-1];
_v_56_ += c2 * _v_52_;
double _t_113_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
double _v_53_ = c2 * u3[i-2][j][k+2];
_v_6_ -= c2 * u3[i-2][j][k+2];
_v_53_ -= c2 * u3[i-2][j][k-2];
_v_15_ -= c2 * u3[i-2][j][k-2];
double _t_114_ = _v_53_;
double _v_54_ = c1 * u3[i-2][j][k+1];
_v_54_ -= c1 * u3[i-2][j][k-1];
_t_114_ += _v_54_;
double _v_55_ = _t_113_ * _t_114_;
_v_25_ -= c2 * u3[i-2][j][k+1];
_v_34_ -= c2 * u3[i-2][j][k-1];
_v_56_ += c2 * _v_55_;
double _t_79_ = stry[j] * _v_56_;
double _t_123_ = 2.0 * mu[i+1][j][k];
_t_123_ += la[i+1][j][k];
double _t_120_ = met1[i+1][j][k] * _t_123_ * met2[i+1][j][k];
double _v_57_ = c2 * u1[i+1][j][k+2];
_v_57_ -= c2 * u1[i+1][j][k-2];
double _t_121_ = _v_57_;
double _v_58_ = c1 * u1[i+1][j][k+1];
double _v_20_ = c1 * u1[i+1][j][k+1];
_v_58_ -= c1 * u1[i+1][j][k-1];
double _v_29_ = c1 * u1[i+1][j][k-1];
_t_121_ += _v_58_;
double _v_59_ = strx[i] * _t_120_ * _t_121_;
double _v_1_ = c1 * u1[i+1][j][k+2];
double _v_10_ = c1 * u1[i+1][j][k-2];
double _v_75_ = c1 * _v_59_;
double _v_60_ = c2 * u2[i+1][j][k+2];
_v_60_ -= c2 * u2[i+1][j][k-2];
double _t_128_ = _v_60_;
double _v_61_ = c1 * u2[i+1][j][k+1];
double _v_23_ = c1 * u2[i+1][j][k+1];
_v_61_ -= c1 * u2[i+1][j][k-1];
double _v_32_ = c1 * u2[i+1][j][k-1];
_t_128_ += _v_61_;
double _t_127_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
double _v_62_ = stry[j] * _t_127_ * _t_128_;
double _v_4_ = c1 * u2[i+1][j][k+2];
double _v_13_ = c1 * u2[i+1][j][k-2];
_v_75_ += c1 * _v_62_;
double _t_132_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
double _v_63_ = c2 * u3[i+1][j][k+2];
_v_63_ -= c2 * u3[i+1][j][k-2];
double _t_133_ = _v_63_;
double _v_64_ = c1 * u3[i+1][j][k+1];
double _v_26_ = c1 * u3[i+1][j][k+1];
_v_64_ -= c1 * u3[i+1][j][k-1];
double _v_35_ = c1 * u3[i+1][j][k-1];
_t_133_ += _v_64_;
double _v_65_ = _t_132_ * _t_133_;
double _v_7_ = c1 * u3[i+1][j][k+2];
double _v_16_ = c1 * u3[i+1][j][k-2];
_v_75_ += c1 * _v_65_;
double _t_141_ = 2.0 * mu[i-1][j][k];
_t_141_ += la[i-1][j][k];
double _t_138_ = met1[i-1][j][k] * _t_141_ * met2[i-1][j][k];
double _v_66_ = c2 * u1[i-1][j][k+2];
_v_66_ -= c2 * u1[i-1][j][k-2];
double _t_139_ = _v_66_;
double _v_67_ = c1 * u1[i-1][j][k+1];
_v_20_ -= c1 * u1[i-1][j][k+1];
_v_67_ -= c1 * u1[i-1][j][k-1];
_v_29_ -= c1 * u1[i-1][j][k-1];
_t_139_ += _v_67_;
double _v_68_ = strx[i] * _t_138_ * _t_139_;
_v_1_ -= c1 * u1[i-1][j][k+2];
_v_10_ -= c1 * u1[i-1][j][k-2];
_v_75_ += c1 * _v_68_;
double _v_69_ = c2 * u2[i-1][j][k+2];
_v_69_ -= c2 * u2[i-1][j][k-2];
double _t_146_ = _v_69_;
double _v_70_ = c1 * u2[i-1][j][k+1];
_v_23_ -= c1 * u2[i-1][j][k+1];
_v_70_ -= c1 * u2[i-1][j][k-1];
_v_32_ -= c1 * u2[i-1][j][k-1];
_t_146_ += _v_70_;
double _t_145_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
double _v_71_ = stry[j] * _t_145_ * _t_146_;
_v_4_ -= c1 * u2[i-1][j][k+2];
_v_13_ -= c1 * u2[i-1][j][k-2];
_v_75_ += c1 * _v_71_;
double _t_150_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
double _v_72_ = c2 * u3[i-1][j][k+2];
_v_72_ -= c2 * u3[i-1][j][k-2];
double _t_151_ = _v_72_;
double _v_73_ = c1 * u3[i-1][j][k+1];
_v_26_ -= c1 * u3[i-1][j][k+1];
_v_73_ -= c1 * u3[i-1][j][k-1];
_v_35_ -= c1 * u3[i-1][j][k-1];
_t_151_ += _v_73_;
double _v_74_ = _t_150_ * _t_151_;
_v_7_ -= c1 * u3[i-1][j][k+2];
_v_16_ -= c1 * u3[i-1][j][k-2];
_v_75_ += c1 * _v_74_;
_t_79_ += stry[j] * _v_75_;
double r1ic0jc0kc0 = r1[i][j][k];
r1ic0jc0kc0 += _t_79_;
double _t_17_ = _v_6_;
_t_17_ += _v_7_;
double _t_16_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
double _v_8_ = stry[j] * _t_16_ * _t_17_;
double _v_18_ = c2 * _v_8_;
double _t_5_ = _v_0_;
_t_5_ += _v_1_;
double _t_7_ = 2.0 * mu[i][j][k+2];
double _t_10_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_7_ += la[i][j][k+2];
double _t_4_ = met1[i][j][k+2] * _t_7_ * met2[i][j][k+2];
double _t_164_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
double _t_3_ = _t_4_ * _t_5_;
double _v_2_ = stry[j] * _t_3_ * strx[i];
_v_18_ += c2 * _v_2_;
double _t_11_ = _v_3_;
_t_11_ += _v_4_;
double _v_5_ = _t_10_ * _t_11_;
_v_18_ += c2 * _v_5_;
double _t_24_ = _v_9_;
_t_24_ += _v_10_;
double _t_26_ = 2.0 * mu[i][j][k-2];
_t_26_ += la[i][j][k-2];
double _t_23_ = met1[i][j][k-2] * _t_26_ * met2[i][j][k-2];
double _t_176_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
double _t_22_ = _t_23_ * _t_24_;
double _v_11_ = stry[j] * _t_22_ * strx[i];
_v_18_ += c2 * _v_11_;
double _t_30_ = _v_12_;
_t_30_ += _v_13_;
double _t_29_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
double _t_35_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
double _v_14_ = _t_29_ * _t_30_;
_v_18_ += c2 * _v_14_;
double _t_36_ = _v_15_;
_t_36_ += _v_16_;
double _v_17_ = stry[j] * _t_35_ * _t_36_;
_v_18_ += c2 * _v_17_;
double _t_0_ = _v_18_;
double _t_56_ = _v_25_;
_t_56_ += _v_26_;
double _t_55_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
double _v_27_ = stry[j] * _t_55_ * _t_56_;
double _v_37_ = c1 * _v_27_;
double _t_44_ = _v_19_;
_t_44_ += _v_20_;
double _t_46_ = 2.0 * mu[i][j][k+1];
double _t_49_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_46_ += la[i][j][k+1];
double _t_43_ = met1[i][j][k+1] * _t_46_ * met2[i][j][k+1];
double _t_189_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
double _t_42_ = _t_43_ * _t_44_;
double _v_21_ = stry[j] * _t_42_ * strx[i+2];
_v_37_ += c1 * _v_21_;
double _t_50_ = _v_22_;
_t_50_ += _v_23_;
double _v_24_ = _t_49_ * _t_50_;
_v_37_ += c1 * _v_24_;
double _t_63_ = _v_28_;
_t_63_ += _v_29_;
double _t_65_ = 2.0 * mu[i][j][k-1];
_t_65_ += la[i][j][k-1];
double _t_62_ = met1[i][j][k-1] * _t_65_ * met2[i][j][k-1];
double _t_201_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
double _t_61_ = _t_62_ * _t_63_;
double _v_30_ = stry[j] * _t_61_ * strx[i-2];
_v_37_ += c1 * _v_30_;
double _t_69_ = _v_31_;
_t_69_ += _v_32_;
double _t_68_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
double _t_74_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
double _v_33_ = _t_68_ * _t_69_;
_v_37_ += c1 * _v_33_;
double _t_75_ = _v_34_;
_t_75_ += _v_35_;
double _v_36_ = stry[j] * _t_74_ * _t_75_;
_v_37_ += c1 * _v_36_;
_t_0_ += _v_37_;
r1ic0jc0kc0 += _t_0_;
double _t_159_ = _t_10_;
double _t_160_ = _v_76_;
_t_160_ += _v_77_;
double _t_158_ = _t_159_ * _t_160_;
double _v_78_ = strx[i] * _t_158_ * stry[j+2];
double _v_88_ = c2 * _v_78_;
double _t_165_ = _v_79_;
_t_165_ += _v_80_;
double _v_81_ = _t_164_ * _t_165_;
_v_88_ += c2 * _v_81_;
double _t_171_ = _t_29_;
double _t_172_ = _v_82_;
_t_172_ += _v_83_;
double _t_170_ = _t_171_ * _t_172_;
double _v_84_ = strx[i] * _t_170_ * stry[j];
_v_88_ += c2 * _v_84_;
double _t_177_ = _v_85_;
_t_177_ += _v_86_;
double _v_87_ = _t_176_ * _t_177_;
_v_88_ += c2 * _v_87_;
double _t_155_ = _v_88_;
double _t_184_ = _t_49_;
double _t_185_ = _v_89_;
_t_185_ += _v_90_;
double _t_183_ = _t_184_ * _t_185_;
double _v_91_ = strx[i] * _t_183_ * stry[j-2];
double _v_101_ = c1 * _v_91_;
double _t_190_ = _v_92_;
_t_190_ += _v_93_;
double _v_94_ = _t_189_ * _t_190_;
_v_101_ += c1 * _v_94_;
double _t_196_ = _t_68_;
double _t_197_ = _v_95_;
_t_197_ += _v_96_;
double _t_195_ = _t_196_ * _t_197_;
double _v_97_ = strx[i] * _t_195_ * stry[j];
_v_101_ += c1 * _v_97_;
double _t_202_ = _v_98_;
_t_202_ += _v_99_;
double _v_100_ = _t_201_ * _t_202_;
_v_101_ += c1 * _v_100_;
_t_155_ += _v_101_;
r1ic0jc0kc0 += _t_155_;
r1[i][j][k] = r1ic0jc0kc0;
r1[i][j][k] += c2*(
mu[i][j+2][k]*met3[i][j+2][k]*met1[i][j+2][k]*(
c2*(u1[i][j+2][k+2]-u1[i][j+2][k-2]) +
c1*(u1[i][j+2][k+1]-u1[i][j+2][k-1]) )*stry[j+1]*strx[i]
+ mu[i][j+2][k]*met2[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i][j+2][k+2]-u2[i][j+2][k-2]) +
c1*(u2[i][j+2][k+1]-u2[i][j+2][k-1]) )
+ ( mu[i][j-2][k]*met3[i][j-2][k]*met1[i][j-2][k]*(
c2*(u1[i][j-2][k+2]-u1[i][j-2][k-2]) +
c1*(u1[i][j-2][k+1]-u1[i][j-2][k-1]) )*stry[j]*strx[i]
+ mu[i][j-2][k]*met2[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i][j-2][k+2]-u2[i][j-2][k-2]) +
c1*(u2[i][j-2][k+1]-u2[i][j-2][k-1]) ) )
) + c1*(
mu[i][j+1][k]*met3[i][j+1][k]*met1[i][j+1][k]*(
c2*(u1[i][j+1][k+2]-u1[i][j+1][k-2]) +
c1*(u1[i][j+1][k+1]-u1[i][j+1][k-1]) )*stry[j-1]*strx[i]
+ mu[i][j+1][k]*met2[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i][j+1][k+2]-u2[i][j+1][k-2]) +
c1*(u2[i][j+1][k+1]-u2[i][j+1][k-1]) )
+ ( mu[i][j-1][k]*met3[i][j-1][k]*met1[i][j-1][k]*(
c2*(u1[i][j-1][k+2]-u1[i][j-1][k-2]) +
c1*(u1[i][j-1][k+1]-u1[i][j-1][k-1]) )*stry[j]*strx[i]
+ mu[i][j-1][k]*met2[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i][j-1][k+2]-u2[i][j-1][k-2]) +
c1*(u2[i][j-1][k+1]-u2[i][j-1][k-1]) ) ) );
r1[i][j][k] +=
c2*( mu[i][j+2][k]*met1[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i+2][j+2][k]-u2[i-2][j+2][k]) +
c1*(u2[i+1][j+2][k]-u2[i-1][j+2][k]) )
+ mu[i][j-2][k]*met1[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i+2][j-2][k]-u2[i-2][j-2][k])+
c1*(u2[i+1][j-2][k]-u2[i-1][j-2][k]) )
) +
c1*( mu[i][j+1][k]*met1[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i+2][j+1][k]-u2[i-2][j+1][k]) +
c1*(u2[i+1][j+1][k]-u2[i-1][j+1][k]) )
+ mu[i][j-1][k]*met1[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i+2][j-1][k]-u2[i-2][j-1][k]) +
c1*(u2[i+1][j-1][k]-u2[i-1][j-1][k])))
+
c2*( la[i+2][j][k]*met1[i+2][j][k]*met1[i+2][j][k]*(
c2*(u2[i+2][j+2][k]-u2[i+2][j-2][k]) +
c1*(u2[i+2][j+1][k]-u2[i+2][j-1][k]) )
+ la[i-2][j][k]*met1[i-2][j][k]*met1[i-2][j][k]*(
c2*(u2[i-2][j+2][k]-u2[i-2][j-2][k])+
c1*(u2[i-2][j+1][k]-u2[i-2][j-1][k]) )
) +
c1*( la[i+1][j][k]*met1[i+1][j][k]*met1[i+1][j][k]*(
c2*(u2[i+1][j+2][k]-u2[i+1][j-2][k]) +
c1*(u2[i+1][j+1][k]-u2[i+1][j-1][k]) )
+ la[i-1][j][k]*met1[i-1][j][k]*met1[i-1][j][k]*(
c2*(u2[i-1][j+2][k]-u2[i-1][j-2][k]) +
c1*(u2[i-1][j+1][k]-u2[i-1][j-1][k])));
}
}
}
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
cudaMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u1;
cudaMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u2;
cudaMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u3;
cudaMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *mu;
cudaMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *la;
cudaMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met1;
cudaMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met2;
cudaMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met3;
cudaMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met4;
cudaMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *strx;
cudaMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
double *stry;
cudaMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
curvi <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
}
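/* usage sketch (illustrative only, not part of the original source): the kernel casts
   every field to double (*)[304][304], so host_code as written is only consistent for
   N == 304. A minimal driver under that assumption could allocate each volume field as
       double *h_r1 = (double*) calloc((size_t)N * N * N, sizeof(double));
   (h_strx and h_stry need N entries each), then call
       host_code(h_r1, h_u1, h_u2, h_u3, h_mu, h_la, h_met1, h_met2, h_met3, h_met4,
                 h_strx, h_stry, c1, c2, 304);
   with c1 and c2 set to the caller's finite-difference coefficients. */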
|
3c31c72dfc17190278dc50c6495cda238ac2c06e.hip
|
// !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdio.h>
#include <random>
#include <bitset>
#include <unordered_set>
#include <vector>
#include <chrono>
#include <algorithm>
#include <limits>
#include <fstream>
// includes, cuda
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// includes, thrust
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
////////////////////////////////////////////////////////////////////////////////
#define WORD_SIZE 1000
#define DATA_SIZE 100000
#define UINT_BITSIZE (unsigned int)(8*sizeof(unsigned int))
#define SUBWORDS_PER_WORD(N) (unsigned int)(::ceil((float)N / (sizeof(unsigned int) * 8.0f)))
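//illustrative note (not part of the original source): with a 32-bit unsigned int,
//SUBWORDS_PER_WORD(WORD_SIZE) = ceil(1000 / 32) = 32 packed subwords per word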
////////////////////////////////////////////////////////////////////////////////
// function declarations
template<size_t N>
unsigned int hamming_distance(const typename std::bitset<N>& A, const typename std::bitset<N>& B);
template<size_t N>
typename std::bitset<N> random_bitset(double p);
template<size_t N>
unsigned int MSB(std::bitset<N> bitset);
template<size_t N, size_t M>
void generate_random_data(typename std::vector<std::bitset<N>>& _data_vec, \
const bool timeCount = true, const bool consoleOutput = true, const float p = 0.5f);
template<size_t N, size_t M>
void generate_predictable_data(typename std::vector<std::bitset<N>>& _data_vec, \
const bool timeCount = true, const bool consoleOutput = true);
template<size_t N, size_t M>
void load_data(const char* words_filepath, const char* pairs_filepath, typename std::vector<std::bitset<N>>& _data_vec, \
typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs);
template<size_t N, size_t M>
void save_data(const char* words_filepath, const char* pairs_filepath, const typename std::vector<std::bitset<N>>& _data_vec, \
const typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs);
template<size_t N>
void find_ham1(const typename std::vector<std::bitset<N>>& _data_vec, \
typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool timeCount = true, const bool pairsOutput = true);
template<size_t N, size_t M>
thrust::device_vector<unsigned int> move_data_to_GPU(const typename std::vector<std::bitset<N>>& data_vec);
__global__ void find_ham1_GPU_ker(const unsigned int* subwords, unsigned int* pair_flags, const unsigned int subwords_per_pair_flags);
__global__ void count_ones(unsigned int* d_data, size_t pair_flags_size);
template<size_t N>
void find_ham1_GPU(thrust::device_vector<unsigned int>& d_subwords, \
thrust::device_vector<unsigned int>& d_pair_flags, \
thrust::host_vector<unsigned int>& h_pair_flags, size_t pair_flags_size, \
const typename std::vector<std::bitset<N>>& data_vec, typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool save_to_file, const bool timeCount = true, const bool pairsOutput = false, const bool checkData = false);
template<size_t N>
void process_pairs_from_flags(thrust::host_vector<unsigned int>& h_pair_flags, size_t pair_flags_size, \
const typename std::vector<std::bitset<N>>& data_vec, \
const typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool checkData, const bool pairsOutput, const bool saveToFile = false, const char* filepath = "", unsigned int pairs_count = 0);
////////////////////////////////////////////////////////////////////////////////
// word generating function - from bernoulli distribution
template<size_t N> // p = 0.5 gives equal chance for 0's and 1's to occur
typename std::bitset<N> random_bitset(double p)
{
typename std::bitset<N> bits;
std::random_device rd;
std::mt19937 gen(rd());
std::bernoulli_distribution dist(p);
for (size_t i = 0; i < N; ++i) {
bits[i] = dist(gen);
}
return bits;
}
////////////////////////////////////////////////////////////////////////////////
// finds the index of the most significant set bit in a bitset
template<size_t N>
unsigned int MSB(std::bitset<N> bitset)
{
if (!bitset.to_ulong())
return 0;
int msb = 0;
while (bitset.to_ulong() != 1)
{
bitset >>= 1;
++msb;
}
return msb;
}
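//illustrative example (not part of the original source): MSB(std::bitset<8>("00010100")) == 4,
//the zero-based index of the highest set bit; note that the implementation relies on
//bitset::to_ulong(), which is only safe while no bits above unsigned long's range are set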
////////////////////////////////////////////////////////////////////////////////
// data generating function
template<size_t N, size_t M>
void generate_random_data(typename std::vector<std::bitset<N>>& _data_vec, \
const bool timeCount, const bool consoleOutput, const float p)
{
std::unordered_set<std::bitset<N>> data_uset;
data_uset.reserve(M);
std::chrono::steady_clock::time_point start, finish;
std::chrono::duration<double> elapsed;
if (consoleOutput) std::cout << "Beginning Data Generation...\n";
// Record start time
if (consoleOutput && timeCount) start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < M; ++i)
{
while (false == (data_uset.emplace(random_bitset<N>(p)).second));
}
// Record end time
if (consoleOutput && timeCount) finish = std::chrono::high_resolution_clock::now();
// Copy to vector
for (const auto& it : data_uset)
{
_data_vec.emplace_back(it);
}
if (consoleOutput)
{
if (timeCount) elapsed = finish - start;
std::cout << "Data Generation Finished!\n";
if (timeCount) std::cout << "Elapsed time: " << elapsed.count() << " s\n";
std::cout << std::endl;
}
}
template<size_t N, size_t M>
void generate_predictable_data(typename std::vector<std::bitset<N>>& _data_vec, \
const bool timeCount, const bool consoleOutput)
{
std::unordered_set<std::bitset<N>> data_uset; // used for uniqueness
data_uset.reserve(M);
std::chrono::steady_clock::time_point start, finish;
std::chrono::duration<double> elapsed;
unsigned int starting_word_counter = 0;
std::bitset<N> starting_word;
std::bitset<N> current_word;
if (consoleOutput) std::cout << "Beginning Data Generation...\n";
// Record start time
if (consoleOutput && timeCount) start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < M;)
{
starting_word = std::bitset<N>(starting_word_counter++);
// Check if starting word can be placed into vector
if (data_uset.emplace(starting_word).second)
{
_data_vec.emplace_back(starting_word);
++i;
}
// Words are created by setting one of the bits before MSB to 1
for (size_t j = starting_word.to_ulong() == 0 ? 0 : MSB(starting_word) + 1; j < N && i < M; ++j)
{
current_word = std::bitset<N>(0);
current_word[j] = 1;
current_word = current_word ^ starting_word;
// Check if current word can be placed into vector
if (data_uset.emplace(current_word).second) {
_data_vec.emplace_back(current_word);
++i;
}
}
}
// Record end time
if (consoleOutput && timeCount) finish = std::chrono::high_resolution_clock::now();
if (consoleOutput)
{
if (timeCount) elapsed = finish - start;
std::cout << "Data Generation Finished!\n";
if (_data_vec.size() != M) std::cout << "Wrong Data Size: " << _data_vec.size() << std::endl;
if (timeCount) std::cout << "Elapsed time: " << elapsed.count() << " s\n";
std::cout << std::endl;
}
}
////////////////////////////////////////////////////////////////////////////////
// data loading function
template<size_t N, size_t M>
void load_data(const char* words_filepath, const char* pairs_filepath, typename std::vector<std::bitset<N>>& _data_vec, \
typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs)
{
size_t pairs_count = 0;
std::string line, number;
std::string separator = ";";
size_t sep_pos = 0;
std::ifstream words_file;
words_file.open(words_filepath);
if (!words_file.good()) {
std::cout << "Error opening words_file\n\n";
return;
}
_data_vec.clear();
// discard first line(header)
std::getline(words_file, line);
//check if WORD_SIZE/DATA_SIZE the same as defined in program
std::getline(words_file, line);
sep_pos = line.find(separator);
if (sep_pos == std::string::npos) {
std::cout << "Error(words_file) - wrong formatting\n\n";
return;
}
if (std::stoi(line.substr(0, sep_pos)) != N) {
std::cout << "Error(words_file) - WORD_SIZE different\n\n";
return;
}
if (std::stoi(line.substr(sep_pos + 1)) != M) {
std::cout << "Error(words_file) - DATA_SIZE different\n\n";
return;
}
// main words_file loop
for (size_t i = 0; i < M; ++i) {
std::getline(words_file, line);
_data_vec.emplace_back(std::bitset<N>(line));
}
std::ifstream pairs_file;
pairs_file.open(pairs_filepath);
if (!words_file.good()) {
std::cout << "Error opening pairs_file\n\n";
return;
}
ham1_pairs.clear();
// discard first line(header)
std::getline(pairs_file, line);
//check if WORD_SIZE the same as defined in program
std::getline(pairs_file, line);
sep_pos = line.find(separator);
if (sep_pos == std::string::npos) {
std::cout << "Error(pairs_file) - wrong formatting\n\n";
return;
}
if (std::stoi(line.substr(0, sep_pos)) != N) {
std::cout << "Error(pairs_file) - WORD_SIZE different\n\n";
return;
}
pairs_count = std::stoi(line.substr(sep_pos + 1));
// main pairs_file loop
for (size_t i = 0; i < pairs_count; ++i) {
std::getline(pairs_file, line);
sep_pos = line.find(separator);
ham1_pairs.emplace_back(std::make_pair<std::bitset<N>, std::bitset<N>>(std::bitset<N>(line.substr(0, sep_pos)), std::bitset<N>(line.substr(sep_pos + 1))));
}
pairs_file.close();
std::cout << "Loading Data successful!" << std::endl << std::endl;
}
////////////////////////////////////////////////////////////////////////////////
// data saving function
template<size_t N, size_t M>
void save_data(const char* words_filepath, const char* pairs_filepath, const typename std::vector<std::bitset<N>>& _data_vec, \
const typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs)
{
if (_data_vec.empty()) {
std::cout << "Words vector is empty!";
return;
}
std::ofstream words_file;
std::remove(words_filepath);
words_file.open(words_filepath);
words_file << "WORD_SIZE;DATA_SIZE\n";
words_file << N << ';' << M << "\n";
// main words_file loop
for (size_t i = 0; i < M; ++i)
words_file << _data_vec[i].to_string() << "\n";
words_file.close();
if (ham1_pairs.empty()) {
std::cout << "Saving Data successful!" << std::endl << std::endl;
return;
}
std::ofstream pairs_file;
std::remove(pairs_filepath);
pairs_file.open(pairs_filepath);
pairs_file << "WORD_SIZE;PAIRS_COUNT\n";
pairs_file << N << ';' << ham1_pairs.size() << "\n";
// main pairs_file loop
for (size_t i = 0; i < ham1_pairs.size(); ++i)
pairs_file << ham1_pairs[i].first.to_string() << ';' << ham1_pairs[i].second.to_string() << "\n";
pairs_file.close();
std::cout << "Saving Data successful!" << std::endl << std::endl;
}
////////////////////////////////////////////////////////////////////////////////
// finding pairs with hamming distance 1 on CPU
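// Brute-force reference: tests all M*(M-1)/2 unordered pairs with one bitset XOR + popcount
// each; with DATA_SIZE = 100000 that is roughly 5e9 comparisons of WORD_SIZE-bit words.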
template<size_t N>
void find_ham1(const typename std::vector<std::bitset<N>>& data_vec, \
typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool timeCount, const bool pairsOutput)
{
std::chrono::steady_clock::time_point start, finish;
std::chrono::duration<double> elapsed;
std::cout << "Looking for pairs with hamming distance 1 ...\n";
ham1_pairs.clear();
// Record start time
if (timeCount) start = std::chrono::high_resolution_clock::now();
unsigned int ham1 = 0;
for (auto it1 = std::begin(data_vec); it1 != std::end(data_vec); ++it1)
{
for (auto it2 = std::next(it1); it2 != std::end(data_vec); ++it2)
{
if (1 == hamming_distance<N>(*it1, *it2))
{
ham1_pairs.emplace_back(std::make_pair<std::bitset<N>, std::bitset<N>>(std::bitset<N>(*it1), std::bitset<N>(*it2)));
++ham1;
}
}
}
// Record end time
if (timeCount) finish = std::chrono::high_resolution_clock::now();
if (timeCount) elapsed = finish - start;
std::cout << "Finished!\n";
if (timeCount) std::cout << "Elapsed time: " << elapsed.count() << " s\n";
std::cout << ham1 << " pairs found\n\n";
if (ham1 && pairsOutput)
{
std::cout << "Pairs found:\n";
for (const auto& it : ham1_pairs)
{
std::cout << it.first << " " << it.second << std::endl;
}
std::cout << std::endl;
}
}
////////////////////////////////////////////////////////////////////////////////
// hamming distance function
template<size_t N>
unsigned int hamming_distance(const typename std::bitset<N>& A, const typename std::bitset<N>& B)
{
return (A ^ B).count();
}
////////////////////////////////////////////////////////////////////////////////
// move data to gpu
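// Packs each std::bitset word into UINT_BITSIZE-bit (normally 32-bit) subwords, zero padded at
// the end, and stores them column-major: subword i of every word is contiguous, so the value for
// word w and subword i lands at h_words[i * M + w]. The kernel indexes the array the same way
// (subwords[word_idx + DATA_SIZE * i]), which keeps the per-warp loads coalesced.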
template<size_t N, size_t M>
thrust::device_vector<unsigned int> move_data_to_GPU(const typename std::vector<std::bitset<N>>& data_vec)
{
//N - WORD_SIZE, M - DATA_SIZE
thrust::host_vector<unsigned int> h_words(M * SUBWORDS_PER_WORD(N));
thrust::device_vector<unsigned int> d_words;
std::chrono::steady_clock::time_point start, finish;
std::chrono::duration<double> elapsed;
// Record start time
start = std::chrono::high_resolution_clock::now();
int i = 0; // index in h_words
if (N < UINT_BITSIZE)
{
for (const auto& word_bitset : data_vec)
{
std::string word_str = word_bitset.to_string().substr(0, N);
// add padding bits
for (size_t word_str_size = N; word_str_size < UINT_BITSIZE; ++word_str_size)
word_str += "0";
unsigned int word = (unsigned int)(std::bitset<N>(word_str).to_ulong());
h_words[i++] = word;
}
}
else
{
size_t j = 0; // currently processed subwords
		for (; j + UINT_BITSIZE <= N; j += UINT_BITSIZE) // <= so words that are an exact multiple of UINT_BITSIZE still emit their last full subword
{
for (const auto& word_bitset : data_vec)
{
std::string subword_str = word_bitset.to_string().substr(j, UINT_BITSIZE);
unsigned int subword = (unsigned int)(std::bitset<N>(subword_str).to_ulong());
h_words[i++] = subword;
}
}
		if (j < N) // a partial tail subword (shorter than UINT_BITSIZE) remains
{
for (const auto& word_bitset : data_vec)
{
std::string subword_str = word_bitset.to_string().substr(j, N - j);
for (size_t subword_str_size = N - j; subword_str_size < UINT_BITSIZE; ++subword_str_size)
subword_str += "0";
unsigned int subword = (unsigned int)(std::bitset<N>(subword_str).to_ulong());
h_words[i++] = subword;
}
}
}
d_words = h_words;
// Record end time
finish = std::chrono::high_resolution_clock::now();
elapsed = finish - start;
std::cout << std::endl << "Data moved to GPU" << std::endl;
std::cout << "Elapsed time: " << elapsed.count() << " s" << std::endl << std::endl;
return d_words;
}
////////////////////////////////////////////////////////////////////////////////
// HammingOne kernel
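// One thread per word: thread word_idx compares its word against every word with a larger index,
// so each unordered pair is tested exactly once and the popcount loop can stop early once the
// running distance reaches 2. A match is recorded as a single bit in pair_flags: row word_idx,
// bit comparison_idx counted MSB-first within that row's unsigned ints.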
__global__ void find_ham1_GPU_ker(const unsigned int* subwords, unsigned int* pair_flags, const unsigned int subwords_per_pair_flags)
{
const unsigned int word_idx = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int subwords_per_word = SUBWORDS_PER_WORD(WORD_SIZE);
if (word_idx >= DATA_SIZE) return;
unsigned int hamming_distance, flag_subword_offset, flag_in_subword;
// comparison_idx - index of word that word under word_idx is being compared to
for (size_t comparison_idx = word_idx + 1; comparison_idx < DATA_SIZE; ++comparison_idx)
{
hamming_distance = 0;
for (size_t i = 0; i < subwords_per_word && hamming_distance < 2; ++i)
hamming_distance += __popc(subwords[word_idx + DATA_SIZE * i] ^ subwords[comparison_idx + DATA_SIZE * i]);
// each word has at least DATA_SIZE flags, flags for matches are set on match's index in CPU data
if (hamming_distance && !(hamming_distance >> 1)) // true when hamming_distance == 1
{
flag_subword_offset = comparison_idx / UINT_BITSIZE;
			flag_in_subword = 1u << (UINT_BITSIZE - 1 - comparison_idx % UINT_BITSIZE); // MSB-first bit for comparison_idx inside its flag subword
pair_flags[word_idx * subwords_per_pair_flags + flag_subword_offset] |= flag_in_subword;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Counting kernel
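// Replaces every flag word with its popcount in place, so a single thrust::reduce over the vector
// yields the total number of pairs; the caller restores the original flags from the host copy
// afterwards.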
__global__ void count_ones(unsigned int* d_data, size_t pair_flags_size)
{
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= pair_flags_size) return;
d_data[tid] = __popc(d_data[tid]);
}
////////////////////////////////////////////////////////////////////////////////
// finding pairs with hamming distance 1 on GPU
template<size_t N>
void find_ham1_GPU(thrust::device_vector<unsigned int>& d_subwords, \
thrust::device_vector<unsigned int>& d_pair_flags, \
thrust::host_vector<unsigned int>& h_pair_flags, size_t pair_flags_size, \
const typename std::vector<std::bitset<N>>& data_vec, typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool save_to_file, const bool timeCount, const bool pairsOutput, const bool checkData)
{
// vars with 2 are for kernel processing flags
unsigned int threads = 512;
unsigned int threads2 = 512;
unsigned int blocks = (unsigned int)::ceil(DATA_SIZE / (double)threads);
	unsigned int blocks2 = (unsigned int)::ceil(pair_flags_size / (double)threads2);
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
dim3 dimBlock2(threads2, 1, 1);
dim3 dimGrid2(blocks2, 1, 1);
float elapsed;
hipEvent_t start, stop;
const unsigned int subwords_per_pair_flags = pair_flags_size / DATA_SIZE;
auto d_subwords_ptr = thrust::raw_pointer_cast(d_subwords.begin().base());
auto d_pair_flags_ptr = thrust::raw_pointer_cast(d_pair_flags.begin().base());
unsigned int pairs_count_GPU = 0;
if (timeCount) {
hipEventCreate(&start);
hipEventCreate(&stop);
}
std::cout << "Looking for pairs with hamming distance 1 ...\n";
if (timeCount) hipEventRecord(start, 0);
hipLaunchKernelGGL(( find_ham1_GPU_ker), dim3(dimGrid), dim3(dimBlock), 0, 0, d_subwords_ptr, d_pair_flags_ptr, subwords_per_pair_flags);
if (timeCount) hipEventRecord(stop, 0);
hipError_t err = hipGetLastError();
if (err != hipSuccess) printf("%s\n", hipGetErrorString(err));
hipDeviceSynchronize();
if (timeCount) hipEventElapsedTime(&elapsed, start, stop);
std::cout << "Finished!\n";
if (timeCount) std::cout << "Elapsed time: " << elapsed << " ms\n";
h_pair_flags = d_pair_flags;
// get number of pairs from pair_flags
hipLaunchKernelGGL(( count_ones), dim3(dimGrid2), dim3(dimBlock2), 0, 0, thrust::raw_pointer_cast(d_pair_flags.begin().base()), pair_flags_size);
err = hipGetLastError();
if (err != hipSuccess) printf("%s\n", hipGetErrorString(err));
hipDeviceSynchronize();
pairs_count_GPU = thrust::reduce(d_pair_flags.begin(), d_pair_flags.end()); // d_pair_flags invalidated
d_pair_flags = h_pair_flags; // reset to correct value
std::cout << pairs_count_GPU << " pairs found\n\n";
if (pairs_count_GPU) {
if (save_to_file)
process_pairs_from_flags<N>(h_pair_flags, pair_flags_size, data_vec, ham1_pairs, checkData, pairsOutput, true, "./pairs_GPU.csv", pairs_count_GPU);
else
process_pairs_from_flags<N>(h_pair_flags, pair_flags_size, data_vec, ham1_pairs, checkData, pairsOutput);
}
else
process_pairs_from_flags<N>(h_pair_flags, pair_flags_size, data_vec, ham1_pairs, checkData, false);
if (timeCount) {
hipEventDestroy(start);
hipEventDestroy(stop);
}
}
////////////////////////////////////////////////////////////////////////////////
// pairs_flag to pairs output
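// Decodes the per-word flag bitmasks back into (word, match) pairs. Flag subwords are walked from
// the last one down; inside a subword the bits are consumed LSB-first while flag_pos counts down
// from that subword's highest comparison index, so every set bit maps back to the index of the
// matching word in the CPU data vector.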
template<size_t N>
void process_pairs_from_flags(thrust::host_vector<unsigned int>& h_pair_flags, size_t pair_flags_size, \
const typename std::vector<std::bitset<N>>& data_vec, \
const typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool checkData, const bool pairsOutput, const bool saveToFile, const char* filepath, unsigned int pairs_count)
{
const unsigned int subwords_per_word_flags = pair_flags_size / DATA_SIZE;
std::chrono::steady_clock::time_point start, finish;
std::chrono::duration<double> elapsed;
if (saveToFile) {
std::ofstream file;
std::remove(filepath);
// Record start time
start = std::chrono::high_resolution_clock::now();
file.open(filepath);
file << "WORD_SIZE;PAIRS_COUNT\n";
file << N << ';' << pairs_count << "\n";
for (size_t word_idx = 0; word_idx < DATA_SIZE; ++word_idx)
{
bool flag_found = false;
unsigned int* word_flags = new unsigned int[subwords_per_word_flags];
// Get flags of current word
for (size_t i = 0; i < subwords_per_word_flags; ++i)
word_flags[i] = h_pair_flags[word_idx * subwords_per_word_flags + i];
// Check if the word has any match
for (size_t i = 0; i < subwords_per_word_flags; ++i)
{
if (word_flags[i]) {
flag_found = true;
break;
}
}
			if (!flag_found) { delete[] word_flags; continue; } // Process next word if current has no pairs found (free its flag copy first to avoid a leak)
for (int i = subwords_per_word_flags - 1; i >= 0; --i)
{
if (!word_flags[i]) continue; // Process next subset if current has no pairs found
int flags_set = __popcnt(word_flags[i]); // Matches in current flag subset
int flag_pos = (i + 1) * UINT_BITSIZE - 1; // Index of match in CPU data vector
size_t j = 0; // j - matches processed
while (j < flags_set)
{
if (word_flags[i] % 2) {
file << data_vec[word_idx].to_string() << ';' << data_vec[flag_pos].to_string() << "\n";
++j;
}
word_flags[i] = word_flags[i] >> 1;
--flag_pos;
}
}
delete[] word_flags;
}
file.close();
// Record end time
finish = std::chrono::high_resolution_clock::now();
elapsed = finish - start;
std::cout << "Saving Data successful!" << std::endl;
std::cout << "Elapsed time: " << elapsed.count() << " s" << std::endl << std::endl;
}
else {
if (!checkData && !pairsOutput) return;
bool dataCorrect = true;
if (pairsOutput) std::cout << "Pairs found:\n";
// Record start time
start = std::chrono::high_resolution_clock::now();
for (size_t word_idx = 0; word_idx < DATA_SIZE; ++word_idx)
{
bool flag_found = false;
unsigned int* word_flags = new unsigned int[subwords_per_word_flags];
// Get flags of current word
for (size_t i = 0; i < subwords_per_word_flags; ++i)
word_flags[i] = h_pair_flags[word_idx * subwords_per_word_flags + i];
// Check if the word has any match
for (size_t i = 0; i < subwords_per_word_flags; ++i)
{
if (word_flags[i]) {
flag_found = true;
break;
}
}
			if (!flag_found) { delete[] word_flags; continue; } // Process next word if current has no pairs found (free its flag copy first to avoid a leak)
for (int i = subwords_per_word_flags - 1; i >= 0; --i)
{
if (!word_flags[i]) continue; // Process next subset if current has no pairs found
int flags_set = __popcnt(word_flags[i]); // Matches in current flag subset
int flag_pos = (i + 1) * UINT_BITSIZE - 1; // Index of match in CPU data vector
size_t j = 0; // j - matches processed
while (j < flags_set)
{
if (word_flags[i] % 2) {
if (checkData) {
std::pair<std::bitset<N>, std::bitset<N>> pair = std::make_pair<std::bitset<N>, std::bitset<N>>(std::bitset<N>(data_vec[word_idx].to_string()), std::bitset<N>(data_vec[flag_pos].to_string()));
if (std::end(ham1_pairs) == std::find(std::begin(ham1_pairs), std::end(ham1_pairs), pair)) {
std::cout << "No matching pair found in CPU Data" << std::endl;
dataCorrect = false;
}
}
if (pairsOutput) std::cout << data_vec[word_idx] << " " << data_vec[flag_pos] << std::endl;
++j;
}
word_flags[i] = word_flags[i] >> 1;
--flag_pos;
}
}
delete[] word_flags;
}
// Record end time
finish = std::chrono::high_resolution_clock::now();
elapsed = finish - start;
if (checkData && dataCorrect) {
if (pairsOutput) std::cout << std::endl;
std::cout << "GPU Data is consistent with CPU Data" << std::endl << std::endl;
}
else if (pairsOutput || !dataCorrect) std::cout << std::endl;
if (checkData) std::cout << "Elapsed time: " << elapsed.count() << " s" << std::endl << std::endl;
}
}
////////////////////////////////////////////////////////////////////////////////
int main()
{
bool updated_data_GPU = true;
unsigned short menu_choice = 0;
size_t pair_flags_size = DATA_SIZE * (::ceil((double)DATA_SIZE / (double)UINT_BITSIZE));
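	// pair_flags holds one flag bit per ordered (word, candidate) slot: DATA_SIZE rows of
	// ceil(DATA_SIZE / UINT_BITSIZE) unsigned ints. Assuming 32-bit unsigned int and the default
	// DATA_SIZE of 100000 this is 100000 * 3125 values, i.e. about 1.25 GB on the device
	// (and the same again for the host copy read back after the kernel).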
thrust::device_vector<unsigned int> d_subwords;
thrust::device_vector<unsigned int> d_pair_flags(pair_flags_size, 0);
thrust::host_vector<unsigned int> h_pair_flags;
std::vector<std::bitset<WORD_SIZE>> data_vec;
std::vector<std::pair<std::bitset<WORD_SIZE>, std::bitset<WORD_SIZE>>> ham1_pairs;
while (menu_choice != 5) {
std::cout << "1. Generate Data" << std::endl;
std::cout << "2. Save/Load Data" << std::endl;
if (!data_vec.empty()) {
if (d_subwords.empty())
std::cout << "3. Move Data to GPU - !!! No Data on GPU !!!" << std::endl;
else if (!updated_data_GPU)
std::cout << "3. Move Data to GPU - !!! Data on GPU not matching Data on CPU !!!" << std::endl;
else
std::cout << "3. Move Data to GPU" << std::endl;
std::cout << "4. Find Pairs" << std::endl;
}
else {
std::cout << "3. Move Data to GPU - !!! Generate/Load Data before attempting to move the data to GPU !!!" << std::endl;
std::cout << "4. Find Pairs - !!! Generate/Load Data before attempting to find pairs !!!" << std::endl;
}
std::cout << "5. Exit" << std::endl;
std::cout << "6. Clear Console" << std::endl;
std::cout << "Choice: ";
std::cin >> menu_choice;
switch (menu_choice)
{
case 1:
std::cout << std::endl;
while (menu_choice != 3)
{
std::cout << "1. Generate Random Data" << std::endl;
std::cout << "2. Generate Predictable Data" << std::endl;
std::cout << "3. Go Back" << std::endl;
std::cout << "Choice: ";
std::cin >> menu_choice;
std::cout << std::endl;
switch (menu_choice)
{
case 1:
if (!data_vec.empty())
data_vec.clear();
data_vec.reserve(DATA_SIZE);
if (!ham1_pairs.empty())
ham1_pairs.clear();
generate_random_data<WORD_SIZE, DATA_SIZE>(data_vec);
updated_data_GPU = false;
break;
case 2:
if (!data_vec.empty())
data_vec.clear();
data_vec.reserve(DATA_SIZE);
if (!ham1_pairs.empty())
ham1_pairs.clear();
generate_predictable_data<WORD_SIZE, DATA_SIZE>(data_vec);
updated_data_GPU = false;
break;
case 3:
break;
default:
std::cout << "Please provide a valid choice" << std::endl << std::endl;
break;
}
}
menu_choice = 1;
break;
case 2:
std::cout << std::endl;
while (menu_choice != 3)
{
std::cout << "1. Save Data" << std::endl;
std::cout << "2. Load Data" << std::endl;
std::cout << "3. Go Back" << std::endl;
std::cout << "Choice: ";
std::cin >> menu_choice;
std::cout << std::endl;
switch (menu_choice)
{
case 1:
save_data<WORD_SIZE, DATA_SIZE>("./words_data.csv", "./pairs_CPU.csv", data_vec, ham1_pairs);
break;
case 2:
load_data<WORD_SIZE, DATA_SIZE>("./words_data.csv", "./pairs_CPU.csv", data_vec, ham1_pairs);
updated_data_GPU = false;
break;
case 3:
break;
default:
std::cout << "Please provide a valid choice" << std::endl << std::endl;
break;
}
}
menu_choice = 2;
break;
case 3:
if (!data_vec.empty()) {
d_subwords = move_data_to_GPU<WORD_SIZE, DATA_SIZE>(data_vec);
updated_data_GPU = true;
}
else
std::cout << std::endl << "!!! Generate / Load Data before attempting to move the data to GPU !!!" << std::endl << std::endl;
break;
case 4:
std::cout << std::endl;
if (!data_vec.empty()) {
while (menu_choice != 3)
{
std::cout << "1. Use CPU" << std::endl;
if (d_subwords.empty())
std::cout << "2. Use GPU - !!! No Data on GPU !!!" << std::endl;
else if (!updated_data_GPU)
std::cout << "2. Use GPU - !!! Data on GPU not matching Data on CPU !!!" << std::endl;
else
std::cout << "2. Use GPU" << std::endl;
std::cout << "3. Go Back" << std::endl;
std::cout << "Choice: ";
std::cin >> menu_choice;
std::cout << std::endl;
switch (menu_choice)
{
case 1:
{
char c;
do {
std::cout << "Output pairs to console? (y/n):";
std::cin.clear();
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
c = std::getc(stdin);
if (c == 'y' || c == 'Y') {
find_ham1<WORD_SIZE>(data_vec, ham1_pairs, true, true);
break;
}
else if (c == 'n' || c == 'N') {
find_ham1<WORD_SIZE>(data_vec, ham1_pairs, true, false);
break;
}
std::cout << "Please provide a valid choice" << std::endl;
} while (true);
break;
}
case 2:
{
bool save_to_file = false;
bool out_to_console = false;
char c;
if (d_subwords.empty())
std::cout << std::endl << "!!! No Data on GPU !!!" << std::endl << std::endl;
else {
do {
std::cout << "Save pairs to file? (y/n):";
std::cin.clear();
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
c = std::getc(stdin);
if (c == 'y' || c == 'Y') {
save_to_file = true;
find_ham1_GPU<WORD_SIZE>(d_subwords, d_pair_flags, h_pair_flags, pair_flags_size, data_vec, ham1_pairs, save_to_file);
break;
}
else if (c == 'n' || c == 'N') {
break;
}
std::cout << "Please provide a valid choice" << std::endl;
} while (true);
if (save_to_file) break;
do {
std::cout << "Output pairs to console? (y/n):";
std::cin.clear();
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
c = std::getc(stdin);
if (c == 'y' || c == 'Y') {
out_to_console = true;
break;
}
else if (c == 'n' || c == 'N') {
out_to_console = false;
break;
}
std::cout << "Please provide a valid choice" << std::endl;
} while (true);
do {
std::cout << "Check pairs against CPU? (y/n):";
std::cin.clear();
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
c = std::getc(stdin);
if (c == 'y' || c == 'Y') {
find_ham1_GPU<WORD_SIZE>(d_subwords, d_pair_flags, h_pair_flags, pair_flags_size, data_vec, ham1_pairs, save_to_file, true, out_to_console, true);
break;
}
else if (c == 'n' || c == 'N') {
find_ham1_GPU<WORD_SIZE>(d_subwords, d_pair_flags, h_pair_flags, pair_flags_size, data_vec, ham1_pairs, save_to_file, true, out_to_console, false);
break;
}
std::cout << "Please provide a valid choice" << std::endl;
} while (true);
}
break;
}
case 3:
break;
default:
std::cout << "Please provide a valid choice" << std::endl << std::endl;
break;
}
}
}
else
std::cout << std::endl << "!!! Generate/Load Data before attempting to find pairs !!!" << std::endl << std::endl;
menu_choice = 4;
break;
case 5:
break;
case 6:
system("CLS");
break;
default:
std::cout << std::endl << "Please provide a valid choice" << std::endl << std::endl;
break;
}
}
return 0;
}
|
3c31c72dfc17190278dc50c6495cda238ac2c06e.cu
|
// includes, system
#include <stdio.h>
#include <random>
#include <bitset>
#include <unordered_set>
#include <vector>
#include <chrono>
#include <algorithm>
#include <limits>
#include <fstream>
#include <iostream>
#include <string>
// includes, cuda
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// includes, thrust
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
////////////////////////////////////////////////////////////////////////////////
#define WORD_SIZE 1000
#define DATA_SIZE 100000
#define UINT_BITSIZE (unsigned int)(8*sizeof(unsigned int))
#define SUBWORDS_PER_WORD(N) (unsigned int)(std::ceil((float)N / (sizeof(unsigned int) * 8.0f)))
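// Example: with WORD_SIZE 1000 each word is split into ceil(1000 / 32) = 32 unsigned int subwords,
// so the packed data set is DATA_SIZE * 32 = 3.2 million values (~12.8 MB) on the GPU.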
////////////////////////////////////////////////////////////////////////////////
// function declarations
template<size_t N>
unsigned int hamming_distance(const typename std::bitset<N>& A, const typename std::bitset<N>& B);
template<size_t N>
typename std::bitset<N> random_bitset(double p);
template<size_t N>
unsigned int MSB(std::bitset<N> bitset);
template<size_t N, size_t M>
void generate_random_data(typename std::vector<std::bitset<N>>& _data_vec, \
const bool timeCount = true, const bool consoleOutput = true, const float p = 0.5f);
template<size_t N, size_t M>
void generate_predictable_data(typename std::vector<std::bitset<N>>& _data_vec, \
const bool timeCount = true, const bool consoleOutput = true);
template<size_t N, size_t M>
void load_data(const char* words_filepath, const char* pairs_filepath, typename std::vector<std::bitset<N>>& _data_vec, \
typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs);
template<size_t N, size_t M>
void save_data(const char* words_filepath, const char* pairs_filepath, const typename std::vector<std::bitset<N>>& _data_vec, \
const typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs);
template<size_t N>
void find_ham1(const typename std::vector<std::bitset<N>>& _data_vec, \
typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool timeCount = true, const bool pairsOutput = true);
template<size_t N, size_t M>
thrust::device_vector<unsigned int> move_data_to_GPU(const typename std::vector<std::bitset<N>>& data_vec);
__global__ void find_ham1_GPU_ker(const unsigned int* subwords, unsigned int* pair_flags, const unsigned int subwords_per_pair_flags);
__global__ void count_ones(unsigned int* d_data, size_t pair_flags_size);
template<size_t N>
void find_ham1_GPU(thrust::device_vector<unsigned int>& d_subwords, \
thrust::device_vector<unsigned int>& d_pair_flags, \
thrust::host_vector<unsigned int>& h_pair_flags, size_t pair_flags_size, \
const typename std::vector<std::bitset<N>>& data_vec, typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool save_to_file, const bool timeCount = true, const bool pairsOutput = false, const bool checkData = false);
template<size_t N>
void process_pairs_from_flags(thrust::host_vector<unsigned int>& h_pair_flags, size_t pair_flags_size, \
const typename std::vector<std::bitset<N>>& data_vec, \
const typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool checkData, const bool pairsOutput, const bool saveToFile = false, const char* filepath = "", unsigned int pairs_count = 0);
////////////////////////////////////////////////////////////////////////////////
// word generating function - from bernoulli distribution
template<size_t N> // p = 0.5 gives equal chance for 0's and 1's to occur
typename std::bitset<N> random_bitset(double p)
{
typename std::bitset<N> bits;
	static std::random_device rd;
	static std::mt19937 gen(rd()); // construct and seed the generator once, not on every call
std::bernoulli_distribution dist(p);
for (size_t i = 0; i < N; ++i) {
bits[i] = dist(gen);
}
return bits;
}
////////////////////////////////////////////////////////////////////////////////
// finds MSB index in bitset
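// note: relies on to_ulong(), so the bitset value must fit into an unsigned long
// (true for the counter-driven words built in generate_predictable_data)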
template<size_t N>
unsigned int MSB(std::bitset<N> bitset)
{
if (!bitset.to_ulong())
return 0;
int msb = 0;
while (bitset.to_ulong() != 1)
{
bitset >>= 1;
++msb;
}
return msb;
}
////////////////////////////////////////////////////////////////////////////////
// data generating function
template<size_t N, size_t M>
void generate_random_data(typename std::vector<std::bitset<N>>& _data_vec, \
const bool timeCount, const bool consoleOutput, const float p)
{
std::unordered_set<std::bitset<N>> data_uset;
data_uset.reserve(M);
std::chrono::steady_clock::time_point start, finish;
std::chrono::duration<double> elapsed;
if (consoleOutput) std::cout << "Beginning Data Generation...\n";
// Record start time
if (consoleOutput && timeCount) start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < M; ++i)
{
while (false == (data_uset.emplace(random_bitset<N>(p)).second));
}
// Record end time
if (consoleOutput && timeCount) finish = std::chrono::high_resolution_clock::now();
// Copy to vector
for (const auto& it : data_uset)
{
_data_vec.emplace_back(it);
}
if (consoleOutput)
{
if (timeCount) elapsed = finish - start;
std::cout << "Data Generation Finished!\n";
if (timeCount) std::cout << "Elapsed time: " << elapsed.count() << " s\n";
std::cout << std::endl;
}
}
template<size_t N, size_t M>
void generate_predictable_data(typename std::vector<std::bitset<N>>& _data_vec, \
const bool timeCount, const bool consoleOutput)
{
std::unordered_set<std::bitset<N>> data_uset; // used for uniqueness
data_uset.reserve(M);
std::chrono::steady_clock::time_point start, finish;
std::chrono::duration<double> elapsed;
unsigned int starting_word_counter = 0;
std::bitset<N> starting_word;
std::bitset<N> current_word;
if (consoleOutput) std::cout << "Beginning Data Generation...\n";
// Record start time
if (consoleOutput && timeCount) start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < M;)
{
starting_word = std::bitset<N>(starting_word_counter++);
// Check if starting word can be placed into vector
if (data_uset.emplace(starting_word).second)
{
_data_vec.emplace_back(starting_word);
++i;
}
// Words are created by setting one of the bits before MSB to 1
for (size_t j = starting_word.to_ulong() == 0 ? 0 : MSB(starting_word) + 1; j < N && i < M; ++j)
{
current_word = std::bitset<N>(0);
current_word[j] = 1;
current_word = current_word ^ starting_word;
// Check if current word can be placed into vector
if (data_uset.emplace(current_word).second) {
_data_vec.emplace_back(current_word);
++i;
}
}
}
// Record end time
if (consoleOutput && timeCount) finish = std::chrono::high_resolution_clock::now();
if (consoleOutput)
{
if (timeCount) elapsed = finish - start;
std::cout << "Data Generation Finished!\n";
if (_data_vec.size() != M) std::cout << "Wrong Data Size: " << _data_vec.size() << std::endl;
if (timeCount) std::cout << "Elapsed time: " << elapsed.count() << " s\n";
std::cout << std::endl;
}
}
////////////////////////////////////////////////////////////////////////////////
// data loading function
template<size_t N, size_t M>
void load_data(const char* words_filepath, const char* pairs_filepath, typename std::vector<std::bitset<N>>& _data_vec, \
typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs)
{
size_t pairs_count = 0;
std::string line, number;
std::string separator = ";";
size_t sep_pos = 0;
std::ifstream words_file;
words_file.open(words_filepath);
if (!words_file.good()) {
std::cout << "Error opening words_file\n\n";
return;
}
_data_vec.clear();
// discard first line(header)
std::getline(words_file, line);
//check if WORD_SIZE/DATA_SIZE the same as defined in program
std::getline(words_file, line);
sep_pos = line.find(separator);
if (sep_pos == std::string::npos) {
std::cout << "Error(words_file) - wrong formatting\n\n";
return;
}
if (std::stoi(line.substr(0, sep_pos)) != N) {
std::cout << "Error(words_file) - WORD_SIZE different\n\n";
return;
}
if (std::stoi(line.substr(sep_pos + 1)) != M) {
std::cout << "Error(words_file) - DATA_SIZE different\n\n";
return;
}
// main words_file loop
for (size_t i = 0; i < M; ++i) {
std::getline(words_file, line);
_data_vec.emplace_back(std::bitset<N>(line));
}
std::ifstream pairs_file;
pairs_file.open(pairs_filepath);
if (!words_file.good()) {
std::cout << "Error opening pairs_file\n\n";
return;
}
ham1_pairs.clear();
// discard first line(header)
std::getline(pairs_file, line);
//check if WORD_SIZE the same as defined in program
std::getline(pairs_file, line);
sep_pos = line.find(separator);
if (sep_pos == std::string::npos) {
std::cout << "Error(pairs_file) - wrong formatting\n\n";
return;
}
if (std::stoi(line.substr(0, sep_pos)) != N) {
std::cout << "Error(pairs_file) - WORD_SIZE different\n\n";
return;
}
pairs_count = std::stoi(line.substr(sep_pos + 1));
// main pairs_file loop
for (size_t i = 0; i < pairs_count; ++i) {
std::getline(pairs_file, line);
sep_pos = line.find(separator);
ham1_pairs.emplace_back(std::make_pair<std::bitset<N>, std::bitset<N>>(std::bitset<N>(line.substr(0, sep_pos)), std::bitset<N>(line.substr(sep_pos + 1))));
}
pairs_file.close();
std::cout << "Loading Data successful!" << std::endl << std::endl;
}
////////////////////////////////////////////////////////////////////////////////
// data saving function
template<size_t N, size_t M>
void save_data(const char* words_filepath, const char* pairs_filepath, const typename std::vector<std::bitset<N>>& _data_vec, \
const typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs)
{
if (_data_vec.empty()) {
std::cout << "Words vector is empty!";
return;
}
std::ofstream words_file;
std::remove(words_filepath);
words_file.open(words_filepath);
words_file << "WORD_SIZE;DATA_SIZE\n";
words_file << N << ';' << M << "\n";
// main words_file loop
for (size_t i = 0; i < M; ++i)
words_file << _data_vec[i].to_string() << "\n";
words_file.close();
if (ham1_pairs.empty()) {
std::cout << "Saving Data successful!" << std::endl << std::endl;
return;
}
std::ofstream pairs_file;
std::remove(pairs_filepath);
pairs_file.open(pairs_filepath);
pairs_file << "WORD_SIZE;PAIRS_COUNT\n";
pairs_file << N << ';' << ham1_pairs.size() << "\n";
// main pairs_file loop
for (size_t i = 0; i < ham1_pairs.size(); ++i)
pairs_file << ham1_pairs[i].first.to_string() << ';' << ham1_pairs[i].second.to_string() << "\n";
pairs_file.close();
std::cout << "Saving Data successful!" << std::endl << std::endl;
}
////////////////////////////////////////////////////////////////////////////////
// finding pairs with hamming distance 1 on CPU
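// Brute-force reference: tests all M*(M-1)/2 unordered pairs with one bitset XOR + popcount
// each; with DATA_SIZE = 100000 that is roughly 5e9 comparisons of WORD_SIZE-bit words.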
template<size_t N>
void find_ham1(const typename std::vector<std::bitset<N>>& data_vec, \
typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool timeCount, const bool pairsOutput)
{
std::chrono::steady_clock::time_point start, finish;
std::chrono::duration<double> elapsed;
std::cout << "Looking for pairs with hamming distance 1 ...\n";
ham1_pairs.clear();
// Record start time
if (timeCount) start = std::chrono::high_resolution_clock::now();
unsigned int ham1 = 0;
for (auto it1 = std::begin(data_vec); it1 != std::end(data_vec); ++it1)
{
for (auto it2 = std::next(it1); it2 != std::end(data_vec); ++it2)
{
if (1 == hamming_distance<N>(*it1, *it2))
{
ham1_pairs.emplace_back(std::make_pair<std::bitset<N>, std::bitset<N>>(std::bitset<N>(*it1), std::bitset<N>(*it2)));
++ham1;
}
}
}
// Record end time
if (timeCount) finish = std::chrono::high_resolution_clock::now();
if (timeCount) elapsed = finish - start;
std::cout << "Finished!\n";
if (timeCount) std::cout << "Elapsed time: " << elapsed.count() << " s\n";
std::cout << ham1 << " pairs found\n\n";
if (ham1 && pairsOutput)
{
std::cout << "Pairs found:\n";
for (const auto& it : ham1_pairs)
{
std::cout << it.first << " " << it.second << std::endl;
}
std::cout << std::endl;
}
}
////////////////////////////////////////////////////////////////////////////////
// hamming distance function
template<size_t N>
unsigned int hamming_distance(const typename std::bitset<N>& A, const typename std::bitset<N>& B)
{
return (A ^ B).count();
}
////////////////////////////////////////////////////////////////////////////////
// move data to gpu
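// Packs each std::bitset word into UINT_BITSIZE-bit (normally 32-bit) subwords, zero padded at
// the end, and stores them column-major: subword i of every word is contiguous, so the value for
// word w and subword i lands at h_words[i * M + w]. The kernel indexes the array the same way
// (subwords[word_idx + DATA_SIZE * i]), which keeps the per-warp loads coalesced.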
template<size_t N, size_t M>
thrust::device_vector<unsigned int> move_data_to_GPU(const typename std::vector<std::bitset<N>>& data_vec)
{
//N - WORD_SIZE, M - DATA_SIZE
thrust::host_vector<unsigned int> h_words(M * SUBWORDS_PER_WORD(N));
thrust::device_vector<unsigned int> d_words;
std::chrono::steady_clock::time_point start, finish;
std::chrono::duration<double> elapsed;
// Record start time
start = std::chrono::high_resolution_clock::now();
int i = 0; // index in h_words
if (N < UINT_BITSIZE)
{
for (const auto& word_bitset : data_vec)
{
std::string word_str = word_bitset.to_string().substr(0, N);
// add padding bits
for (size_t word_str_size = N; word_str_size < UINT_BITSIZE; ++word_str_size)
word_str += "0";
unsigned int word = (unsigned int)(std::bitset<N>(word_str).to_ulong());
h_words[i++] = word;
}
}
else
{
size_t j = 0; // currently processed subwords
		for (; j + UINT_BITSIZE <= N; j += UINT_BITSIZE) // <= so words that are an exact multiple of UINT_BITSIZE still emit their last full subword
{
for (const auto& word_bitset : data_vec)
{
std::string subword_str = word_bitset.to_string().substr(j, UINT_BITSIZE);
unsigned int subword = (unsigned int)(std::bitset<N>(subword_str).to_ulong());
h_words[i++] = subword;
}
}
		if (j < N) // a partial tail subword (shorter than UINT_BITSIZE) remains
{
for (const auto& word_bitset : data_vec)
{
std::string subword_str = word_bitset.to_string().substr(j, N - j);
for (size_t subword_str_size = N - j; subword_str_size < UINT_BITSIZE; ++subword_str_size)
subword_str += "0";
unsigned int subword = (unsigned int)(std::bitset<N>(subword_str).to_ulong());
h_words[i++] = subword;
}
}
}
d_words = h_words;
// Record end time
finish = std::chrono::high_resolution_clock::now();
elapsed = finish - start;
std::cout << std::endl << "Data moved to GPU" << std::endl;
std::cout << "Elapsed time: " << elapsed.count() << " s" << std::endl << std::endl;
return d_words;
}
////////////////////////////////////////////////////////////////////////////////
// HammingOne kernel
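// One thread per word: thread word_idx compares its word against every word with a larger index,
// so each unordered pair is tested exactly once and the popcount loop can stop early once the
// running distance reaches 2. A match is recorded as a single bit in pair_flags: row word_idx,
// bit comparison_idx counted MSB-first within that row's unsigned ints.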
__global__ void find_ham1_GPU_ker(const unsigned int* subwords, unsigned int* pair_flags, const unsigned int subwords_per_pair_flags)
{
const unsigned int word_idx = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int subwords_per_word = SUBWORDS_PER_WORD(WORD_SIZE);
if (word_idx >= DATA_SIZE) return;
unsigned int hamming_distance, flag_subword_offset, flag_in_subword;
// comparison_idx - index of word that word under word_idx is being compared to
for (size_t comparison_idx = word_idx + 1; comparison_idx < DATA_SIZE; ++comparison_idx)
{
hamming_distance = 0;
for (size_t i = 0; i < subwords_per_word && hamming_distance < 2; ++i)
hamming_distance += __popc(subwords[word_idx + DATA_SIZE * i] ^ subwords[comparison_idx + DATA_SIZE * i]);
// each word has at least DATA_SIZE flags, flags for matches are set on match's index in CPU data
if (hamming_distance && !(hamming_distance >> 1)) // true when hamming_distance == 1
{
flag_subword_offset = comparison_idx / UINT_BITSIZE;
			flag_in_subword = 1u << (UINT_BITSIZE - 1 - comparison_idx % UINT_BITSIZE); // MSB-first bit for comparison_idx inside its flag subword
pair_flags[word_idx * subwords_per_pair_flags + flag_subword_offset] |= flag_in_subword;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Counting kernel
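// Replaces every flag word with its popcount in place, so a single thrust::reduce over the vector
// yields the total number of pairs; the caller restores the original flags from the host copy
// afterwards.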
__global__ void count_ones(unsigned int* d_data, size_t pair_flags_size)
{
const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= pair_flags_size) return;
d_data[tid] = __popc(d_data[tid]);
}
////////////////////////////////////////////////////////////////////////////////
// finding pairs with hamming distance 1 on GPU
template<size_t N>
void find_ham1_GPU(thrust::device_vector<unsigned int>& d_subwords, \
thrust::device_vector<unsigned int>& d_pair_flags, \
thrust::host_vector<unsigned int>& h_pair_flags, size_t pair_flags_size, \
const typename std::vector<std::bitset<N>>& data_vec, typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool save_to_file, const bool timeCount, const bool pairsOutput, const bool checkData)
{
// vars with 2 are for kernel processing flags
unsigned int threads = 512;
unsigned int threads2 = 512;
unsigned int blocks = (unsigned int)std::ceil(DATA_SIZE / (double)threads);
	unsigned int blocks2 = (unsigned int)std::ceil(pair_flags_size / (double)threads2);
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
dim3 dimBlock2(threads2, 1, 1);
dim3 dimGrid2(blocks2, 1, 1);
float elapsed;
cudaEvent_t start, stop;
const unsigned int subwords_per_pair_flags = pair_flags_size / DATA_SIZE;
auto d_subwords_ptr = thrust::raw_pointer_cast(d_subwords.begin().base());
auto d_pair_flags_ptr = thrust::raw_pointer_cast(d_pair_flags.begin().base());
unsigned int pairs_count_GPU = 0;
if (timeCount) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
std::cout << "Looking for pairs with hamming distance 1 ...\n";
if (timeCount) cudaEventRecord(start, 0);
find_ham1_GPU_ker<<<dimGrid, dimBlock>>>(d_subwords_ptr, d_pair_flags_ptr, subwords_per_pair_flags);
if (timeCount) cudaEventRecord(stop, 0);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) printf("%s\n", cudaGetErrorString(err));
cudaDeviceSynchronize();
if (timeCount) cudaEventElapsedTime(&elapsed, start, stop);
std::cout << "Finished!\n";
if (timeCount) std::cout << "Elapsed time: " << elapsed << " ms\n";
h_pair_flags = d_pair_flags;
// get number of pairs from pair_flags
count_ones<<<dimGrid2, dimBlock2>>>(thrust::raw_pointer_cast(d_pair_flags.begin().base()), pair_flags_size);
err = cudaGetLastError();
if (err != cudaSuccess) printf("%s\n", cudaGetErrorString(err));
cudaDeviceSynchronize();
pairs_count_GPU = thrust::reduce(d_pair_flags.begin(), d_pair_flags.end()); // d_pair_flags invalidated
d_pair_flags = h_pair_flags; // reset to correct value
std::cout << pairs_count_GPU << " pairs found\n\n";
if (pairs_count_GPU) {
if (save_to_file)
process_pairs_from_flags<N>(h_pair_flags, pair_flags_size, data_vec, ham1_pairs, checkData, pairsOutput, true, "./pairs_GPU.csv", pairs_count_GPU);
else
process_pairs_from_flags<N>(h_pair_flags, pair_flags_size, data_vec, ham1_pairs, checkData, pairsOutput);
}
else
process_pairs_from_flags<N>(h_pair_flags, pair_flags_size, data_vec, ham1_pairs, checkData, false);
if (timeCount) {
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
}
////////////////////////////////////////////////////////////////////////////////
// pairs_flag to pairs output
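// Decodes the per-word flag bitmasks back into (word, match) pairs. Flag subwords are walked from
// the last one down; inside a subword the bits are consumed LSB-first while flag_pos counts down
// from that subword's highest comparison index, so every set bit maps back to the index of the
// matching word in the CPU data vector.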
template<size_t N>
void process_pairs_from_flags(thrust::host_vector<unsigned int>& h_pair_flags, size_t pair_flags_size, \
const typename std::vector<std::bitset<N>>& data_vec, \
const typename std::vector<std::pair<std::bitset<N>, std::bitset<N>>>& ham1_pairs, \
const bool checkData, const bool pairsOutput, const bool saveToFile, const char* filepath, unsigned int pairs_count)
{
const unsigned int subwords_per_word_flags = pair_flags_size / DATA_SIZE;
std::chrono::steady_clock::time_point start, finish;
std::chrono::duration<double> elapsed;
if (saveToFile) {
std::ofstream file;
std::remove(filepath);
// Record start time
start = std::chrono::high_resolution_clock::now();
file.open(filepath);
file << "WORD_SIZE;PAIRS_COUNT\n";
file << N << ';' << pairs_count << "\n";
for (size_t word_idx = 0; word_idx < DATA_SIZE; ++word_idx)
{
bool flag_found = false;
unsigned int* word_flags = new unsigned int[subwords_per_word_flags];
// Get flags of current word
for (size_t i = 0; i < subwords_per_word_flags; ++i)
word_flags[i] = h_pair_flags[word_idx * subwords_per_word_flags + i];
// Check if the word has any match
for (size_t i = 0; i < subwords_per_word_flags; ++i)
{
if (word_flags[i]) {
flag_found = true;
break;
}
}
			if (!flag_found) { delete[] word_flags; continue; } // Process next word if current has no pairs found (free its flag copy first to avoid a leak)
for (int i = subwords_per_word_flags - 1; i >= 0; --i)
{
if (!word_flags[i]) continue; // Process next subset if current has no pairs found
int flags_set = __popcnt(word_flags[i]); // Matches in current flag subset
int flag_pos = (i + 1) * UINT_BITSIZE - 1; // Index of match in CPU data vector
size_t j = 0; // j - matches processed
while (j < flags_set)
{
if (word_flags[i] % 2) {
file << data_vec[word_idx].to_string() << ';' << data_vec[flag_pos].to_string() << "\n";
++j;
}
word_flags[i] = word_flags[i] >> 1;
--flag_pos;
}
}
delete[] word_flags;
}
file.close();
// Record end time
finish = std::chrono::high_resolution_clock::now();
elapsed = finish - start;
std::cout << "Saving Data successful!" << std::endl;
std::cout << "Elapsed time: " << elapsed.count() << " s" << std::endl << std::endl;
}
else {
if (!checkData && !pairsOutput) return;
bool dataCorrect = true;
if (pairsOutput) std::cout << "Pairs found:\n";
// Record start time
start = std::chrono::high_resolution_clock::now();
for (size_t word_idx = 0; word_idx < DATA_SIZE; ++word_idx)
{
bool flag_found = false;
unsigned int* word_flags = new unsigned int[subwords_per_word_flags];
// Get flags of current word
for (size_t i = 0; i < subwords_per_word_flags; ++i)
word_flags[i] = h_pair_flags[word_idx * subwords_per_word_flags + i];
// Check if the word has any match
for (size_t i = 0; i < subwords_per_word_flags; ++i)
{
if (word_flags[i]) {
flag_found = true;
break;
}
}
			if (!flag_found) { delete[] word_flags; continue; } // Process next word if current has no pairs found (free its flag copy first to avoid a leak)
for (int i = subwords_per_word_flags - 1; i >= 0; --i)
{
if (!word_flags[i]) continue; // Process next subset if current has no pairs found
int flags_set = __popcnt(word_flags[i]); // Matches in current flag subset
int flag_pos = (i + 1) * UINT_BITSIZE - 1; // Index of match in CPU data vector
size_t j = 0; // j - matches processed
while (j < flags_set)
{
if (word_flags[i] % 2) {
if (checkData) {
std::pair<std::bitset<N>, std::bitset<N>> pair = std::make_pair<std::bitset<N>, std::bitset<N>>(std::bitset<N>(data_vec[word_idx].to_string()), std::bitset<N>(data_vec[flag_pos].to_string()));
if (std::end(ham1_pairs) == std::find(std::begin(ham1_pairs), std::end(ham1_pairs), pair)) {
std::cout << "No matching pair found in CPU Data" << std::endl;
dataCorrect = false;
}
}
if (pairsOutput) std::cout << data_vec[word_idx] << " " << data_vec[flag_pos] << std::endl;
++j;
}
word_flags[i] = word_flags[i] >> 1;
--flag_pos;
}
}
delete[] word_flags;
}
// Record end time
finish = std::chrono::high_resolution_clock::now();
elapsed = finish - start;
if (checkData && dataCorrect) {
if (pairsOutput) std::cout << std::endl;
std::cout << "GPU Data is consistent with CPU Data" << std::endl << std::endl;
}
else if (pairsOutput || !dataCorrect) std::cout << std::endl;
if (checkData) std::cout << "Elapsed time: " << elapsed.count() << " s" << std::endl << std::endl;
}
}
////////////////////////////////////////////////////////////////////////////////
int main()
{
bool updated_data_GPU = true;
unsigned short menu_choice = 0;
size_t pair_flags_size = DATA_SIZE * (std::ceil((double)DATA_SIZE / (double)UINT_BITSIZE));
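	// pair_flags holds one flag bit per ordered (word, candidate) slot: DATA_SIZE rows of
	// ceil(DATA_SIZE / UINT_BITSIZE) unsigned ints. Assuming 32-bit unsigned int and the default
	// DATA_SIZE of 100000 this is 100000 * 3125 values, i.e. about 1.25 GB on the device
	// (and the same again for the host copy read back after the kernel).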
thrust::device_vector<unsigned int> d_subwords;
thrust::device_vector<unsigned int> d_pair_flags(pair_flags_size, 0);
thrust::host_vector<unsigned int> h_pair_flags;
std::vector<std::bitset<WORD_SIZE>> data_vec;
std::vector<std::pair<std::bitset<WORD_SIZE>, std::bitset<WORD_SIZE>>> ham1_pairs;
while (menu_choice != 5) {
std::cout << "1. Generate Data" << std::endl;
std::cout << "2. Save/Load Data" << std::endl;
if (!data_vec.empty()) {
if (d_subwords.empty())
std::cout << "3. Move Data to GPU - !!! No Data on GPU !!!" << std::endl;
else if (!updated_data_GPU)
std::cout << "3. Move Data to GPU - !!! Data on GPU not matching Data on CPU !!!" << std::endl;
else
std::cout << "3. Move Data to GPU" << std::endl;
std::cout << "4. Find Pairs" << std::endl;
}
else {
std::cout << "3. Move Data to GPU - !!! Generate/Load Data before attempting to move the data to GPU !!!" << std::endl;
std::cout << "4. Find Pairs - !!! Generate/Load Data before attempting to find pairs !!!" << std::endl;
}
std::cout << "5. Exit" << std::endl;
std::cout << "6. Clear Console" << std::endl;
std::cout << "Choice: ";
std::cin >> menu_choice;
switch (menu_choice)
{
case 1:
std::cout << std::endl;
while (menu_choice != 3)
{
std::cout << "1. Generate Random Data" << std::endl;
std::cout << "2. Generate Predictable Data" << std::endl;
std::cout << "3. Go Back" << std::endl;
std::cout << "Choice: ";
std::cin >> menu_choice;
std::cout << std::endl;
switch (menu_choice)
{
case 1:
if (!data_vec.empty())
data_vec.clear();
data_vec.reserve(DATA_SIZE);
if (!ham1_pairs.empty())
ham1_pairs.clear();
generate_random_data<WORD_SIZE, DATA_SIZE>(data_vec);
updated_data_GPU = false;
break;
case 2:
if (!data_vec.empty())
data_vec.clear();
data_vec.reserve(DATA_SIZE);
if (!ham1_pairs.empty())
ham1_pairs.clear();
generate_predictable_data<WORD_SIZE, DATA_SIZE>(data_vec);
updated_data_GPU = false;
break;
case 3:
break;
default:
std::cout << "Please provide a valid choice" << std::endl << std::endl;
break;
}
}
menu_choice = 1;
break;
case 2:
std::cout << std::endl;
while (menu_choice != 3)
{
std::cout << "1. Save Data" << std::endl;
std::cout << "2. Load Data" << std::endl;
std::cout << "3. Go Back" << std::endl;
std::cout << "Choice: ";
std::cin >> menu_choice;
std::cout << std::endl;
switch (menu_choice)
{
case 1:
save_data<WORD_SIZE, DATA_SIZE>("./words_data.csv", "./pairs_CPU.csv", data_vec, ham1_pairs);
break;
case 2:
load_data<WORD_SIZE, DATA_SIZE>("./words_data.csv", "./pairs_CPU.csv", data_vec, ham1_pairs);
updated_data_GPU = false;
break;
case 3:
break;
default:
std::cout << "Please provide a valid choice" << std::endl << std::endl;
break;
}
}
menu_choice = 2;
break;
case 3:
if (!data_vec.empty()) {
d_subwords = move_data_to_GPU<WORD_SIZE, DATA_SIZE>(data_vec);
updated_data_GPU = true;
}
else
std::cout << std::endl << "!!! Generate / Load Data before attempting to move the data to GPU !!!" << std::endl << std::endl;
break;
case 4:
std::cout << std::endl;
if (!data_vec.empty()) {
while (menu_choice != 3)
{
std::cout << "1. Use CPU" << std::endl;
if (d_subwords.empty())
std::cout << "2. Use GPU - !!! No Data on GPU !!!" << std::endl;
else if (!updated_data_GPU)
std::cout << "2. Use GPU - !!! Data on GPU not matching Data on CPU !!!" << std::endl;
else
std::cout << "2. Use GPU" << std::endl;
std::cout << "3. Go Back" << std::endl;
std::cout << "Choice: ";
std::cin >> menu_choice;
std::cout << std::endl;
switch (menu_choice)
{
case 1:
{
char c;
do {
std::cout << "Output pairs to console? (y/n):";
std::cin.clear();
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
c = std::getc(stdin);
if (c == 'y' || c == 'Y') {
find_ham1<WORD_SIZE>(data_vec, ham1_pairs, true, true);
break;
}
else if (c == 'n' || c == 'N') {
find_ham1<WORD_SIZE>(data_vec, ham1_pairs, true, false);
break;
}
std::cout << "Please provide a valid choice" << std::endl;
} while (true);
break;
}
case 2:
{
bool save_to_file = false;
bool out_to_console = false;
char c;
if (d_subwords.empty())
std::cout << std::endl << "!!! No Data on GPU !!!" << std::endl << std::endl;
else {
do {
std::cout << "Save pairs to file? (y/n):";
std::cin.clear();
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
c = std::getc(stdin);
if (c == 'y' || c == 'Y') {
save_to_file = true;
find_ham1_GPU<WORD_SIZE>(d_subwords, d_pair_flags, h_pair_flags, pair_flags_size, data_vec, ham1_pairs, save_to_file);
break;
}
else if (c == 'n' || c == 'N') {
break;
}
std::cout << "Please provide a valid choice" << std::endl;
} while (true);
if (save_to_file) break;
do {
std::cout << "Output pairs to console? (y/n):";
std::cin.clear();
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
c = std::getc(stdin);
if (c == 'y' || c == 'Y') {
out_to_console = true;
break;
}
else if (c == 'n' || c == 'N') {
out_to_console = false;
break;
}
std::cout << "Please provide a valid choice" << std::endl;
} while (true);
do {
std::cout << "Check pairs against CPU? (y/n):";
std::cin.clear();
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
c = std::getc(stdin);
if (c == 'y' || c == 'Y') {
find_ham1_GPU<WORD_SIZE>(d_subwords, d_pair_flags, h_pair_flags, pair_flags_size, data_vec, ham1_pairs, save_to_file, true, out_to_console, true);
break;
}
else if (c == 'n' || c == 'N') {
find_ham1_GPU<WORD_SIZE>(d_subwords, d_pair_flags, h_pair_flags, pair_flags_size, data_vec, ham1_pairs, save_to_file, true, out_to_console, false);
break;
}
std::cout << "Please provide a valid choice" << std::endl;
} while (true);
}
break;
}
case 3:
break;
default:
std::cout << "Please provide a valid choice" << std::endl << std::endl;
break;
}
}
}
else
std::cout << std::endl << "!!! Generate/Load Data before attempting to find pairs !!!" << std::endl << std::endl;
menu_choice = 4;
break;
case 5:
break;
case 6:
system("CLS");
break;
default:
std::cout << std::endl << "Please provide a valid choice" << std::endl << std::endl;
break;
}
}
return 0;
}
|
da9caf700065b20268f8a9aa0ca4d92c69634baf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include "common.h"
#include "bmp.h"
#include <stdlib.h>
#include <iostream>
#include <GL/freeglut.h>
#define DIM 512
#define blockSize 8
#define PI 3.1415926535897932f
#define centerX (DIM/2)
#define centerY (DIM/2)
float sourceColors[DIM*DIM]; // host memory for source image
float readBackPixels[DIM*DIM]; // host memory for swirled image
float *sourceDevPtr; // device memory for source image
float *swirlDevPtr; // device memory for swirled image
// DONE: Add host variables to control the swirl
float a = 0.f;
float b = 0.f;
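// The kernel below does backward mapping: for each target pixel it rotates the offset from the
// image center by angle = a * r^b and samples the source pixel at that rotated position, so
// every output pixel receives exactly one value and no holes appear.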
__global__ void swirlKernel( float *sourcePtr, float *targetPtr, float a, float b)
{
int index = 0;
	// DONE: compute the 1D pixel index
	index = threadIdx.x + blockIdx.x * blockDim.x;
	// DONE: invert the swirl
// Add variables
float r = 0.f;
float angle = 0.f;
// Create a vector to get the difference between center and position
float x = index % DIM;
float y = index / DIM;
x = x - centerX;
y = y - centerY;
	// Compute the radius of the current pixel as the length of the offset vector, using CUDA math functions
// http://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE
r = hypotf(x, y);
angle = a * powf(r, b);
// Compute rotated vector
float x2 = x * cosf(angle) - y * sinf(angle);
float y2 = x * sinf(angle) + y * cosf(angle);
// Add the center to the rotated vector to obtain the original position after rotation
x2 = x2 + centerX;
y2 = y2 + centerY;
	// Transform the rotated position back into a 1D index, checking the borders per axis:
	// outside the image: pass the source pixel through | inside: sample the rotated position
	if (x2 < 0.f || x2 >= DIM || y2 < 0.f || y2 >= DIM)
		targetPtr[index] = sourcePtr[index];
	else
		targetPtr[index] = sourcePtr[int(x2) + int(y2) * DIM];
}
void display(void)
{
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	// DONE: launch the swirl kernel.
hipLaunchKernelGGL(( swirlKernel) , dim3(DIM), dim3(DIM) , 0, 0, sourceDevPtr, swirlDevPtr, a, b);
	// DONE: read the result back into host memory.
CUDA_SAFE_CALL(hipMemcpy(readBackPixels, swirlDevPtr, sizeof(readBackPixels), hipMemcpyDeviceToHost));
	// Draw the result (yes, now it goes straight back to the GPU...)
glDrawPixels( DIM, DIM, GL_LUMINANCE, GL_FLOAT, readBackPixels );
glutSwapBuffers();
}
// clean up memory allocated on the GPU
void cleanup() {
CUDA_SAFE_CALL( hipFree( sourceDevPtr ) );
CUDA_SAFE_CALL( hipFree( swirlDevPtr ) );
}
// GLUT callback function for keyboard input
void keyboard(unsigned char key, int x, int y)
{
switch (key)
{
case 27:
exit(0);
break;
	case 'q': // decrease a
		a = a - 0.01f;
		printf("a: %.2f , b: %.2f \r", a, b);
		break;
	case 'w': // increase a
		a = a + 0.01f;
		printf("a: %.2f , b: %.2f \r", a, b);
		break;
	case 'a': // decrease b
		b = b - 0.01f;
		printf("a: %.2f , b: %.2f \r", a, b);
		break;
	case 's': // increase b
		b = b + 0.01f;
		printf("a: %.2f , b: %.2f \r", a, b);
		break;
}
glutPostRedisplay();
}
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowSize(DIM, DIM);
glutCreateWindow("Simple OpenGL CUDA");
glutKeyboardFunc(keyboard);
glutIdleFunc(display);
glutDisplayFunc(display);
std::cout << "Keys:" << std::endl;
	std::cout << "  Modify a: -(q), +(w)" << std::endl;
	std::cout << "  Modify b: -(a), +(s)" << std::endl;
// load bitmap
Bitmap bmp = Bitmap("who-is-that.bmp");
if (bmp.isValid())
{
for (int i = 0 ; i < DIM*DIM ; i++) {
sourceColors[i] = bmp.getR(i/DIM, i%DIM) / 255.0f;
}
}
// DONE: allocate memory at sourceDevPtr on the GPU and copy sourceColors into it.
CUDA_SAFE_CALL(hipMalloc((void**)&sourceDevPtr, sizeof(sourceColors)));
CUDA_SAFE_CALL(hipMemcpy(sourceDevPtr, sourceColors, sizeof(sourceColors), hipMemcpyHostToDevice));
// DONE: allocate memory at swirlDevPtr for the unswirled image.
CUDA_SAFE_CALL(hipMalloc((void**)&swirlDevPtr, sizeof(readBackPixels)));
glutMainLoop();
cleanup();
}
|
da9caf700065b20268f8a9aa0ca4d92c69634baf.cu
|
#include <stdio.h>
#include <math.h>
#include "common.h"
#include "bmp.h"
#include <stdlib.h>
#include <iostream>
#include <GL/freeglut.h>
#define DIM 512
#define blockSize 8
#define PI 3.1415926535897932f
#define centerX (DIM/2)
#define centerY (DIM/2)
float sourceColors[DIM*DIM]; // host memory for source image
float readBackPixels[DIM*DIM]; // host memory for swirled image
float *sourceDevPtr; // device memory for source image
float *swirlDevPtr; // device memory for swirled image
// DONE: Add host variables to control the swirl
float a = 0.f;
float b = 0.f;
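// The kernel below does backward mapping: for each target pixel it rotates the offset from the
// image center by angle = a * r^b and samples the source pixel at that rotated position, so
// every output pixel receives exactly one value and no holes appear.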
__global__ void swirlKernel( float *sourcePtr, float *targetPtr, float a, float b)
{
int index = 0;
// DONE: Compute the pixel index
index = threadIdx.x + blockIdx.x * blockDim.x;
// DONE: Invert the swirl.
// Add variables
float r = 0.f;
float angle = 0.f;
// Offset vector from the image center to this pixel
float x = index % DIM;
float y = index / DIM;
x = x - centerX;
y = y - centerY;
// Compute the radius of the current pixel via the length of the offset vector using CUDA math functions
// http://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE
r = hypotf(x, y);
angle = a * powf(r, b);
// Compute rotated vector
float x2 = x * cosf(angle) - y * sinf(angle);
float y2 = x * sinf(angle) + y * cosf(angle);
// Add the center to the rotated vector to obtain the original position after rotation
x2 = x2 + centerX;
y2 = y2 + centerY;
// Convert the rotated position back into a 1D pixel index
int index2 = int(x2) + int(y2) * blockDim.x;
// Borders: if the source index falls outside the image, pass the pixel through; otherwise sample the rotated position
if (index2 < 0 || index2 >= DIM*DIM)
targetPtr[index] = sourcePtr[index];
else
targetPtr[index] = sourcePtr[index2];
}
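// Hedged host-side sketch (not part of the original exercise code): a CPU reference of the
// same inverse-swirl lookup the kernel performs for one pixel. The helper name
// swirlSourceIndexHost is made up for illustration; it reuses the DIM/centerX/centerY macros
// above and uses DIM where the kernel uses blockDim.x (identical for the <<<DIM, DIM>>> launch
// in display()). It can be handy for spot-checking a few readBackPixels values on the host.
static inline int swirlSourceIndexHost(int index, float a, float b)
{
    // offset of the pixel from the image center
    float x = (float)(index % DIM) - centerX;
    float y = (float)(index / DIM) - centerY;
    // swirl angle grows with distance from the center: angle = a * r^b
    float r = hypotf(x, y);
    float angle = a * powf(r, b);
    // rotate the offset and translate back into image coordinates
    float x2 = x * cosf(angle) - y * sinf(angle) + centerX;
    float y2 = x * sinf(angle) + y * cosf(angle) + centerY;
    int index2 = (int)x2 + (int)y2 * DIM;
    // outside the image: fall back to the untouched source pixel
    return (index2 < 0 || index2 >= DIM * DIM) ? index : index2;
}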
void display(void)
{
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// DONE: Invoke the swirl kernel.
swirlKernel <<<DIM, DIM >>>(sourceDevPtr, swirlDevPtr, a, b);
// DONE: Read the result back into host memory.
CUDA_SAFE_CALL(cudaMemcpy(readBackPixels, swirlDevPtr, sizeof(readBackPixels), cudaMemcpyDeviceToHost));
// Draw the result (yes, from here it goes straight back to the GPU...)
glDrawPixels( DIM, DIM, GL_LUMINANCE, GL_FLOAT, readBackPixels );
glutSwapBuffers();
}
// clean up memory allocated on the GPU
void cleanup() {
CUDA_SAFE_CALL( cudaFree( sourceDevPtr ) );
CUDA_SAFE_CALL( cudaFree( swirlDevPtr ) );
}
// GLUT callback function for keyboard input
void keyboard(unsigned char key, int x, int y)
{
switch (key)
{
case 27:
exit(0);
break;
case 'q': // decrease a
a = a - 0.01f;
printf("a: %.2f , b: %.2f \r", a, b);
break;
case 'w': // increase a
a = a + 0.01f;
printf("a: %.2f , b: %.2f \r", a, b);
break;
case 'a': // decrease b
b = b - 0.01f;
printf("a: %.2f , b: %.2f \r", a, b);
break;
case 's': // increase b
b = b + 0.01f;
printf("a: %.2f , b: %.2f \r", a, b);
break;
}
glutPostRedisplay();
}
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowSize(DIM, DIM);
glutCreateWindow("Simple OpenGL CUDA");
glutKeyboardFunc(keyboard);
glutIdleFunc(display);
glutDisplayFunc(display);
std::cout << "Keys:" << std::endl;
std::cout << " Modifiy a: -(q), +(w)" << std::endl;
std::cout << " Modifiy b: -(a), +(s)" << std::endl;
// load bitmap
Bitmap bmp = Bitmap("who-is-that.bmp");
if (bmp.isValid())
{
for (int i = 0 ; i < DIM*DIM ; i++) {
sourceColors[i] = bmp.getR(i/DIM, i%DIM) / 255.0f;
}
}
// DONE: allocate memory at sourceDevPtr on the GPU and copy sourceColors into it.
CUDA_SAFE_CALL(cudaMalloc((void**)&sourceDevPtr, sizeof(sourceColors)));
CUDA_SAFE_CALL(cudaMemcpy(sourceDevPtr, sourceColors, sizeof(sourceColors), cudaMemcpyHostToDevice));
// DONE: allocate memory at swirlDevPtr for the unswirled image.
CUDA_SAFE_CALL(cudaMalloc((void**)&swirlDevPtr, sizeof(readBackPixels)));
glutMainLoop();
cleanup();
}
|
70bf42300e702fbdb9a337ac1644ea613880238f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compute_cutoff.h"
#include "misc.h"
#include "common_def.h"
__device__ void compute_cutoff(shared_thread_s* shared, int count, int* cutoff_res, int* timestamp_res)
{
int tid = threadIdx.x;
bool odd = count % 2;
for( int s=count/2; s>0; s>>=1)
{
if( tid < s)
{
shared[tid].next_cutoff = _min(shared[tid].next_cutoff,shared[tid+s].next_cutoff);
shared[tid].timestamp = _max(shared[tid].timestamp,shared[tid+s].timestamp);
if( tid == s-1 && odd)
{
shared[tid].next_cutoff = _min(shared[tid].next_cutoff,shared[tid+s+1].next_cutoff);
shared[tid].timestamp = _max(shared[tid].timestamp,shared[tid+s+1].timestamp);
}
}
odd = s % 2;
__syncthreads();
}
if( !tid)
{
cutoff_res[blockIdx.x] = shared[0].next_cutoff;
timestamp_res[blockIdx.x] = shared[0].timestamp;
}
}
|
70bf42300e702fbdb9a337ac1644ea613880238f.cu
|
#include "compute_cutoff.h"
#include "misc.h"
#include "common_def.h"
__device__ void compute_cutoff(shared_thread_s* shared, int count, int* cutoff_res, int* timestamp_res)
{
int tid = threadIdx.x;
bool odd = count % 2;
for( int s=count/2; s>0; s>>=1)
{
if( tid < s)
{
shared[tid].next_cutoff = _min(shared[tid].next_cutoff,shared[tid+s].next_cutoff);
shared[tid].timestamp = _max(shared[tid].timestamp,shared[tid+s].timestamp);
if( tid == s-1 && odd)
{
shared[tid].next_cutoff = _min(shared[tid].next_cutoff,shared[tid+s+1].next_cutoff);
shared[tid].timestamp = _max(shared[tid].timestamp,shared[tid+s+1].timestamp);
}
}
odd = s % 2;
__syncthreads();
}
if( !tid)
{
cutoff_res[blockIdx.x] = shared[0].next_cutoff;
timestamp_res[blockIdx.x] = shared[0].timestamp;
}
}
|
80b545bc0e10a5e637011efabf535ba0fbc6704c.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
//#include <helper_functions.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int main(int argc, char **argv)
{
int block_size = 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
#ifdef _SYM
klee_make_symbolic(h_A, mem_size_A, "h_A_input");
klee_make_symbolic(h_B, mem_size_B, "h_B_input");
#else
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
#endif
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n");
hipDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
|
80b545bc0e10a5e637011efabf535ba0fbc6704c.cu
|
/**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
//#include <helper_functions.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
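/**
 * Worked example of the tiling arithmetic above (illustrative note, not part of the
 * original sample): with BLOCK_SIZE = 32, wA = 320, wB = 640 and block (bx, by) = (1, 2):
 *   aBegin = wA * BLOCK_SIZE * by = 320 * 32 * 2 = 20480  // first element of A's tile row
 *   aEnd   = aBegin + wA - 1      = 20799                 // last element of that tile row
 *   aStep  = BLOCK_SIZE           = 32                    // move one tile right in A
 *   bBegin = BLOCK_SIZE * bx      = 32                    // first element of B's tile column
 *   bStep  = BLOCK_SIZE * wB      = 20480                 // move one tile down in B
 * so the loop visits matching 32x32 tiles of A (left to right) and B (top to bottom),
 * accumulating one output element per thread in Csub.
 */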
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int main(int argc, char **argv)
{
int block_size = 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
#ifdef _SYM
klee_make_symbolic(h_A, mem_size_A, "h_A_input");
klee_make_symbolic(h_B, mem_size_B, "h_B_input");
#else
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
#endif
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n");
cudaDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
|
327cc9901ba644baf6bd3ee0054b5414df4c026c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void gpuMatMul(float * A, float * B, float *C,
int ROW_A, int COL_A, int COL_B);
void mat_mul_cuda_multi(float *A, float *B, float *C,
int ROW_A, int COL_A, int COL_B) {
/******************** TODO *********************/
}
|
327cc9901ba644baf6bd3ee0054b5414df4c026c.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void gpuMatMul(float * A, float * B, float *C,
int ROW_A, int COL_A, int COL_B);
void mat_mul_cuda_multi(float *A, float *B, float *C,
int ROW_A, int COL_A, int COL_B) {
/******************** TODO *********************/
}
|
b1ae5b785aaacd5a5e22847222cc1232def779ba.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_utils.h"
namespace TNN_NS {
DECLARE_CUDA_ACC(Shuffle, LAYER_SHUFFLE_CHANNEL);
__global__ void shuffle_kernel(const int count, const int feature_map_size, const float *input,
float *output, int group_row, int group_column, int len) {
CUDA_KERNEL_LOOP(index, count) {
const int n = index / group_row / group_column / len;
const int i = (index / group_column / len) % group_row;
const int j = index / len % group_column;
const int k = index - (n * feature_map_size + (i * group_column + j) * len);
float* p_o = output + n * feature_map_size + (j * group_row + i) * len;
p_o[k] = input[index];
}
}
Status CudaShuffleLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return CudaLayerAcc::Init(context, param, resource, inputs, outputs);
}
Status CudaShuffleLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return TNN_OK;
}
Status CudaShuffleLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
auto param = dynamic_cast<ShuffleLayerParam *>(param_);
if (!param) {
LOGE("Error: ShuffleLayerParam is nil\n");
return Status(TNNERR_MODEL_ERR, "Error: ShuffleLayerParam is nil");
}
Blob *input_blob = inputs[0];
Blob *output_blob = outputs[0];
auto dims = input_blob->GetBlobDesc().dims;
const int num = dims[0];
const int feature_map_size = DimsVectorUtils::Count(dims, 1);
const int sp_sz = DimsVectorUtils::Count(dims, 2);
const int chs = dims[1];
int group_row = param->group;
int group_column = int(chs / group_row);
assert(chs == (group_column * group_row));
int count = DimsVectorUtils::Count(dims);
float* input_data = static_cast<float*>(input_blob->GetHandle().base);
float* output_data = static_cast<float*>(output_blob->GetHandle().base);
hipLaunchKernelGGL(( shuffle_kernel), dim3(TNN_CUDA_GET_BLOCKS(count)), dim3(TNN_CUDA_NUM_THREADS), 0, context_->GetStream(),
count, feature_map_size, input_data, output_data, group_row, group_column, sp_sz);
return TNN_OK;
}
REGISTER_CUDA_ACC(Shuffle, LAYER_SHUFFLE_CHANNEL);
} // namespace TNN_NS
|
b1ae5b785aaacd5a5e22847222cc1232def779ba.cu
|
// Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_utils.h"
namespace TNN_NS {
DECLARE_CUDA_ACC(Shuffle, LAYER_SHUFFLE_CHANNEL);
__global__ void shuffle_kernel(const int count, const int feature_map_size, const float *input,
float *output, int group_row, int group_column, int len) {
CUDA_KERNEL_LOOP(index, count) {
const int n = index / group_row / group_column / len;
const int i = (index / group_column / len) % group_row;
const int j = index / len % group_column;
const int k = index - (n * feature_map_size + (i * group_column + j) * len);
float* p_o = output + n * feature_map_size + (j * group_row + i) * len;
p_o[k] = input[index];
}
}
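// Worked example (illustrative note, not in the original source): with group_row = 2 groups,
// group_column = 3 channels per group and len = H*W, an input channel c = i*group_column + j
// is written to output channel j*group_row + i. Reading the output channels in order therefore
// yields input channels 0, 3, 1, 4, 2, 5 -- one channel from each group in turn, which is the
// ShuffleNet-style channel-shuffle interleaving.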
Status CudaShuffleLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return CudaLayerAcc::Init(context, param, resource, inputs, outputs);
}
Status CudaShuffleLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return TNN_OK;
}
Status CudaShuffleLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
auto param = dynamic_cast<ShuffleLayerParam *>(param_);
if (!param) {
LOGE("Error: ShuffleLayerParam is nil\n");
return Status(TNNERR_MODEL_ERR, "Error: ShuffleLayerParam is nil");
}
Blob *input_blob = inputs[0];
Blob *output_blob = outputs[0];
auto dims = input_blob->GetBlobDesc().dims;
const int num = dims[0];
const int feature_map_size = DimsVectorUtils::Count(dims, 1);
const int sp_sz = DimsVectorUtils::Count(dims, 2);
const int chs = dims[1];
int group_row = param->group;
int group_column = int(chs / group_row);
assert(chs == (group_column * group_row));
int count = DimsVectorUtils::Count(dims);
float* input_data = static_cast<float*>(input_blob->GetHandle().base);
float* output_data = static_cast<float*>(output_blob->GetHandle().base);
shuffle_kernel<<<TNN_CUDA_GET_BLOCKS(count), TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>(
count, feature_map_size, input_data, output_data, group_row, group_column, sp_sz);
return TNN_OK;
}
REGISTER_CUDA_ACC(Shuffle, LAYER_SHUFFLE_CHANNEL);
} // namespace TNN_NS
|
9061354bc095d9ef29dbcb2a7e083e7beaae9a92.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/detail/converters.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/functional.h>
#include <thrust/logical.h>
#include <thrust/optional.h>
#include <map>
#include <numeric>
#include <vector>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Structure of date/time components
*/
struct timestamp_components {
int16_t year;
int8_t month;
int8_t day;
int16_t day_of_year;
int8_t hour;
int8_t minute;
int8_t second;
int32_t subsecond;
int32_t tz_minutes;
};
enum class format_char_type : int8_t {
literal, ///< literal char type passed through
specifier ///< timestamp format specifier
};
/**
* @brief Represents a format specifier or literal from a timestamp format string.
*
* Created by the format_compiler when parsing a format string.
*/
struct alignas(4) format_item {
format_char_type item_type; // specifier or literal indicator
char value; // specifier or literal value
int8_t length; // item length in bytes
static format_item new_specifier(char format_char, int8_t length)
{
return format_item{format_char_type::specifier, format_char, length};
}
static format_item new_literal(char literal)
{
return format_item{format_char_type::literal, literal, 1};
}
};
/**
* @brief The format-compiler parses a timestamp format string into a vector of
* `format_items`.
*
* The vector of `format_items` is used when parsing a string into timestamp
* components and when formatting a string from timestamp components.
*/
using specifier_map = std::map<char, int8_t>;
struct format_compiler {
std::string const format;
rmm::device_uvector<format_item> d_items;
// clang-format off
// The specifiers are documented here (not all are supported):
// https://en.cppreference.com/w/cpp/chrono/system_clock/formatter
specifier_map specifiers = {
{'Y', 4}, {'y', 2}, {'m', 2}, {'d', 2}, {'H', 2}, {'I', 2}, {'M', 2},
{'S', 2}, {'f', 6}, {'z', 5}, {'Z', 3}, {'p', 2}, {'j', 3}};
// clang-format on
format_compiler(std::string fmt,
rmm::cuda_stream_view stream,
specifier_map extra_specifiers = {})
: format(fmt), d_items(0, stream)
{
specifiers.insert(extra_specifiers.begin(), extra_specifiers.end());
std::vector<format_item> items;
const char* str = format.c_str();
auto length = format.length();
while (length > 0) {
char ch = *str++;
length--;
// first check for a literal character
if (ch != '%') {
items.push_back(format_item::new_literal(ch));
continue;
}
CUDF_EXPECTS(length > 0, "Unfinished specifier in timestamp format");
ch = *str++;
length--;
if (ch == '%') // escaped % char
{
items.push_back(format_item::new_literal(ch));
continue;
}
if (ch >= '0' && ch <= '9') {
CUDF_EXPECTS(*str == 'f', "precision not supported for specifier: " + std::string(1, *str));
specifiers[*str] = static_cast<int8_t>(ch - '0');
ch = *str++;
length--;
}
// check if the specifier found is supported
CUDF_EXPECTS(specifiers.find(ch) != specifiers.end(),
"invalid format specifier: " + std::string(1, ch));
// create the format item for this specifier
items.push_back(format_item::new_specifier(ch, specifiers[ch]));
}
// copy format_items to device memory
d_items = cudf::detail::make_device_uvector_async(items, stream);
}
device_span<format_item const> format_items() { return device_span<format_item const>(d_items); }
int8_t subsecond_precision() const { return specifiers.at('f'); }
};
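/**
 * Worked example (illustrative note, not part of the original source): compiling the format
 * "%Y-%m-%d" yields the items
 *   { specifier 'Y' (4 bytes), literal '-', specifier 'm' (2), literal '-', specifier 'd' (2) }
 * and a format such as "%3f" narrows the subsecond specifier 'f' to 3 digits via the
 * precision branch handled above.
 */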
/**
* @brief Specialized function to return the integer value reading up to the specified
* bytes or until an invalid character is encountered.
*
* @param str Beginning of characters to read.
* @param bytes Number of bytes in str to read.
* @return Integer value of valid characters read and how many bytes were not read.
*/
__device__ thrust::pair<int32_t, size_type> parse_int(char const* str, size_type bytes)
{
int32_t value = 0;
while (bytes-- > 0) {
char chr = *str++;
if (chr < '0' || chr > '9') break;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return thrust::make_pair(value, bytes + 1);
}
/**
* @brief This parses date/time characters into a timestamp integer
*
* @tparam T cudf::timestamp type
*/
template <typename T>
struct parse_datetime {
column_device_view const d_strings;
device_span<format_item const> const d_format_items;
int8_t const subsecond_precision;
/**
* @brief Return power of ten value given an exponent.
*
* @return `1x10^exponent` for `0 <= exponent <= 9`
*/
__device__ constexpr int64_t power_of_ten(int32_t const exponent) const
{
constexpr int64_t powers_of_ten[] = {
1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L};
return powers_of_ten[exponent];
}
// Walk the format_items to parse the string into date/time components
__device__ timestamp_components parse_into_parts(string_view const& d_string) const
{
timestamp_components timeparts = {1970, 1, 1, 0}; // init to epoch time
auto ptr = d_string.data();
auto length = d_string.size_bytes();
for (auto item : d_format_items) {
if (item.value != 'f')
item.length = static_cast<int8_t>(::min(static_cast<size_type>(item.length), length));
if (item.item_type == format_char_type::literal) {
// static character we'll just skip;
// consume item.length bytes from the input string
ptr += item.length;
length -= item.length;
continue;
}
size_type bytes_read = item.length; // number of bytes processed
// special logic for each specifier
switch (item.value) {
case 'Y': {
auto const [year, left] = parse_int(ptr, item.length);
timeparts.year = static_cast<int16_t>(year);
bytes_read -= left;
break;
}
case 'y': {
auto const [year, left] = parse_int(ptr, item.length);
timeparts.year = static_cast<int16_t>(year + (year < 69 ? 2000 : 1900));
bytes_read -= left;
break;
}
case 'm': {
auto const [month, left] = parse_int(ptr, item.length);
timeparts.month = static_cast<int8_t>(month);
bytes_read -= left;
break;
}
case 'd': {
auto const [day, left] = parse_int(ptr, item.length);
timeparts.day = static_cast<int8_t>(day);
bytes_read -= left;
break;
}
case 'j': {
auto const [day, left] = parse_int(ptr, item.length);
timeparts.day_of_year = static_cast<int16_t>(day);
bytes_read -= left;
break;
}
case 'H':
case 'I': {
auto const [hour, left] = parse_int(ptr, item.length);
timeparts.hour = static_cast<int8_t>(hour);
bytes_read -= left;
break;
}
case 'M': {
auto const [minute, left] = parse_int(ptr, item.length);
timeparts.minute = static_cast<int8_t>(minute);
bytes_read -= left;
break;
}
case 'S': {
auto const [second, left] = parse_int(ptr, item.length);
timeparts.second = static_cast<int8_t>(second);
bytes_read -= left;
break;
}
case 'f': {
int32_t const read_size =
::min(static_cast<int32_t>(item.length), static_cast<int32_t>(length));
auto const [fraction, left] = parse_int(ptr, read_size);
timeparts.subsecond =
static_cast<int32_t>(fraction * power_of_ten(item.length - read_size - left));
bytes_read = read_size - left;
break;
}
case 'p': {
string_view am_pm(ptr, 2);
auto hour = timeparts.hour;
if ((am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0)) {
if (hour == 12) hour = 0;
} else if (hour < 12)
hour += 12;
timeparts.hour = hour;
break;
}
case 'z': {
// 'z' format is +hh:mm -- single sign char and 2 chars each for hour and minute
auto const sign = *ptr == '-' ? 1 : -1;
auto const [hh, lh] = parse_int(ptr + 1, 2);
auto const [mm, lm] = parse_int(ptr + 3, 2);
// revert timezone back to UTC
timeparts.tz_minutes = sign * ((hh * 60) + mm);
bytes_read -= lh + lm;
break;
}
case 'Z': break; // skip
default: break;
}
ptr += bytes_read;
length -= bytes_read;
}
return timeparts;
}
__device__ int64_t timestamp_from_parts(timestamp_components const& timeparts) const
{
auto const ymd = // convenient chrono class handles the leap year calculations for us
cuda::std::chrono::year_month_day(
cuda::std::chrono::year{timeparts.year},
cuda::std::chrono::month{static_cast<uint32_t>(timeparts.month)},
cuda::std::chrono::day{static_cast<uint32_t>(timeparts.day)});
auto const days = cuda::std::chrono::sys_days(ymd).time_since_epoch().count();
if constexpr (std::is_same_v<T, cudf::timestamp_D>) { return days; }
int64_t timestamp = (days * 24L * 3600L) + (timeparts.hour * 3600L) + (timeparts.minute * 60L) +
timeparts.second + (timeparts.tz_minutes * 60L);
if constexpr (std::is_same_v<T, cudf::timestamp_s>) { return timestamp; }
int64_t const subsecond =
(timeparts.subsecond * power_of_ten(9 - subsecond_precision)) / // normalize to nanoseconds
(1000000000L / T::period::type::den); // and rescale to T
timestamp *= T::period::type::den;
timestamp += subsecond;
return timestamp;
}
__device__ T operator()(size_type idx) const
{
T epoch_time{typename T::duration{0}};
if (d_strings.is_null(idx)) return epoch_time;
string_view d_str = d_strings.element<string_view>(idx);
if (d_str.empty()) return epoch_time;
auto const timeparts = parse_into_parts(d_str);
return T{T::duration(timestamp_from_parts(timeparts))};
}
};
/**
* @brief Type-dispatch operator to convert timestamp strings to native fixed-width-type
*/
struct dispatch_to_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const& d_strings,
std::string const& format,
mutable_column_view& results_view,
rmm::cuda_stream_view stream) const
{
format_compiler compiler(format, stream);
parse_datetime<T> pfn{d_strings, compiler.format_items(), compiler.subsecond_precision()};
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(results_view.size()),
results_view.data<T>(),
pfn);
}
template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const&,
std::string const&,
mutable_column_view&,
rmm::cuda_stream_view) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& input,
data_type timestamp_type,
std::string const& format,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty())
return make_empty_column(timestamp_type); // make_timestamp_column(timestamp_type, 0);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
auto d_strings = column_device_view::create(input.parent(), stream);
auto results = make_timestamp_column(timestamp_type,
input.size(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
input.null_count(),
stream,
mr);
auto results_view = results->mutable_view();
cudf::type_dispatcher(
timestamp_type, dispatch_to_timestamps_fn(), *d_strings, format, results_view, stream);
results->set_null_count(input.null_count());
return results;
}
/**
* @brief Functor checks the strings against the given format items.
*
* This does no data conversion.
*/
struct check_datetime_format {
column_device_view const d_strings;
device_span<format_item const> const d_format_items;
/**
* @brief Check the specified characters are between ['0','9'].
*
* @param str Beginning of characters to check.
* @param bytes Number of bytes to check.
* @return true if all digits are 0-9
*/
__device__ bool check_digits(const char* str, size_type bytes)
{
return thrust::all_of(thrust::seq, str, str + bytes, [] __device__(char chr) {
return (chr >= '0' && chr <= '9');
});
}
/**
* @brief Check the specified characters are between ['0','9']
* and the resulting integer is within [`min_value`, `max_value`].
*
* @param str Beginning of characters to check.
* @param bytes Number of bytes to check.
* @param min_value Inclusive minimum value
* @param max_value Inclusive maximum value
* @return If value is valid and number of bytes not successfully processed
*/
__device__ thrust::pair<bool, size_type> check_value(char const* str,
size_type const bytes,
int const min_value,
int const max_value)
{
if (*str < '0' || *str > '9') { return thrust::make_pair(false, bytes); }
int32_t value = 0;
size_type count = bytes;
while (count-- > 0) {
char chr = *str++;
if (chr < '0' || chr > '9') break;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return (value >= min_value && value <= max_value) ? thrust::make_pair(true, count + 1)
: thrust::make_pair(false, bytes);
}
/**
* @brief Check the string matches the format.
*
* Walk the `format_items` as we read the string characters
* checking the characters are valid for each format specifier.
* The checking here is a little more strict than the actual
* parser used for conversion.
*/
__device__ thrust::optional<timestamp_components> check_string(string_view const& d_string)
{
timestamp_components dateparts = {1970, 1, 1, 0}; // init to epoch time
auto ptr = d_string.data();
auto length = d_string.size_bytes();
for (auto item : d_format_items) {
// eliminate static character values first
if (item.item_type == format_char_type::literal) {
// check static character matches
if (*ptr != item.value) return thrust::nullopt;
ptr += item.length;
length -= item.length;
continue;
}
// allow for specifiers to be truncated
if (item.value != 'f')
item.length = static_cast<int8_t>(::min(static_cast<size_type>(item.length), length));
// special logic for each specifier
// reference: https://man7.org/linux/man-pages/man3/strptime.3.html
bool result = false;
size_type bytes_read = item.length;
switch (item.value) {
case 'Y': {
auto const [year, left] = parse_int(ptr, item.length);
result = (left < item.length);
dateparts.year = static_cast<int16_t>(year);
bytes_read -= left;
break;
}
case 'y': {
auto const [year, left] = parse_int(ptr, item.length);
result = (left < item.length);
dateparts.year = static_cast<int16_t>(year + (year < 69 ? 2000 : 1900));
bytes_read -= left;
break;
}
case 'm': {
auto const [month, left] = parse_int(ptr, item.length);
result = (left < item.length);
dateparts.month = static_cast<int8_t>(month);
bytes_read -= left;
break;
}
case 'd': {
auto const [day, left] = parse_int(ptr, item.length);
result = (left < item.length);
dateparts.day = static_cast<int8_t>(day);
bytes_read -= left;
break;
}
case 'j': {
auto const cv = check_value(ptr, item.length, 1, 366);
result = cv.first;
bytes_read -= cv.second;
break;
}
case 'H': {
auto const cv = check_value(ptr, item.length, 0, 23);
result = cv.first;
bytes_read -= cv.second;
break;
}
case 'I': {
auto const cv = check_value(ptr, item.length, 1, 12);
result = cv.first;
bytes_read -= cv.second;
break;
}
case 'M': {
auto const cv = check_value(ptr, item.length, 0, 59);
result = cv.first;
bytes_read -= cv.second;
break;
}
case 'S': {
auto const cv = check_value(ptr, item.length, 0, 60);
result = cv.first;
bytes_read -= cv.second;
break;
}
case 'f': {
int32_t const read_size =
::min(static_cast<int32_t>(item.length), static_cast<int32_t>(length));
result = check_digits(ptr, read_size);
bytes_read = read_size;
break;
}
case 'p': {
if (item.length == 2) {
string_view am_pm(ptr, 2);
result = (am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0) ||
(am_pm.compare("PM", 2) == 0) || (am_pm.compare("pm", 2) == 0);
}
break;
}
case 'z': { // timezone offset
if (item.length == 5) {
auto const cvh = check_value(ptr + 1, 2, 0, 23);
auto const cvm = check_value(ptr + 3, 2, 0, 59);
result = (*ptr == '-' || *ptr == '+') && cvh.first && cvm.first;
bytes_read -= cvh.second + cvm.second;
}
break;
}
case 'Z': result = true; // skip
default: break;
}
if (!result) return thrust::nullopt;
ptr += bytes_read;
length -= bytes_read;
}
return dateparts;
}
__device__ bool operator()(size_type idx)
{
if (d_strings.is_null(idx)) return false;
string_view d_str = d_strings.element<string_view>(idx);
if (d_str.empty()) return false;
auto const dateparts = check_string(d_str);
if (!dateparts.has_value()) return false;
auto const year = dateparts.value().year;
auto const month = static_cast<uint32_t>(dateparts.value().month);
auto const day = static_cast<uint32_t>(dateparts.value().day);
return cuda::std::chrono::year_month_day(cuda::std::chrono::year{year},
cuda::std::chrono::month{month},
cuda::std::chrono::day{day})
.ok();
}
};
std::unique_ptr<cudf::column> is_timestamp(strings_column_view const& input,
std::string const& format,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = input.size();
if (strings_count == 0) return make_empty_column(type_id::BOOL8);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
auto d_strings = column_device_view::create(input.parent(), stream);
auto results = make_numeric_column(data_type{type_id::BOOL8},
strings_count,
cudf::detail::copy_bitmask(input.parent(), stream, mr),
input.null_count(),
stream,
mr);
auto d_results = results->mutable_view().data<bool>();
format_compiler compiler(format, stream);
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results,
check_datetime_format{*d_strings, compiler.format_items()});
results->set_null_count(input.null_count());
return results;
}
} // namespace detail
// external APIs
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& input,
data_type timestamp_type,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::to_timestamps(input, timestamp_type, format, rmm::cuda_stream_default, mr);
}
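// Hedged usage sketch (not part of this file; names outside this translation unit are
// assumptions about the public cudf API): a caller would typically do
//   auto ts = cudf::strings::to_timestamps(cudf::strings_column_view{input_column},
//                                          cudf::data_type{cudf::type_id::TIMESTAMP_SECONDS},
//                                          "%Y-%m-%d %H:%M:%S");
// which returns a TIMESTAMP_SECONDS column whose null mask is copied from the input column.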
std::unique_ptr<cudf::column> is_timestamp(strings_column_view const& input,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_timestamp(input, format, rmm::cuda_stream_default, mr);
}
namespace detail {
namespace {
constexpr size_type format_names_size = 40; // 2(am/pm) + 2x7(weekdays) + 2x12(months)
constexpr size_type offset_weekdays = 2;
constexpr size_type offset_months = 16;
constexpr size_type days_in_week = 7;
constexpr size_type months_in_year = 12;
/**
* @brief Time components used by the date_time_formatter
*/
struct time_components {
int8_t hour;
int8_t minute;
int8_t second;
int32_t subsecond;
};
/**
* @brief Base class for the `from_timestamps_size_fn` and the `date_time_formatter`
*
* These contain some common utility functions used by both subclasses.
*/
template <typename T>
struct from_timestamp_base {
/**
* @brief Specialized modulo expression that handles negative values.
*
* @code{.pseudo}
* Examples:
* modulo(1,60) -> 1
* modulo(-1,60) -> 59
* @endcode
*/
__device__ int32_t modulo_time(int64_t time, int64_t base) const
{
return static_cast<int32_t>(((time % base) + base) % base);
};
/**
* @brief This function handles converting units by dividing and adjusting for negative values.
*
* @code{.pseudo}
* Examples:
* scale(-61,60) -> -2
* scale(-60,60) -> -1
* scale(-59,60) -> -1
* scale( 59,60) -> 0
* scale( 60,60) -> 1
* scale( 61,60) -> 1
* @endcode
*/
__device__ int32_t scale_time(int64_t time, int64_t base) const
{
return static_cast<int32_t>((time - ((time < 0) * (base - 1L))) / base);
};
__device__ time_components get_time_components(int64_t tstamp) const
{
time_components result = {0};
if constexpr (std::is_same_v<T, cudf::timestamp_D>) { return result; }
// Note: Tried using: cuda::std::chrono::hh_mm_ss(T::duration(timestamp));
// and retrieving the hour, minute, second, and subsecond values from it
// but it did not scale/modulo the components for negative timestamps
// correctly -- it simply did an abs(timestamp) as documented here:
// https://en.cppreference.com/w/cpp/chrono/hh_mm_ss/hh_mm_ss
if constexpr (not std::is_same_v<T, cudf::timestamp_s>) {
int64_t constexpr base = T::period::type::den; // 1000=ms, 1000000=us, etc
auto const subsecond = modulo_time(tstamp, base);
tstamp = tstamp / base - ((tstamp < 0) and (subsecond != 0));
result.subsecond = subsecond;
}
result.hour = modulo_time(scale_time(tstamp, 3600), 24);
result.minute = modulo_time(scale_time(tstamp, 60), 60);
result.second = modulo_time(tstamp, 60);
return result;
}
};
template <typename T>
struct from_timestamps_size_fn : public from_timestamp_base<T> {
column_device_view const d_timestamps;
column_device_view const d_format_names;
device_span<format_item const> const d_format_items;
from_timestamps_size_fn(column_device_view const& d_timestamps,
column_device_view const& d_format_names,
device_span<format_item const> const& d_format_items)
: d_timestamps(d_timestamps), d_format_names(d_format_names), d_format_items(d_format_items)
{
}
__device__ size_type operator()(size_type idx) const
{
if (d_timestamps.is_null(idx)) { return 0; }
// We only dissect the timestamp into components if needed
// by a specifier. And then we only do it once and reuse it.
// This can improve performance when not using uncommon specifiers.
thrust::optional<cuda::std::chrono::sys_days> days;
auto days_from_timestamp = [&]() {
auto const tstamp = d_timestamps.element<T>(idx).time_since_epoch().count();
return cuda::std::chrono::sys_days(static_cast<cudf::timestamp_D::duration>(
floor<cuda::std::chrono::days>(T::duration(tstamp))));
};
size_type bytes = 0; // output size
for (auto item : d_format_items) {
if (item.item_type == format_char_type::literal) {
bytes += item.length;
continue;
}
// only specifiers resulting in strings require special logic
switch (item.value) {
case 'a': // weekday abbreviated
case 'A': { // weekday full name
if (!days.has_value()) { days = days_from_timestamp(); }
auto const day_of_week =
cuda::std::chrono::year_month_weekday(days.value()).weekday().c_encoding();
auto const day_idx =
day_of_week + offset_weekdays + (item.value == 'a' ? days_in_week : 0);
if (day_idx < d_format_names.size())
bytes += d_format_names.element<cudf::string_view>(day_idx).size_bytes();
break;
}
case 'b': // month abbreviated
case 'B': { // month full name
if (!days.has_value()) { days = days_from_timestamp(); }
auto const month =
static_cast<uint32_t>(cuda::std::chrono::year_month_day(days.value()).month());
auto const month_idx =
month - 1 + offset_months + (item.value == 'b' ? months_in_year : 0);
if (month_idx < d_format_names.size())
bytes += d_format_names.element<cudf::string_view>(month_idx).size_bytes();
break;
}
case 'p': // AM/PM
{
auto times = get_time_components(d_timestamps.element<T>(idx).time_since_epoch().count());
bytes += d_format_names.size() > 1
? d_format_names.element<cudf::string_view>(static_cast<int>(times.hour >= 12))
.size_bytes()
: 2;
break;
}
default: {
bytes += item.length;
break;
}
}
}
return bytes;
}
};
// converts a timestamp into date-time formatted string
template <typename T>
struct datetime_formatter : public from_timestamp_base<T> {
column_device_view const d_timestamps;
column_device_view const d_format_names;
device_span<format_item const> const d_format_items;
int32_t const* d_offsets{};
char* d_chars{};
datetime_formatter(column_device_view const& d_timestamps,
column_device_view const& d_format_names,
device_span<format_item const> const& d_format_items,
int32_t const* d_offsets,
char* d_chars)
: d_timestamps(d_timestamps),
d_format_names(d_format_names),
d_format_items(d_format_items),
d_offsets(d_offsets),
d_chars(d_chars)
{
}
// utility to create 0-padded integers (up to 9 chars)
__device__ char* int2str(char* str, int bytes, int val)
{
char tmpl[9] = {'0', '0', '0', '0', '0', '0', '0', '0', '0'};
char* ptr = tmpl;
while (val > 0) {
int digit = val % 10;
*ptr++ = '0' + digit;
val = val / 10;
}
ptr = tmpl + bytes - 1;
while (bytes-- > 0)
*str++ = *ptr--;
return str;
}
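// Illustrative note (not in the original source): e.g. int2str(out, 4, 37) writes "0037"
// into out and returns out + 4; values wider than `bytes` digits are silently truncated
// to their lowest `bytes` digits.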
// from https://howardhinnant.github.io/date/date.html
__device__ thrust::pair<int32_t, int32_t> get_iso_week_year(
cuda::std::chrono::year_month_day const& ymd) const
{
auto const days = cuda::std::chrono::sys_days(ymd);
auto year = ymd.year();
auto iso_week_start = [](cuda::std::chrono::year const y) {
// clang-format off
return cuda::std::chrono::sys_days{cuda::std::chrono::Thursday[1]/cuda::std::chrono::January/y} -
(cuda::std::chrono::Thursday - cuda::std::chrono::Monday);
// clang-format on
};
auto start = iso_week_start(year);
if (days < start)
start = iso_week_start(--year);
else {
auto const next_start = iso_week_start(year + cuda::std::chrono::years{1});
if (days >= next_start) {
++year;
start = next_start;
}
}
return thrust::make_pair(
(cuda::std::chrono::duration_cast<cuda::std::chrono::weeks>(days - start) +
cuda::std::chrono::weeks{1}) // always [1-53]
.count(),
static_cast<int32_t>(year));
}
__device__ int8_t get_week_of_year(cuda::std::chrono::sys_days const days,
cuda::std::chrono::sys_days const start) const
{
return days < start
? 0
: (cuda::std::chrono::duration_cast<cuda::std::chrono::weeks>(days - start) +
cuda::std::chrono::weeks{1})
.count();
}
__device__ int32_t get_day_of_year(cuda::std::chrono::year_month_day const& ymd)
{
auto const month = static_cast<uint32_t>(ymd.month());
auto const day = static_cast<uint32_t>(ymd.day());
int32_t const monthDayOffset[] = {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334};
return static_cast<int32_t>(day + monthDayOffset[month - 1] +
(month > 2 and ymd.year().is_leap()));
}
__device__ void operator()(size_type idx)
{
if (d_timestamps.is_null(idx)) return;
auto tstamp = d_timestamps.element<T>(idx).time_since_epoch().count();
auto const days = cuda::std::chrono::sys_days(static_cast<cudf::timestamp_D::duration>(
cuda::std::chrono::floor<cuda::std::chrono::days>(T::duration(tstamp))));
auto const ymd = cuda::std::chrono::year_month_day(days);
auto timeparts = get_time_components(tstamp);
// convert to characters using the format items
auto ptr = d_chars + d_offsets[idx];
for (auto item : d_format_items) {
if (item.item_type == format_char_type::literal) {
*ptr++ = item.value;
continue;
}
// Value to use for int2str call at the end of the switch-statement.
// This simplifies the case statements and prevents a lot of extra inlining.
int32_t copy_value = -1; // default set for non-int2str usage cases
// special logic for each specifier
switch (item.value) {
case 'Y': // 4-digit year
copy_value = static_cast<int32_t>(ymd.year());
break;
case 'y': // 2-digit year
{
auto year = static_cast<int32_t>(ymd.year());
// remove hundredths digits and above
copy_value = year - ((year / 100) * 100);
break;
}
case 'm': // month
copy_value = static_cast<int32_t>(static_cast<uint32_t>(ymd.month()));
break;
case 'd': // day of month
copy_value = static_cast<int32_t>(static_cast<uint32_t>(ymd.day()));
break;
case 'j': // day of year
copy_value = get_day_of_year(ymd);
break;
case 'H': // 24-hour
copy_value = timeparts.hour;
break;
case 'I': // 12-hour
{
// 0 = 12am; 12 = 12pm; 6 = 06am; 18 = 06pm
copy_value = [h = timeparts.hour] {
if (h == 0) return 12;
return h > 12 ? h - 12 : h;
}();
break;
}
case 'M': // minute
copy_value = timeparts.minute;
break;
case 'S': // second
copy_value = timeparts.second;
break;
case 'f': // sub-second
{
char subsecond_digits[] = "000000000"; // 9 max digits
const int digits = [] {
if constexpr (std::is_same_v<T, cudf::timestamp_ms>) return 3;
if constexpr (std::is_same_v<T, cudf::timestamp_us>) return 6;
if constexpr (std::is_same_v<T, cudf::timestamp_ns>) return 9;
return 0;
}();
int2str(subsecond_digits, digits, timeparts.subsecond);
ptr = copy_and_increment(ptr, subsecond_digits, item.length);
break;
}
case 'p': // am or pm
{
// 0 = 12am, 12 = 12pm
auto const am_pm = [&] {
if (d_format_names.size() > 1)
return d_format_names.element<cudf::string_view>(
static_cast<int>(timeparts.hour >= 12));
return string_view(timeparts.hour >= 12 ? "PM" : "AM", 2);
}();
ptr = copy_string(ptr, am_pm);
break;
}
case 'z': // timezone -- always UTC
ptr = copy_and_increment(ptr, "+0000", 5);
break;
case 'Z': // timezone string -- always UTC
ptr = copy_and_increment(ptr, "UTC", 3);
break;
case 'u': // day of week ISO
case 'w': { // day of week non-ISO
auto const day_of_week = static_cast<int32_t>(
cuda::std::chrono::year_month_weekday(days).weekday().c_encoding());
copy_value = day_of_week == 0 && item.value == 'u' ? 7 : day_of_week;
break;
}
// clang-format off
case 'U': { // week of year: first week includes the first Sunday of the year
copy_value = get_week_of_year(days, cuda::std::chrono::sys_days{
cuda::std::chrono::Sunday[1]/cuda::std::chrono::January/ymd.year()});
break;
}
case 'W': { // week of year: first week includes the first Monday of the year
copy_value = get_week_of_year(days, cuda::std::chrono::sys_days{
cuda::std::chrono::Monday[1]/cuda::std::chrono::January/ymd.year()});
break;
}
// clang-format on
case 'V': // ISO week number
case 'G': { // ISO year number
auto const [week, year] = get_iso_week_year(ymd);
copy_value = item.value == 'G' ? year : week;
break;
}
case 'a': // abbreviated day of the week
case 'A': { // day of the week
auto const day_of_week =
cuda::std::chrono::year_month_weekday(days).weekday().c_encoding();
auto const day_idx =
day_of_week + offset_weekdays + (item.value == 'a' ? days_in_week : 0);
if (d_format_names.size())
ptr = copy_string(ptr, d_format_names.element<cudf::string_view>(day_idx));
break;
}
case 'b': // abbreviated month of the year
case 'B': { // month of the year
auto const month = static_cast<uint32_t>(ymd.month());
auto const month_idx =
month - 1 + offset_months + (item.value == 'b' ? months_in_year : 0);
if (d_format_names.size())
ptr = copy_string(ptr, d_format_names.element<cudf::string_view>(month_idx));
break;
}
default: break;
}
if (copy_value >= 0) ptr = int2str(ptr, item.length, copy_value);
}
}
};
//
using strings_children = std::pair<std::unique_ptr<cudf::column>, std::unique_ptr<cudf::column>>;
struct dispatch_from_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
strings_children operator()(column_device_view const& d_timestamps,
column_device_view const& d_format_names,
device_span<format_item const> d_format_items,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
size_type const strings_count = d_timestamps.size();
// build offsets column
auto offsets_transformer_itr = cudf::detail::make_counting_transform_iterator(
0, from_timestamps_size_fn<T>{d_timestamps, d_format_names, d_format_items});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto d_offsets = offsets_column->mutable_view().template data<offset_type>();
// build chars column
auto const bytes =
cudf::detail::get_value<offset_type>(offsets_column->view(), strings_count, stream);
auto chars_column = create_chars_child_column(bytes, stream, mr);
auto d_chars = chars_column->mutable_view().template data<char>();
datetime_formatter<T> pfn{d_timestamps, d_format_names, d_format_items, d_offsets, d_chars};
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
d_timestamps.size(),
pfn);
return std::make_pair(std::move(offsets_column), std::move(chars_column));
}
template <typename T, typename... Args>
std::enable_if_t<not cudf::is_timestamp<T>(), strings_children> operator()(Args&&...) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
strings_column_view const& names,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (timestamps.is_empty()) return make_empty_column(type_id::STRING);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
CUDF_EXPECTS(names.is_empty() || names.size() == format_names_size,
"Invalid size for format names.");
auto const d_names = column_device_view::create(names.parent(), stream);
// This API supports a few more specifiers than to_timestamps.
// clang-format off
format_compiler compiler(format, stream,
specifier_map{{'w', 1}, {'W', 2}, {'u', 1}, {'U', 2}, {'V', 2}, {'G', 4},
{'a', 3}, {'A', 3}, {'b', 3}, {'B', 3}});
// clang-format on
auto const d_format_items = compiler.format_items();
auto const d_timestamps = column_device_view::create(timestamps, stream);
// dispatcher is called to handle the different timestamp types
auto [offsets_column, chars_column] = cudf::type_dispatcher(timestamps.type(),
dispatch_from_timestamps_fn(),
*d_timestamps,
*d_names,
d_format_items,
stream,
mr);
return make_strings_column(timestamps.size(),
std::move(offsets_column),
std::move(chars_column),
timestamps.null_count(),
cudf::detail::copy_bitmask(timestamps, stream, mr));
}
} // namespace detail
// external API
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
strings_column_view const& names,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::from_timestamps(timestamps, format, names, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
|
9061354bc095d9ef29dbcb2a7e083e7beaae9a92.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/detail/converters.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/functional.h>
#include <thrust/logical.h>
#include <thrust/optional.h>
#include <map>
#include <numeric>
#include <vector>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Structure of date/time components
*/
struct timestamp_components {
int16_t year;
int8_t month;
int8_t day;
int16_t day_of_year;
int8_t hour;
int8_t minute;
int8_t second;
int32_t subsecond;
int32_t tz_minutes;
};
enum class format_char_type : int8_t {
literal, ///< literal char type passed through
specifier ///< timestamp format specifier
};
/**
* @brief Represents a format specifier or literal from a timestamp format string.
*
* Created by the format_compiler when parsing a format string.
*/
struct alignas(4) format_item {
format_char_type item_type; // specifier or literal indicator
char value; // specifier or literal value
int8_t length; // item length in bytes
static format_item new_specifier(char format_char, int8_t length)
{
return format_item{format_char_type::specifier, format_char, length};
}
static format_item new_literal(char literal)
{
return format_item{format_char_type::literal, literal, 1};
}
};
/**
* @brief The format-compiler parses a timestamp format string into a vector of
* `format_items`.
*
* The vector of `format_items` is used when parsing a string into timestamp
* components and when formatting a string from timestamp components.
*/
using specifier_map = std::map<char, int8_t>;
struct format_compiler {
std::string const format;
rmm::device_uvector<format_item> d_items;
// clang-format off
// The specifiers are documented here (not all are supported):
// https://en.cppreference.com/w/cpp/chrono/system_clock/formatter
specifier_map specifiers = {
{'Y', 4}, {'y', 2}, {'m', 2}, {'d', 2}, {'H', 2}, {'I', 2}, {'M', 2},
{'S', 2}, {'f', 6}, {'z', 5}, {'Z', 3}, {'p', 2}, {'j', 3}};
// clang-format on
format_compiler(std::string fmt,
rmm::cuda_stream_view stream,
specifier_map extra_specifiers = {})
: format(fmt), d_items(0, stream)
{
specifiers.insert(extra_specifiers.begin(), extra_specifiers.end());
std::vector<format_item> items;
const char* str = format.c_str();
auto length = format.length();
while (length > 0) {
char ch = *str++;
length--;
// first check for a literal character
if (ch != '%') {
items.push_back(format_item::new_literal(ch));
continue;
}
CUDF_EXPECTS(length > 0, "Unfinished specifier in timestamp format");
ch = *str++;
length--;
if (ch == '%') // escaped % char
{
items.push_back(format_item::new_literal(ch));
continue;
}
if (ch >= '0' && ch <= '9') {
CUDF_EXPECTS(*str == 'f', "precision not supported for specifier: " + std::string(1, *str));
specifiers[*str] = static_cast<int8_t>(ch - '0');
ch = *str++;
length--;
}
// check if the specifier found is supported
CUDF_EXPECTS(specifiers.find(ch) != specifiers.end(),
"invalid format specifier: " + std::string(1, ch));
// create the format item for this specifier
items.push_back(format_item::new_specifier(ch, specifiers[ch]));
}
// copy format_items to device memory
d_items = cudf::detail::make_device_uvector_async(items, stream);
}
device_span<format_item const> format_items() { return device_span<format_item const>(d_items); }
int8_t subsecond_precision() const { return specifiers.at('f'); }
};
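// Illustration (not part of the compiler itself): the format "%Y-%m-%d" is
// compiled into five format_items -- specifier('Y',4), literal('-'),
// specifier('m',2), literal('-'), specifier('d',2) -- which the parsing and
// formatting functors below walk in order.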
/**
* @brief Specialized function to return the integer value reading up to the specified
* bytes or until an invalid character is encountered.
*
* @param str Beginning of characters to read.
* @param bytes Number of bytes in str to read.
* @return Integer value of valid characters read and how many bytes were not read.
*/
__device__ thrust::pair<int32_t, size_type> parse_int(char const* str, size_type bytes)
{
int32_t value = 0;
while (bytes-- > 0) {
char chr = *str++;
if (chr < '0' || chr > '9') break;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return thrust::make_pair(value, bytes + 1);
}
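// Example: parse_int("20xy", 4) stops at 'x' and returns {20, 2} -- the value
// read so far and the two trailing bytes ('x','y') that were not consumed as
// digits.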
/**
* @brief This parses date/time characters into a timestamp integer
*
* @tparam T cudf::timestamp type
*/
template <typename T>
struct parse_datetime {
column_device_view const d_strings;
device_span<format_item const> const d_format_items;
int8_t const subsecond_precision;
/**
* @brief Return power of ten value given an exponent.
*
* @return `1x10^exponent` for `0 <= exponent <= 9`
*/
__device__ constexpr int64_t power_of_ten(int32_t const exponent) const
{
constexpr int64_t powers_of_ten[] = {
1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L};
return powers_of_ten[exponent];
}
// Walk the format_items to parse the string into date/time components
__device__ timestamp_components parse_into_parts(string_view const& d_string) const
{
timestamp_components timeparts = {1970, 1, 1, 0}; // init to epoch time
auto ptr = d_string.data();
auto length = d_string.size_bytes();
for (auto item : d_format_items) {
if (item.value != 'f')
item.length = static_cast<int8_t>(std::min(static_cast<size_type>(item.length), length));
if (item.item_type == format_char_type::literal) {
// static character we'll just skip;
// consume item.length bytes from the input string
ptr += item.length;
length -= item.length;
continue;
}
size_type bytes_read = item.length; // number of bytes processed
// special logic for each specifier
switch (item.value) {
case 'Y': {
auto const [year, left] = parse_int(ptr, item.length);
timeparts.year = static_cast<int16_t>(year);
bytes_read -= left;
break;
}
case 'y': {
auto const [year, left] = parse_int(ptr, item.length);
timeparts.year = static_cast<int16_t>(year + (year < 69 ? 2000 : 1900));
bytes_read -= left;
break;
}
case 'm': {
auto const [month, left] = parse_int(ptr, item.length);
timeparts.month = static_cast<int8_t>(month);
bytes_read -= left;
break;
}
case 'd': {
auto const [day, left] = parse_int(ptr, item.length);
timeparts.day = static_cast<int8_t>(day);
bytes_read -= left;
break;
}
case 'j': {
auto const [day, left] = parse_int(ptr, item.length);
timeparts.day_of_year = static_cast<int16_t>(day);
bytes_read -= left;
break;
}
case 'H':
case 'I': {
auto const [hour, left] = parse_int(ptr, item.length);
timeparts.hour = static_cast<int8_t>(hour);
bytes_read -= left;
break;
}
case 'M': {
auto const [minute, left] = parse_int(ptr, item.length);
timeparts.minute = static_cast<int8_t>(minute);
bytes_read -= left;
break;
}
case 'S': {
auto const [second, left] = parse_int(ptr, item.length);
timeparts.second = static_cast<int8_t>(second);
bytes_read -= left;
break;
}
case 'f': {
int32_t const read_size =
std::min(static_cast<int32_t>(item.length), static_cast<int32_t>(length));
auto const [fraction, left] = parse_int(ptr, read_size);
timeparts.subsecond =
static_cast<int32_t>(fraction * power_of_ten(item.length - read_size + left));
bytes_read = read_size - left;
break;
}
case 'p': {
string_view am_pm(ptr, 2);
auto hour = timeparts.hour;
if ((am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0)) {
if (hour == 12) hour = 0;
} else if (hour < 12)
hour += 12;
timeparts.hour = hour;
break;
}
case 'z': {
// 'z' format is +hh:mm -- single sign char and 2 chars each for hour and minute
auto const sign = *ptr == '-' ? 1 : -1;
auto const [hh, lh] = parse_int(ptr + 1, 2);
auto const [mm, lm] = parse_int(ptr + 3, 2);
// revert timezone back to UTC
timeparts.tz_minutes = sign * ((hh * 60) + mm);
bytes_read -= lh + lm;
break;
}
case 'Z': break; // skip
default: break;
}
ptr += bytes_read;
length -= bytes_read;
}
return timeparts;
}
__device__ int64_t timestamp_from_parts(timestamp_components const& timeparts) const
{
auto const ymd = // convenient chrono class handles the leap year calculations for us
cuda::std::chrono::year_month_day(
cuda::std::chrono::year{timeparts.year},
cuda::std::chrono::month{static_cast<uint32_t>(timeparts.month)},
cuda::std::chrono::day{static_cast<uint32_t>(timeparts.day)});
auto const days = cuda::std::chrono::sys_days(ymd).time_since_epoch().count();
if constexpr (std::is_same_v<T, cudf::timestamp_D>) { return days; }
int64_t timestamp = (days * 24L * 3600L) + (timeparts.hour * 3600L) + (timeparts.minute * 60L) +
timeparts.second + (timeparts.tz_minutes * 60L);
if constexpr (std::is_same_v<T, cudf::timestamp_s>) { return timestamp; }
int64_t const subsecond =
(timeparts.subsecond * power_of_ten(9 - subsecond_precision)) / // normalize to nanoseconds
(1000000000L / T::period::type::den); // and rescale to T
timestamp *= T::period::type::den;
timestamp += subsecond;
return timestamp;
}
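// Example of the subsecond rescaling above: with the default precision of 6 and
// T = cudf::timestamp_ms, a parsed subsecond of 123456 becomes
// 123456 * 10^(9-6) / (10^9/10^3) = 123 milliseconds added to the timestamp.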
__device__ T operator()(size_type idx) const
{
T epoch_time{typename T::duration{0}};
if (d_strings.is_null(idx)) return epoch_time;
string_view d_str = d_strings.element<string_view>(idx);
if (d_str.empty()) return epoch_time;
auto const timeparts = parse_into_parts(d_str);
return T{T::duration(timestamp_from_parts(timeparts))};
}
};
/**
* @brief Type-dispatch operator to convert timestamp strings to native fixed-width-type
*/
struct dispatch_to_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const& d_strings,
std::string const& format,
mutable_column_view& results_view,
rmm::cuda_stream_view stream) const
{
format_compiler compiler(format, stream);
parse_datetime<T> pfn{d_strings, compiler.format_items(), compiler.subsecond_precision()};
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(results_view.size()),
results_view.data<T>(),
pfn);
}
template <typename T, std::enable_if_t<not cudf::is_timestamp<T>()>* = nullptr>
void operator()(column_device_view const&,
std::string const&,
mutable_column_view&,
rmm::cuda_stream_view) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& input,
data_type timestamp_type,
std::string const& format,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty())
return make_empty_column(timestamp_type); // make_timestamp_column(timestamp_type, 0);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
auto d_strings = column_device_view::create(input.parent(), stream);
auto results = make_timestamp_column(timestamp_type,
input.size(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
input.null_count(),
stream,
mr);
auto results_view = results->mutable_view();
cudf::type_dispatcher(
timestamp_type, dispatch_to_timestamps_fn(), *d_strings, format, results_view, stream);
results->set_null_count(input.null_count());
return results;
}
/**
* @brief Functor checks the strings against the given format items.
*
* This does no data conversion.
*/
struct check_datetime_format {
column_device_view const d_strings;
device_span<format_item const> const d_format_items;
/**
* @brief Check the specified characters are between ['0','9'].
*
* @param str Beginning of characters to check.
* @param bytes Number of bytes to check.
* @return true if all digits are 0-9
*/
__device__ bool check_digits(const char* str, size_type bytes)
{
return thrust::all_of(thrust::seq, str, str + bytes, [] __device__(char chr) {
return (chr >= '0' && chr <= '9');
});
}
/**
* @brief Check the specified characters are between ['0','9']
* and the resulting integer is within [`min_value`, `max_value`].
*
* @param str Beginning of characters to check.
* @param bytes Number of bytes to check.
* @param min_value Inclusive minimum value
* @param max_value Inclusive maximum value
* @return If value is valid and number of bytes not successfully processed
*/
__device__ thrust::pair<bool, size_type> check_value(char const* str,
size_type const bytes,
int const min_value,
int const max_value)
{
if (*str < '0' || *str > '9') { return thrust::make_pair(false, bytes); }
int32_t value = 0;
size_type count = bytes;
while (count-- > 0) {
char chr = *str++;
if (chr < '0' || chr > '9') break;
value = (value * 10) + static_cast<int32_t>(chr - '0');
}
return (value >= min_value && value <= max_value) ? thrust::make_pair(true, count + 1)
: thrust::make_pair(false, bytes);
}
/**
* @brief Check the string matches the format.
*
* Walk the `format_items` as we read the string characters
* checking the characters are valid for each format specifier.
* The checking here is a little more strict than the actual
* parser used for conversion.
*/
__device__ thrust::optional<timestamp_components> check_string(string_view const& d_string)
{
timestamp_components dateparts = {1970, 1, 1, 0}; // init to epoch time
auto ptr = d_string.data();
auto length = d_string.size_bytes();
for (auto item : d_format_items) {
// eliminate static character values first
if (item.item_type == format_char_type::literal) {
// check static character matches
if (*ptr != item.value) return thrust::nullopt;
ptr += item.length;
length -= item.length;
continue;
}
// allow for specifiers to be truncated
if (item.value != 'f')
item.length = static_cast<int8_t>(std::min(static_cast<size_type>(item.length), length));
// special logic for each specifier
// reference: https://man7.org/linux/man-pages/man3/strptime.3.html
bool result = false;
size_type bytes_read = item.length;
switch (item.value) {
case 'Y': {
auto const [year, left] = parse_int(ptr, item.length);
result = (left < item.length);
dateparts.year = static_cast<int16_t>(year);
bytes_read -= left;
break;
}
case 'y': {
auto const [year, left] = parse_int(ptr, item.length);
result = (left < item.length);
dateparts.year = static_cast<int16_t>(year + (year < 69 ? 2000 : 1900));
bytes_read -= left;
break;
}
case 'm': {
auto const [month, left] = parse_int(ptr, item.length);
result = (left < item.length);
dateparts.month = static_cast<int8_t>(month);
bytes_read -= left;
break;
}
case 'd': {
auto const [day, left] = parse_int(ptr, item.length);
result = (left < item.length);
dateparts.day = static_cast<int8_t>(day); // value.value()
bytes_read -= left;
break;
}
case 'j': {
auto const cv = check_value(ptr, item.length, 1, 366);
result = cv.first;
bytes_read -= cv.second;
break;
}
case 'H': {
auto const cv = check_value(ptr, item.length, 0, 23);
result = cv.first;
bytes_read -= cv.second;
break;
}
case 'I': {
auto const cv = check_value(ptr, item.length, 1, 12);
result = cv.first;
bytes_read -= cv.second;
break;
}
case 'M': {
auto const cv = check_value(ptr, item.length, 0, 59);
result = cv.first;
bytes_read -= cv.second;
break;
}
case 'S': {
auto const cv = check_value(ptr, item.length, 0, 60);
result = cv.first;
bytes_read -= cv.second;
break;
}
case 'f': {
int32_t const read_size =
std::min(static_cast<int32_t>(item.length), static_cast<int32_t>(length));
result = check_digits(ptr, read_size);
bytes_read = read_size;
break;
}
case 'p': {
if (item.length == 2) {
string_view am_pm(ptr, 2);
result = (am_pm.compare("AM", 2) == 0) || (am_pm.compare("am", 2) == 0) ||
(am_pm.compare("PM", 2) == 0) || (am_pm.compare("pm", 2) == 0);
}
break;
}
case 'z': { // timezone offset
if (item.length == 5) {
auto const cvh = check_value(ptr + 1, 2, 0, 23);
auto const cvm = check_value(ptr + 3, 2, 0, 59);
result = (*ptr == '-' || *ptr == '+') && cvh.first && cvm.first;
bytes_read -= cvh.second + cvm.second;
}
break;
}
case 'Z': result = true; // skip
default: break;
}
if (!result) return thrust::nullopt;
ptr += bytes_read;
length -= bytes_read;
}
return dateparts;
}
__device__ bool operator()(size_type idx)
{
if (d_strings.is_null(idx)) return false;
string_view d_str = d_strings.element<string_view>(idx);
if (d_str.empty()) return false;
auto const dateparts = check_string(d_str);
if (!dateparts.has_value()) return false;
auto const year = dateparts.value().year;
auto const month = static_cast<uint32_t>(dateparts.value().month);
auto const day = static_cast<uint32_t>(dateparts.value().day);
return cuda::std::chrono::year_month_day(cuda::std::chrono::year{year},
cuda::std::chrono::month{month},
cuda::std::chrono::day{day})
.ok();
}
};
std::unique_ptr<cudf::column> is_timestamp(strings_column_view const& input,
std::string const& format,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = input.size();
if (strings_count == 0) return make_empty_column(type_id::BOOL8);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
auto d_strings = column_device_view::create(input.parent(), stream);
auto results = make_numeric_column(data_type{type_id::BOOL8},
strings_count,
cudf::detail::copy_bitmask(input.parent(), stream, mr),
input.null_count(),
stream,
mr);
auto d_results = results->mutable_view().data<bool>();
format_compiler compiler(format, stream);
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results,
check_datetime_format{*d_strings, compiler.format_items()});
results->set_null_count(input.null_count());
return results;
}
} // namespace detail
// external APIs
std::unique_ptr<cudf::column> to_timestamps(strings_column_view const& input,
data_type timestamp_type,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::to_timestamps(input, timestamp_type, format, rmm::cuda_stream_default, mr);
}
std::unique_ptr<cudf::column> is_timestamp(strings_column_view const& input,
std::string const& format,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_timestamp(input, format, rmm::cuda_stream_default, mr);
}
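// Illustrative host-side usage of the two public APIs above; `sv` (a
// strings_column_view) and `mr` (a device_memory_resource*) are assumed to be
// created elsewhere and are not part of this file:
//   auto valid = cudf::strings::is_timestamp(sv, "%Y-%m-%d %H:%M:%S", mr);
//   auto ts    = cudf::strings::to_timestamps(sv,
//                  cudf::data_type{cudf::type_id::TIMESTAMP_SECONDS},
//                  "%Y-%m-%d %H:%M:%S", mr);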
namespace detail {
namespace {
constexpr size_type format_names_size = 40; // 2(am/pm) + 2x7(weekdays) + 2x12(months)
constexpr size_type offset_weekdays = 2;
constexpr size_type offset_months = 16;
constexpr size_type days_in_week = 7;
constexpr size_type months_in_year = 12;
/**
* @brief Time components used by the date_time_formatter
*/
struct time_components {
int8_t hour;
int8_t minute;
int8_t second;
int32_t subsecond;
};
/**
* @brief Base class for the `from_timestamps_size_fn` and the `date_time_formatter`
*
* These contain some common utility functions used by both subclasses.
*/
template <typename T>
struct from_timestamp_base {
/**
* @brief Specialized modulo expression that handles negative values.
*
* @code{.pseudo}
* Examples:
* modulo(1,60) -> 1
* modulo(-1,60) -> 59
* @endcode
*/
__device__ int32_t modulo_time(int64_t time, int64_t base) const
{
return static_cast<int32_t>(((time % base) + base) % base);
};
/**
* @brief This function handles converting units by dividing and adjusting for negative values.
*
* @code{.pseudo}
* Examples:
* scale(-61,60) -> -2
* scale(-60,60) -> -1
* scale(-59,60) -> -1
* scale( 59,60) -> 0
* scale( 60,60) -> 1
* scale( 61,60) -> 1
* @endcode
*/
__device__ int32_t scale_time(int64_t time, int64_t base) const
{
return static_cast<int32_t>((time - ((time < 0) * (base - 1L))) / base);
};
__device__ time_components get_time_components(int64_t tstamp) const
{
time_components result = {0};
if constexpr (std::is_same_v<T, cudf::timestamp_D>) { return result; }
// Note: Tried using: cuda::std::chrono::hh_mm_ss(T::duration(timestamp));
// and retrieving the hour, minute, second, and subsecond values from it
// but it did not scale/modulo the components for negative timestamps
// correctly -- it simply did an abs(timestamp) as documented here:
// https://en.cppreference.com/w/cpp/chrono/hh_mm_ss/hh_mm_ss
if constexpr (not std::is_same_v<T, cudf::timestamp_s>) {
int64_t constexpr base = T::period::type::den; // 1000=ms, 1000000=us, etc
auto const subsecond = modulo_time(tstamp, base);
tstamp = tstamp / base - ((tstamp < 0) and (subsecond != 0));
result.subsecond = subsecond;
}
result.hour = modulo_time(scale_time(tstamp, 3600), 24);
result.minute = modulo_time(scale_time(tstamp, 60), 60);
result.second = modulo_time(tstamp, 60);
return result;
}
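// Example of the negative-timestamp handling above: for T = cudf::timestamp_s
// and tstamp = -1 (one second before the epoch), scale_time/modulo_time yield
// hour 23, minute 59, second 59 rather than the negative components a plain
// '/' and '%' would produce.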
};
template <typename T>
struct from_timestamps_size_fn : public from_timestamp_base<T> {
column_device_view const d_timestamps;
column_device_view const d_format_names;
device_span<format_item const> const d_format_items;
from_timestamps_size_fn(column_device_view const& d_timestamps,
column_device_view const& d_format_names,
device_span<format_item const> const& d_format_items)
: d_timestamps(d_timestamps), d_format_names(d_format_names), d_format_items(d_format_items)
{
}
__device__ size_type operator()(size_type idx) const
{
if (d_timestamps.is_null(idx)) { return 0; }
// We only dissect the timestamp into components if needed
// by a specifier. And then we only do it once and reuse it.
// This can improve performance when not using uncommon specifiers.
thrust::optional<cuda::std::chrono::sys_days> days;
auto days_from_timestamp = [&]() {
auto const tstamp = d_timestamps.element<T>(idx).time_since_epoch().count();
return cuda::std::chrono::sys_days(static_cast<cudf::timestamp_D::duration>(
floor<cuda::std::chrono::days>(T::duration(tstamp))));
};
size_type bytes = 0; // output size
for (auto item : d_format_items) {
if (item.item_type == format_char_type::literal) {
bytes += item.length;
continue;
}
// only specifiers resulting in strings require special logic
switch (item.value) {
case 'a': // weekday abbreviated
case 'A': { // weekday full name
if (!days.has_value()) { days = days_from_timestamp(); }
auto const day_of_week =
cuda::std::chrono::year_month_weekday(days.value()).weekday().c_encoding();
auto const day_idx =
day_of_week + offset_weekdays + (item.value == 'a' ? days_in_week : 0);
if (day_idx < d_format_names.size())
bytes += d_format_names.element<cudf::string_view>(day_idx).size_bytes();
break;
}
case 'b': // month abbreviated
case 'B': { // month full name
if (!days.has_value()) { days = days_from_timestamp(); }
auto const month =
static_cast<uint32_t>(cuda::std::chrono::year_month_day(days.value()).month());
auto const month_idx =
month - 1 + offset_months + (item.value == 'b' ? months_in_year : 0);
if (month_idx < d_format_names.size())
bytes += d_format_names.element<cudf::string_view>(month_idx).size_bytes();
break;
}
case 'p': // AM/PM
{
auto times = get_time_components(d_timestamps.element<T>(idx).time_since_epoch().count());
bytes += d_format_names.size() > 1
? d_format_names.element<cudf::string_view>(static_cast<int>(times.hour >= 12))
.size_bytes()
: 2;
break;
}
default: {
bytes += item.length;
break;
}
}
}
return bytes;
}
};
// converts a timestamp into date-time formatted string
template <typename T>
struct datetime_formatter : public from_timestamp_base<T> {
column_device_view const d_timestamps;
column_device_view const d_format_names;
device_span<format_item const> const d_format_items;
int32_t const* d_offsets{};
char* d_chars{};
datetime_formatter(column_device_view const& d_timestamps,
column_device_view const& d_format_names,
device_span<format_item const> const& d_format_items,
int32_t const* d_offsets,
char* d_chars)
: d_timestamps(d_timestamps),
d_format_names(d_format_names),
d_format_items(d_format_items),
d_offsets(d_offsets),
d_chars(d_chars)
{
}
// utility to create 0-padded integers (up to 9 chars)
__device__ char* int2str(char* str, int bytes, int val)
{
char tmpl[9] = {'0', '0', '0', '0', '0', '0', '0', '0', '0'};
char* ptr = tmpl;
while (val > 0) {
int digit = val % 10;
*ptr++ = '0' + digit;
val = val / 10;
}
ptr = tmpl + bytes - 1;
while (bytes-- > 0)
*str++ = *ptr--;
return str;
}
// from https://howardhinnant.github.io/date/date.html
__device__ thrust::pair<int32_t, int32_t> get_iso_week_year(
cuda::std::chrono::year_month_day const& ymd) const
{
auto const days = cuda::std::chrono::sys_days(ymd);
auto year = ymd.year();
auto iso_week_start = [](cuda::std::chrono::year const y) {
// clang-format off
return cuda::std::chrono::sys_days{cuda::std::chrono::Thursday[1]/cuda::std::chrono::January/y} -
(cuda::std::chrono::Thursday - cuda::std::chrono::Monday);
// clang-format on
};
auto start = iso_week_start(year);
if (days < start)
start = iso_week_start(--year);
else {
auto const next_start = iso_week_start(year + cuda::std::chrono::years{1});
if (days >= next_start) {
++year;
start = next_start;
}
}
return thrust::make_pair(
(cuda::std::chrono::duration_cast<cuda::std::chrono::weeks>(days - start) +
cuda::std::chrono::weeks{1}) // always [1-53]
.count(),
static_cast<int32_t>(year));
}
__device__ int8_t get_week_of_year(cuda::std::chrono::sys_days const days,
cuda::std::chrono::sys_days const start) const
{
return days < start
? 0
: (cuda::std::chrono::duration_cast<cuda::std::chrono::weeks>(days - start) +
cuda::std::chrono::weeks{1})
.count();
}
__device__ int32_t get_day_of_year(cuda::std::chrono::year_month_day const& ymd)
{
auto const month = static_cast<uint32_t>(ymd.month());
auto const day = static_cast<uint32_t>(ymd.day());
int32_t const monthDayOffset[] = {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334};
return static_cast<int32_t>(day + monthDayOffset[month - 1] +
(month > 2 and ymd.year().is_leap()));
}
__device__ void operator()(size_type idx)
{
if (d_timestamps.is_null(idx)) return;
auto tstamp = d_timestamps.element<T>(idx).time_since_epoch().count();
auto const days = cuda::std::chrono::sys_days(static_cast<cudf::timestamp_D::duration>(
cuda::std::chrono::floor<cuda::std::chrono::days>(T::duration(tstamp))));
auto const ymd = cuda::std::chrono::year_month_day(days);
auto timeparts = get_time_components(tstamp);
// convert to characters using the format items
auto ptr = d_chars + d_offsets[idx];
for (auto item : d_format_items) {
if (item.item_type == format_char_type::literal) {
*ptr++ = item.value;
continue;
}
// Value to use for int2str call at the end of the switch-statement.
// This simplifies the case statements and prevents a lot of extra inlining.
int32_t copy_value = -1; // default set for non-int2str usage cases
// special logic for each specifier
switch (item.value) {
case 'Y': // 4-digit year
copy_value = static_cast<int32_t>(ymd.year());
break;
case 'y': // 2-digit year
{
auto year = static_cast<int32_t>(ymd.year());
// remove hundredths digits and above
copy_value = year - ((year / 100) * 100);
break;
}
case 'm': // month
copy_value = static_cast<int32_t>(static_cast<uint32_t>(ymd.month()));
break;
case 'd': // day of month
copy_value = static_cast<int32_t>(static_cast<uint32_t>(ymd.day()));
break;
case 'j': // day of year
copy_value = get_day_of_year(ymd);
break;
case 'H': // 24-hour
copy_value = timeparts.hour;
break;
case 'I': // 12-hour
{
// 0 = 12am; 12 = 12pm; 6 = 06am; 18 = 06pm
copy_value = [h = timeparts.hour] {
if (h == 0) return 12;
return h > 12 ? h - 12 : h;
}();
break;
}
case 'M': // minute
copy_value = timeparts.minute;
break;
case 'S': // second
copy_value = timeparts.second;
break;
case 'f': // sub-second
{
char subsecond_digits[] = "000000000"; // 9 max digits
const int digits = [] {
if constexpr (std::is_same_v<T, cudf::timestamp_ms>) return 3;
if constexpr (std::is_same_v<T, cudf::timestamp_us>) return 6;
if constexpr (std::is_same_v<T, cudf::timestamp_ns>) return 9;
return 0;
}();
int2str(subsecond_digits, digits, timeparts.subsecond);
ptr = copy_and_increment(ptr, subsecond_digits, item.length);
break;
}
case 'p': // am or pm
{
// 0 = 12am, 12 = 12pm
auto const am_pm = [&] {
if (d_format_names.size() > 1)
return d_format_names.element<cudf::string_view>(
static_cast<int>(timeparts.hour >= 12));
return string_view(timeparts.hour >= 12 ? "PM" : "AM", 2);
}();
ptr = copy_string(ptr, am_pm);
break;
}
case 'z': // timezone -- always UTC
ptr = copy_and_increment(ptr, "+0000", 5);
break;
case 'Z': // timezone string -- always UTC
ptr = copy_and_increment(ptr, "UTC", 3);
break;
case 'u': // day of week ISO
case 'w': { // day of week non-ISO
auto const day_of_week = static_cast<int32_t>(
cuda::std::chrono::year_month_weekday(days).weekday().c_encoding());
copy_value = day_of_week == 0 && item.value == 'u' ? 7 : day_of_week;
break;
}
// clang-format off
case 'U': { // week of year: first week includes the first Sunday of the year
copy_value = get_week_of_year(days, cuda::std::chrono::sys_days{
cuda::std::chrono::Sunday[1]/cuda::std::chrono::January/ymd.year()});
break;
}
case 'W': { // week of year: first week includes the first Monday of the year
copy_value = get_week_of_year(days, cuda::std::chrono::sys_days{
cuda::std::chrono::Monday[1]/cuda::std::chrono::January/ymd.year()});
break;
}
// clang-format on
case 'V': // ISO week number
case 'G': { // ISO year number
auto const [week, year] = get_iso_week_year(ymd);
copy_value = item.value == 'G' ? year : week;
break;
}
case 'a': // abbreviated day of the week
case 'A': { // day of the week
auto const day_of_week =
cuda::std::chrono::year_month_weekday(days).weekday().c_encoding();
auto const day_idx =
day_of_week + offset_weekdays + (item.value == 'a' ? days_in_week : 0);
if (d_format_names.size())
ptr = copy_string(ptr, d_format_names.element<cudf::string_view>(day_idx));
break;
}
case 'b': // abbreviated month of the year
case 'B': { // month of the year
auto const month = static_cast<uint32_t>(ymd.month());
auto const month_idx =
month - 1 + offset_months + (item.value == 'b' ? months_in_year : 0);
if (d_format_names.size())
ptr = copy_string(ptr, d_format_names.element<cudf::string_view>(month_idx));
break;
}
default: break;
}
if (copy_value >= 0) ptr = int2str(ptr, item.length, copy_value);
}
}
};
//
using strings_children = std::pair<std::unique_ptr<cudf::column>, std::unique_ptr<cudf::column>>;
struct dispatch_from_timestamps_fn {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
strings_children operator()(column_device_view const& d_timestamps,
column_device_view const& d_format_names,
device_span<format_item const> d_format_items,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
size_type const strings_count = d_timestamps.size();
// build offsets column
auto offsets_transformer_itr = cudf::detail::make_counting_transform_iterator(
0, from_timestamps_size_fn<T>{d_timestamps, d_format_names, d_format_items});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto d_offsets = offsets_column->mutable_view().template data<offset_type>();
// build chars column
auto const bytes =
cudf::detail::get_value<offset_type>(offsets_column->view(), strings_count, stream);
auto chars_column = create_chars_child_column(bytes, stream, mr);
auto d_chars = chars_column->mutable_view().template data<char>();
datetime_formatter<T> pfn{d_timestamps, d_format_names, d_format_items, d_offsets, d_chars};
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
d_timestamps.size(),
pfn);
return std::make_pair(std::move(offsets_column), std::move(chars_column));
}
template <typename T, typename... Args>
std::enable_if_t<not cudf::is_timestamp<T>(), strings_children> operator()(Args&&...) const
{
CUDF_FAIL("Only timestamps type are expected");
}
};
} // namespace
//
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
strings_column_view const& names,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (timestamps.is_empty()) return make_empty_column(type_id::STRING);
CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty.");
CUDF_EXPECTS(names.is_empty() || names.size() == format_names_size,
"Invalid size for format names.");
auto const d_names = column_device_view::create(names.parent(), stream);
// This API supports a few more specifiers than to_timestamps.
// clang-format off
format_compiler compiler(format, stream,
specifier_map{{'w', 1}, {'W', 2}, {'u', 1}, {'U', 2}, {'V', 2}, {'G', 4},
{'a', 3}, {'A', 3}, {'b', 3}, {'B', 3}});
// clang-format on
auto const d_format_items = compiler.format_items();
auto const d_timestamps = column_device_view::create(timestamps, stream);
// dispatcher is called to handle the different timestamp types
auto [offsets_column, chars_column] = cudf::type_dispatcher(timestamps.type(),
dispatch_from_timestamps_fn(),
*d_timestamps,
*d_names,
d_format_items,
stream,
mr);
return make_strings_column(timestamps.size(),
std::move(offsets_column),
std::move(chars_column),
timestamps.null_count(),
cudf::detail::copy_bitmask(timestamps, stream, mr));
}
} // namespace detail
// external API
std::unique_ptr<column> from_timestamps(column_view const& timestamps,
std::string const& format,
strings_column_view const& names,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::from_timestamps(timestamps, format, names, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
|
1c40620a4ac562b855726048299c190c54addc31.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "volume.h"
int main(int argc, char *argv[]) {
FILE *fp = NULL;
tSTL input;
uint threads, blocks;
float cpu_result, *gpu_result, *tmp_result, *reduce;
tMesh cpu_mesh, gpu_mesh;
// timing stuff
struct timeval t1, t2;
hipEvent_t start, stop;
float dt_cpu, dt_gpu;
hipEventCreate(&start);
hipEventCreate(&stop);
// parse arguments
if (argc != 3) {
fprintf(stderr, "Usage: volume <n> <file>\n");
fprintf(stderr, "\tn\tnumber of threads per block (32,64,128,256,512,1024)\n");
fprintf(stderr, "\tfile\tpath to an STL file\n");
return -1;
}
// validate threads per block
threads = atoi(argv[1]);
switch (threads) {
case 32:
case 64:
case 128:
case 256:
case 512:
case 1024:
break;
default:
fprintf(stderr, "Wrong number of threads per block!\n");
return -1;
}
// open input file
fp = fopen(argv[2], "rb");
if (fp == NULL) {
fprintf(stderr, "Input file could not be opened!\n");
return -1;
}
// read file header
fseek(fp, sizeof(char) * 80, SEEK_SET);
fread(&cpu_mesh.num, sizeof(uint32_t), 1, fp);
// allocate CPU mesh
cpu_mesh.a = (float4 *) malloc(sizeof(float4) * cpu_mesh.num);
cpu_mesh.b = (float4 *) malloc(sizeof(float4) * cpu_mesh.num);
cpu_mesh.c = (float4 *) malloc(sizeof(float4) * cpu_mesh.num);
// read the triangles from file
for (int i=0; i<cpu_mesh.num; i++) {
fread(&input, sizeof(tSTL), 1, fp);
cpu_mesh.a[i].x = input.points[0];
cpu_mesh.a[i].y = input.points[1];
cpu_mesh.a[i].z = input.points[2];
cpu_mesh.b[i].x = input.points[3];
cpu_mesh.b[i].y = input.points[4];
cpu_mesh.b[i].z = input.points[5];
cpu_mesh.c[i].x = input.points[6];
cpu_mesh.c[i].y = input.points[7];
cpu_mesh.c[i].z = input.points[8];
}
fclose(fp);
// calculate reference solution on CPU
gettimeofday(&t1, 0);
volume_calculate_cpu(cpu_mesh, &cpu_result);
gettimeofday(&t2, 0);
dt_cpu = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0;
// set parameters for kernel
blocks = ceil(((float)cpu_mesh.num) / ((float)threads));
gpu_mesh.num = threads * blocks;
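// Example of the padding: 1000 triangles with 256 threads per block gives
// blocks = ceil(1000/256) = 4 and a padded gpu_mesh.num of 1024; the 24
// zero-filled triangles (memset below) should contribute nothing to the
// volume sum.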
// allocate
hipMalloc(&gpu_mesh.a, sizeof(float4) * gpu_mesh.num);
hipMalloc(&gpu_mesh.b, sizeof(float4) * gpu_mesh.num);
hipMalloc(&gpu_mesh.c, sizeof(float4) * gpu_mesh.num);
// copy
hipMemcpy(gpu_mesh.a, cpu_mesh.a, sizeof(float4) * cpu_mesh.num, hipMemcpyHostToDevice);
hipMemcpy(gpu_mesh.b, cpu_mesh.b, sizeof(float4) * cpu_mesh.num, hipMemcpyHostToDevice);
hipMemcpy(gpu_mesh.c, cpu_mesh.c, sizeof(float4) * cpu_mesh.num, hipMemcpyHostToDevice);
// set the padding
hipMemset(&gpu_mesh.a[cpu_mesh.num], 0, sizeof(float4) * (gpu_mesh.num - cpu_mesh.num));
hipMemset(&gpu_mesh.b[cpu_mesh.num], 0, sizeof(float4) * (gpu_mesh.num - cpu_mesh.num));
hipMemset(&gpu_mesh.c[cpu_mesh.num], 0, sizeof(float4) * (gpu_mesh.num - cpu_mesh.num));
// allocate memory for the results
tmp_result = (float *) malloc(sizeof(float) * blocks);
hipMalloc(&gpu_result, sizeof(float) * blocks);
hipMalloc(&reduce, sizeof(float) * gpu_mesh.num);
// invoke kernel
hipEventRecord(start, 0);
hipLaunchKernelGGL(( volume_calculate_gpu), dim3(blocks),dim3(threads), 0, 0, gpu_mesh, gpu_result, reduce);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&dt_gpu, start, stop);
// copy back and sum
hipMemcpy(tmp_result, gpu_result, sizeof(float) * blocks, hipMemcpyDeviceToHost);
for (int i=1; i<blocks; i++) {
tmp_result[0] += tmp_result[i];
}
// print results
printf("Number of triangles %d, padded in GPU to %d\n", cpu_mesh.num, gpu_mesh.num);
printf("Volume calculated by CPU: %0.3f in %fms\n", abs(cpu_result), dt_cpu);
printf("Volume calculated by GPU: %0.3f in %fms\n", abs(tmp_result[0]), dt_gpu);
// clean up
free(cpu_mesh.a);
free(cpu_mesh.b);
free(cpu_mesh.c);
free(tmp_result);
hipFree(gpu_mesh.a);
hipFree(gpu_mesh.b);
hipFree(gpu_mesh.c);
hipFree(gpu_result);
hipFree(reduce);
}
|
1c40620a4ac562b855726048299c190c54addc31.cu
|
#include "volume.h"
int main(int argc, char *argv[]) {
FILE *fp = NULL;
tSTL input;
uint threads, blocks;
float cpu_result, *gpu_result, *tmp_result, *reduce;
tMesh cpu_mesh, gpu_mesh;
// timing stuff
struct timeval t1, t2;
cudaEvent_t start, stop;
float dt_cpu, dt_gpu;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// parse arguments
if (argc != 3) {
fprintf(stderr, "Usage: volume <n> <file>\n");
fprintf(stderr, "\tn\tnumber of threads per block (32,64,128,256,512,1024)\n");
fprintf(stderr, "\tfile\tpath to an STL file\n");
return -1;
}
// validate threads per block
threads = atoi(argv[1]);
switch (threads) {
case 32:
case 64:
case 128:
case 256:
case 512:
case 1024:
break;
default:
fprintf(stderr, "Wrong number of threads per block!\n");
return -1;
}
// open input file
fp = fopen(argv[2], "rb");
if (fp == NULL) {
fprintf(stderr, "Input file could not be opened!\n");
return -1;
}
// read file header
fseek(fp, sizeof(char) * 80, SEEK_SET);
fread(&cpu_mesh.num, sizeof(uint32_t), 1, fp);
// allocate CPU mesh
cpu_mesh.a = (float4 *) malloc(sizeof(float4) * cpu_mesh.num);
cpu_mesh.b = (float4 *) malloc(sizeof(float4) * cpu_mesh.num);
cpu_mesh.c = (float4 *) malloc(sizeof(float4) * cpu_mesh.num);
// read the triangles from file
for (int i=0; i<cpu_mesh.num; i++) {
fread(&input, sizeof(tSTL), 1, fp);
cpu_mesh.a[i].x = input.points[0];
cpu_mesh.a[i].y = input.points[1];
cpu_mesh.a[i].z = input.points[2];
cpu_mesh.b[i].x = input.points[3];
cpu_mesh.b[i].y = input.points[4];
cpu_mesh.b[i].z = input.points[5];
cpu_mesh.c[i].x = input.points[6];
cpu_mesh.c[i].y = input.points[7];
cpu_mesh.c[i].z = input.points[8];
}
fclose(fp);
// calculate reference solution on CPU
gettimeofday(&t1, 0);
volume_calculate_cpu(cpu_mesh, &cpu_result);
gettimeofday(&t2, 0);
dt_cpu = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0;
// set parameters for kernel
blocks = ceil(((float)cpu_mesh.num) / ((float)threads));
gpu_mesh.num = threads * blocks;
// allocate
cudaMalloc(&gpu_mesh.a, sizeof(float4) * gpu_mesh.num);
cudaMalloc(&gpu_mesh.b, sizeof(float4) * gpu_mesh.num);
cudaMalloc(&gpu_mesh.c, sizeof(float4) * gpu_mesh.num);
// copy
cudaMemcpy(gpu_mesh.a, cpu_mesh.a, sizeof(float4) * cpu_mesh.num, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_mesh.b, cpu_mesh.b, sizeof(float4) * cpu_mesh.num, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_mesh.c, cpu_mesh.c, sizeof(float4) * cpu_mesh.num, cudaMemcpyHostToDevice);
// set the padding
cudaMemset(&gpu_mesh.a[cpu_mesh.num], 0, sizeof(float4) * (gpu_mesh.num - cpu_mesh.num));
cudaMemset(&gpu_mesh.b[cpu_mesh.num], 0, sizeof(float4) * (gpu_mesh.num - cpu_mesh.num));
cudaMemset(&gpu_mesh.c[cpu_mesh.num], 0, sizeof(float4) * (gpu_mesh.num - cpu_mesh.num));
// allocate memory for the results
tmp_result = (float *) malloc(sizeof(float) * blocks);
cudaMalloc(&gpu_result, sizeof(float) * blocks);
cudaMalloc(&reduce, sizeof(float) * gpu_mesh.num);
// invoke kernel
cudaEventRecord(start, 0);
volume_calculate_gpu<<<blocks,threads>>>(gpu_mesh, gpu_result, reduce);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dt_gpu, start, stop);
// copy back and sum
cudaMemcpy(tmp_result, gpu_result, sizeof(float) * blocks, cudaMemcpyDeviceToHost);
for (int i=1; i<blocks; i++) {
tmp_result[0] += tmp_result[i];
}
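// gpu_result holds one partial sum per block (it is sized sizeof(float) * blocks
// above), so the final reduction step is simply finished here on the host.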
// print results
printf("Number of triangles %d, padded in GPU to %d\n", cpu_mesh.num, gpu_mesh.num);
printf("Volume calculated by CPU: %0.3f in %fms\n", abs(cpu_result), dt_cpu);
printf("Volume calculated by GPU: %0.3f in %fms\n", abs(tmp_result[0]), dt_gpu);
// clean up
free(cpu_mesh.a);
free(cpu_mesh.b);
free(cpu_mesh.c);
free(tmp_result);
cudaFree(gpu_mesh.a);
cudaFree(gpu_mesh.b);
cudaFree(gpu_mesh.c);
cudaFree(gpu_result);
cudaFree(reduce);
}
|
3cdc6cc71a43a16a35441a411623621efb43d003.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2017-2022 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "ColorSpace.h"
__constant__ float matYuv2Rgb[3][3];
__constant__ float matRgb2Yuv[3][3];
void inline GetConstants(int iMatrix, float &wr, float &wb, int &black, int &white, int &max) {
black = 16; white = 235;
max = 255;
switch (iMatrix)
{
case ColorSpaceStandard_BT709:
default:
wr = 0.2126f; wb = 0.0722f;
break;
case ColorSpaceStandard_FCC:
wr = 0.30f; wb = 0.11f;
break;
case ColorSpaceStandard_BT470:
case ColorSpaceStandard_BT601:
wr = 0.2990f; wb = 0.1140f;
break;
case ColorSpaceStandard_SMPTE240M:
wr = 0.212f; wb = 0.087f;
break;
case ColorSpaceStandard_BT2020:
case ColorSpaceStandard_BT2020C:
wr = 0.2627f; wb = 0.0593f;
// 10-bit only
black = 64 << 6; white = 940 << 6;
max = (1 << 16) - 1;
break;
}
}
void SetMatYuv2Rgb(int iMatrix) {
float wr, wb;
int black, white, max;
GetConstants(iMatrix, wr, wb, black, white, max);
float mat[3][3] = {
1.0f, 0.0f, (1.0f - wr) / 0.5f,
1.0f, -wb * (1.0f - wb) / 0.5f / (1 - wb - wr), -wr * (1 - wr) / 0.5f / (1 - wb - wr),
1.0f, (1.0f - wb) / 0.5f, 0.0f,
};
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
mat[i][j] = (float)(1.0 * max / (white - black) * mat[i][j]);
}
}
hipMemcpyToSymbol(matYuv2Rgb, mat, sizeof(mat));
}
void SetMatRgb2Yuv(int iMatrix) {
float wr, wb;
int black, white, max;
GetConstants(iMatrix, wr, wb, black, white, max);
float mat[3][3] = {
wr, 1.0f - wb - wr, wb,
-0.5f * wr / (1.0f - wb), -0.5f * (1 - wb - wr) / (1.0f - wb), 0.5f,
0.5f, -0.5f * (1.0f - wb - wr) / (1.0f - wr), -0.5f * wb / (1.0f - wr),
};
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
mat[i][j] = (float)(1.0 * (white - black) / max * mat[i][j]);
}
}
hipMemcpyToSymbol(matRgb2Yuv, mat, sizeof(mat));
}
template<class T>
__device__ static T Clamp(T x, T lower, T upper) {
return x < lower ? lower : (x > upper ? upper : x);
}
template<class Rgb, class YuvUnit>
__device__ inline Rgb YuvToRgbForPixel(YuvUnit y, YuvUnit u, YuvUnit v) {
const int
low = 1 << (sizeof(YuvUnit) * 8 - 4),
mid = 1 << (sizeof(YuvUnit) * 8 - 1);
float fy = (int)y - low, fu = (int)u - mid, fv = (int)v - mid;
const float maxf = (1 << sizeof(YuvUnit) * 8) - 1.0f;
YuvUnit
r = (YuvUnit)Clamp(matYuv2Rgb[0][0] * fy + matYuv2Rgb[0][1] * fu + matYuv2Rgb[0][2] * fv, 0.0f, maxf),
g = (YuvUnit)Clamp(matYuv2Rgb[1][0] * fy + matYuv2Rgb[1][1] * fu + matYuv2Rgb[1][2] * fv, 0.0f, maxf),
b = (YuvUnit)Clamp(matYuv2Rgb[2][0] * fy + matYuv2Rgb[2][1] * fu + matYuv2Rgb[2][2] * fv, 0.0f, maxf);
Rgb rgb{};
const int nShift = abs((int)sizeof(YuvUnit) - (int)sizeof(rgb.c.r)) * 8;
if (sizeof(YuvUnit) >= sizeof(rgb.c.r)) {
rgb.c.r = r >> nShift;
rgb.c.g = g >> nShift;
rgb.c.b = b >> nShift;
} else {
rgb.c.r = r << nShift;
rgb.c.g = g << nShift;
rgb.c.b = b << nShift;
}
return rgb;
}
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void YuvToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch);
YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch);
*(RgbIntx2 *)pDst = RgbIntx2 {
YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y).d,
YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y).d,
};
*(RgbIntx2 *)(pDst + nRgbPitch) = RgbIntx2 {
YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y).d,
YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y).d,
};
}
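// Note: each thread above converts a 2x2 block of pixels (x and y are both
// scaled by 2), so the two luma rows share the single chroma sample pair read
// from the NV12 chroma plane that starts nHeight rows below the luma plane
// (row index y/2 within that plane).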
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void Yuv444ToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y);
if (x + 1 >= nWidth || y >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch));
YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch));
*(RgbIntx2 *)pDst = RgbIntx2{
YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x).d,
YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y).d,
};
}
template<class YuvUnitx2, class Rgb, class RgbUnitx2>
__global__ static void YuvToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch);
YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch);
Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y),
rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y),
rgb2 = YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y),
rgb3 = YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y);
uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.x, rgb1.v.x};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.x, rgb3.v.x};
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.y, rgb1.v.y};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.y, rgb3.v.y};
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.z, rgb1.v.z};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.z, rgb3.v.z};
}
template<class YuvUnitx2, class Rgb, class RgbUnitx2>
__global__ static void Yuv444ToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y);
if (x + 1 >= nWidth || y >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch));
YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch));
Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x),
rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y);
uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.x, rgb1.v.x };
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.y, rgb1.v.y };
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.z, rgb1.v.z };
}
template <class COLOR32>
void Nv12ToColor32(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbKernel<uchar2, COLOR32, uint2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void Nv12ToColor64(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbKernel<uchar2, COLOR64, ulonglong2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( Yuv444ToRgbKernel<uchar2, COLOR32, uint2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0,
dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void YUV444ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( Yuv444ToRgbKernel<uchar2, COLOR64, ulonglong2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0,
dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void P016ToColor32(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbKernel<ushort2, COLOR32, uint2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void P016ToColor64(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbKernel<ushort2, COLOR64, ulonglong2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444P16ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( Yuv444ToRgbKernel<ushort2, COLOR32, uint2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0,
dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void YUV444P16ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( Yuv444ToRgbKernel<ushort2, COLOR64, ulonglong2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0,
dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void Nv12ToColorPlanar(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbPlanarKernel<uchar2, COLOR32, uchar2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpNv12, nNv12Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void P016ToColorPlanar(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( YuvToRgbPlanarKernel<ushort2, COLOR32, uchar2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpP016, nP016Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( Yuv444ToRgbPlanarKernel<uchar2, COLOR32, uchar2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0,
dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444P16ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
hipLaunchKernelGGL(( Yuv444ToRgbPlanarKernel<ushort2, COLOR32, uchar2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2)), dim3(dim3(32, 2)) , 0, 0,
dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
// Explicit Instantiation
template void Nv12ToColor32<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor32<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor64<BGRA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor64<RGBA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor32<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor32<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor64<BGRA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor64<RGBA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColorPlanar<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColorPlanar<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColorPlanar<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColorPlanar<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToY(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit low = 1 << (sizeof(YuvUnit) * 8 - 4);
return matRgb2Yuv[0][0] * r + matRgb2Yuv[0][1] * g + matRgb2Yuv[0][2] * b + low;
}
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToU(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1);
return matRgb2Yuv[1][0] * r + matRgb2Yuv[1][1] * g + matRgb2Yuv[1][2] * b + mid;
}
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToV(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1);
return matRgb2Yuv[2][0] * r + matRgb2Yuv[2][1] * g + matRgb2Yuv[2][2] * b + mid;
}
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void RgbToYuvKernel(uint8_t *pRgb, int nRgbPitch, uint8_t *pYuv, int nYuvPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
RgbIntx2 int2a = *(RgbIntx2 *)pSrc;
RgbIntx2 int2b = *(RgbIntx2 *)(pSrc + nRgbPitch);
Rgb rgb[4] = {int2a.x, int2a.y, int2b.x, int2b.y};
decltype(Rgb::c.r)
r = (rgb[0].c.r + rgb[1].c.r + rgb[2].c.r + rgb[3].c.r) / 4,
g = (rgb[0].c.g + rgb[1].c.g + rgb[2].c.g + rgb[3].c.g) / 4,
b = (rgb[0].c.b + rgb[1].c.b + rgb[2].c.b + rgb[3].c.b) / 4;
uint8_t *pDst = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
*(YuvUnitx2 *)pDst = YuvUnitx2 {
RgbToY<decltype(YuvUnitx2::x)>(rgb[0].c.r, rgb[0].c.g, rgb[0].c.b),
RgbToY<decltype(YuvUnitx2::x)>(rgb[1].c.r, rgb[1].c.g, rgb[1].c.b),
};
*(YuvUnitx2 *)(pDst + nYuvPitch) = YuvUnitx2 {
RgbToY<decltype(YuvUnitx2::x)>(rgb[2].c.r, rgb[2].c.g, rgb[2].c.b),
RgbToY<decltype(YuvUnitx2::x)>(rgb[3].c.r, rgb[3].c.g, rgb[3].c.b),
};
*(YuvUnitx2 *)(pDst + (nHeight - y / 2) * nYuvPitch) = YuvUnitx2 {
RgbToU<decltype(YuvUnitx2::x)>(r, g, b),
RgbToV<decltype(YuvUnitx2::x)>(r, g, b),
};
}
void Bgra64ToP016(uint8_t *dpBgra, int nBgraPitch, uint8_t *dpP016, int nP016Pitch, int nWidth, int nHeight, int iMatrix) {
SetMatRgb2Yuv(iMatrix);
hipLaunchKernelGGL(( RgbToYuvKernel<ushort2, BGRA64, ulonglong2>)
, dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0,
dpBgra, nBgraPitch, dpP016, nP016Pitch, nWidth, nHeight);
}
|
3cdc6cc71a43a16a35441a411623621efb43d003.cu
|
/*
* Copyright 2017-2022 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "ColorSpace.h"
__constant__ float matYuv2Rgb[3][3];
__constant__ float matRgb2Yuv[3][3];
void inline GetConstants(int iMatrix, float &wr, float &wb, int &black, int &white, int &max) {
black = 16; white = 235;
max = 255;
switch (iMatrix)
{
case ColorSpaceStandard_BT709:
default:
wr = 0.2126f; wb = 0.0722f;
break;
case ColorSpaceStandard_FCC:
wr = 0.30f; wb = 0.11f;
break;
case ColorSpaceStandard_BT470:
case ColorSpaceStandard_BT601:
wr = 0.2990f; wb = 0.1140f;
break;
case ColorSpaceStandard_SMPTE240M:
wr = 0.212f; wb = 0.087f;
break;
case ColorSpaceStandard_BT2020:
case ColorSpaceStandard_BT2020C:
wr = 0.2627f; wb = 0.0593f;
// 10-bit only
black = 64 << 6; white = 940 << 6;
max = (1 << 16) - 1;
break;
}
}
void SetMatYuv2Rgb(int iMatrix) {
float wr, wb;
int black, white, max;
GetConstants(iMatrix, wr, wb, black, white, max);
float mat[3][3] = {
1.0f, 0.0f, (1.0f - wr) / 0.5f,
1.0f, -wb * (1.0f - wb) / 0.5f / (1 - wb - wr), -wr * (1 - wr) / 0.5f / (1 - wb - wr),
1.0f, (1.0f - wb) / 0.5f, 0.0f,
};
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
mat[i][j] = (float)(1.0 * max / (white - black) * mat[i][j]);
}
}
cudaMemcpyToSymbol(matYuv2Rgb, mat, sizeof(mat));
}
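// Worked example of the normalization above (BT.601, 8-bit): GetConstants
// yields wr = 0.299, wb = 0.114, black = 16, white = 235, max = 255, so every
// entry is scaled by 255/219 ~= 1.164. The raw Cr-to-R entry (1 - wr)/0.5 = 1.402
// therefore becomes roughly 1.633, and the luma column scales from 1.0 to ~1.164;
// these are the coefficients the conversion kernels below apply to (Y - low) and (C - mid).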
void SetMatRgb2Yuv(int iMatrix) {
float wr, wb;
int black, white, max;
GetConstants(iMatrix, wr, wb, black, white, max);
float mat[3][3] = {
wr, 1.0f - wb - wr, wb,
-0.5f * wr / (1.0f - wb), -0.5f * (1 - wb - wr) / (1.0f - wb), 0.5f,
0.5f, -0.5f * (1.0f - wb - wr) / (1.0f - wr), -0.5f * wb / (1.0f - wr),
};
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
mat[i][j] = (float)(1.0 * (white - black) / max * mat[i][j]);
}
}
cudaMemcpyToSymbol(matRgb2Yuv, mat, sizeof(mat));
}
template<class T>
__device__ static T Clamp(T x, T lower, T upper) {
return x < lower ? lower : (x > upper ? upper : x);
}
template<class Rgb, class YuvUnit>
__device__ inline Rgb YuvToRgbForPixel(YuvUnit y, YuvUnit u, YuvUnit v) {
const int
low = 1 << (sizeof(YuvUnit) * 8 - 4),
mid = 1 << (sizeof(YuvUnit) * 8 - 1);
float fy = (int)y - low, fu = (int)u - mid, fv = (int)v - mid;
const float maxf = (1 << sizeof(YuvUnit) * 8) - 1.0f;
YuvUnit
r = (YuvUnit)Clamp(matYuv2Rgb[0][0] * fy + matYuv2Rgb[0][1] * fu + matYuv2Rgb[0][2] * fv, 0.0f, maxf),
g = (YuvUnit)Clamp(matYuv2Rgb[1][0] * fy + matYuv2Rgb[1][1] * fu + matYuv2Rgb[1][2] * fv, 0.0f, maxf),
b = (YuvUnit)Clamp(matYuv2Rgb[2][0] * fy + matYuv2Rgb[2][1] * fu + matYuv2Rgb[2][2] * fv, 0.0f, maxf);
Rgb rgb{};
const int nShift = abs((int)sizeof(YuvUnit) - (int)sizeof(rgb.c.r)) * 8;
if (sizeof(YuvUnit) >= sizeof(rgb.c.r)) {
rgb.c.r = r >> nShift;
rgb.c.g = g >> nShift;
rgb.c.b = b >> nShift;
} else {
rgb.c.r = r << nShift;
rgb.c.g = g << nShift;
rgb.c.b = b << nShift;
}
return rgb;
}
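// For reference: nShift above bridges differing bit depths. For example, 16-bit
// P016 input feeding 8-bit BGRA32 gives nShift = 8 and the clamped values are
// shifted right by 8, while 8-bit NV12 feeding BGRA64 shifts left by 8 (assuming
// BGRA64 stores 16-bit components, as the ulonglong2 stores suggest).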
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void YuvToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch);
YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch);
*(RgbIntx2 *)pDst = RgbIntx2 {
YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y).d,
YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y).d,
};
*(RgbIntx2 *)(pDst + nRgbPitch) = RgbIntx2 {
YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y).d,
YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y).d,
};
}
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void Yuv444ToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y);
if (x + 1 >= nWidth || y >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch));
YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch));
*(RgbIntx2 *)pDst = RgbIntx2{
YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x).d,
YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y).d,
};
}
template<class YuvUnitx2, class Rgb, class RgbUnitx2>
__global__ static void YuvToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch);
YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch);
Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y),
rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y),
rgb2 = YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y),
rgb3 = YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y);
uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.x, rgb1.v.x};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.x, rgb3.v.x};
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.y, rgb1.v.y};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.y, rgb3.v.y};
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2 {rgb0.v.z, rgb1.v.z};
*(RgbUnitx2 *)(pDst + nRgbpPitch) = RgbUnitx2 {rgb2.v.z, rgb3.v.z};
}
template<class YuvUnitx2, class Rgb, class RgbUnitx2>
__global__ static void Yuv444ToRgbPlanarKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgbp, int nRgbpPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y);
if (x + 1 >= nWidth || y >= nHeight) {
return;
}
uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc;
YuvUnitx2 ch1 = *(YuvUnitx2 *)(pSrc + (nHeight * nYuvPitch));
YuvUnitx2 ch2 = *(YuvUnitx2 *)(pSrc + (2 * nHeight * nYuvPitch));
Rgb rgb0 = YuvToRgbForPixel<Rgb>(l0.x, ch1.x, ch2.x),
rgb1 = YuvToRgbForPixel<Rgb>(l0.y, ch1.y, ch2.y);
uint8_t *pDst = pRgbp + x * sizeof(RgbUnitx2) / 2 + y * nRgbpPitch;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.x, rgb1.v.x };
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.y, rgb1.v.y };
pDst += nRgbpPitch * nHeight;
*(RgbUnitx2 *)pDst = RgbUnitx2{ rgb0.v.z, rgb1.v.z };
}
template <class COLOR32>
void Nv12ToColor32(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbKernel<uchar2, COLOR32, uint2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void Nv12ToColor64(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbKernel<uchar2, COLOR64, ulonglong2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbKernel<uchar2, COLOR32, uint2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>>
(dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void YUV444ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbKernel<uchar2, COLOR64, ulonglong2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>>
(dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void P016ToColor32(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbKernel<ushort2, COLOR32, uint2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void P016ToColor64(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbKernel<ushort2, COLOR64, ulonglong2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpP016, nP016Pitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444P16ToColor32(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbKernel<ushort2, COLOR32, uint2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>>
(dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR64>
void YUV444P16ToColor64(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbKernel<ushort2, COLOR64, ulonglong2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>>
(dpYUV444, nPitch, dpBgra, nBgraPitch, nWidth, nHeight);
}
template <class COLOR32>
void Nv12ToColorPlanar(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbPlanarKernel<uchar2, COLOR32, uchar2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpNv12, nNv12Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void P016ToColorPlanar(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
YuvToRgbPlanarKernel<ushort2, COLOR32, uchar2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpP016, nP016Pitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbPlanarKernel<uchar2, COLOR32, uchar2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2) >>>
(dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
template <class COLOR32>
void YUV444P16ToColorPlanar(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix) {
SetMatYuv2Rgb(iMatrix);
Yuv444ToRgbPlanarKernel<ushort2, COLOR32, uchar2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2), dim3(32, 2)>>>
(dpYUV444, nPitch, dpBgrp, nBgrpPitch, nWidth, nHeight);
}
// Explicit Instantiation
template void Nv12ToColor32<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor32<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor64<BGRA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColor64<RGBA64>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor32<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor32<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor64<BGRA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColor64<RGBA64>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor32<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor32<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor64<BGRA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColor64<RGBA64>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColorPlanar<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void Nv12ToColorPlanar<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColorPlanar<BGRA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void P016ToColorPlanar<RGBA32>(uint8_t *dpP016, int nP016Pitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColorPlanar<BGRA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template void YUV444P16ToColorPlanar<RGBA32>(uint8_t *dpYUV444, int nPitch, uint8_t *dpBgrp, int nBgrpPitch, int nWidth, int nHeight, int iMatrix);
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToY(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit low = 1 << (sizeof(YuvUnit) * 8 - 4);
return matRgb2Yuv[0][0] * r + matRgb2Yuv[0][1] * g + matRgb2Yuv[0][2] * b + low;
}
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToU(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1);
return matRgb2Yuv[1][0] * r + matRgb2Yuv[1][1] * g + matRgb2Yuv[1][2] * b + mid;
}
template<class YuvUnit, class RgbUnit>
__device__ inline YuvUnit RgbToV(RgbUnit r, RgbUnit g, RgbUnit b) {
const YuvUnit mid = 1 << (sizeof(YuvUnit) * 8 - 1);
return matRgb2Yuv[2][0] * r + matRgb2Yuv[2][1] * g + matRgb2Yuv[2][2] * b + mid;
}
template<class YuvUnitx2, class Rgb, class RgbIntx2>
__global__ static void RgbToYuvKernel(uint8_t *pRgb, int nRgbPitch, uint8_t *pYuv, int nYuvPitch, int nWidth, int nHeight) {
int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= nWidth || y + 1 >= nHeight) {
return;
}
uint8_t *pSrc = pRgb + x * sizeof(Rgb) + y * nRgbPitch;
RgbIntx2 int2a = *(RgbIntx2 *)pSrc;
RgbIntx2 int2b = *(RgbIntx2 *)(pSrc + nRgbPitch);
Rgb rgb[4] = {int2a.x, int2a.y, int2b.x, int2b.y};
decltype(Rgb::c.r)
r = (rgb[0].c.r + rgb[1].c.r + rgb[2].c.r + rgb[3].c.r) / 4,
g = (rgb[0].c.g + rgb[1].c.g + rgb[2].c.g + rgb[3].c.g) / 4,
b = (rgb[0].c.b + rgb[1].c.b + rgb[2].c.b + rgb[3].c.b) / 4;
uint8_t *pDst = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch;
*(YuvUnitx2 *)pDst = YuvUnitx2 {
RgbToY<decltype(YuvUnitx2::x)>(rgb[0].c.r, rgb[0].c.g, rgb[0].c.b),
RgbToY<decltype(YuvUnitx2::x)>(rgb[1].c.r, rgb[1].c.g, rgb[1].c.b),
};
*(YuvUnitx2 *)(pDst + nYuvPitch) = YuvUnitx2 {
RgbToY<decltype(YuvUnitx2::x)>(rgb[2].c.r, rgb[2].c.g, rgb[2].c.b),
RgbToY<decltype(YuvUnitx2::x)>(rgb[3].c.r, rgb[3].c.g, rgb[3].c.b),
};
*(YuvUnitx2 *)(pDst + (nHeight - y / 2) * nYuvPitch) = YuvUnitx2 {
RgbToU<decltype(YuvUnitx2::x)>(r, g, b),
RgbToV<decltype(YuvUnitx2::x)>(r, g, b),
};
}
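// Note on the layout handled by RgbToYuvKernel (reconstructed from the addressing):
// luma is written for all four pixels of the 2x2 block, while U and V come from the
// block-averaged RGB and are stored interleaved in the chroma plane that begins
// nHeight rows below the luma plane, i.e. the semi-planar NV12/P016 arrangement
// the decoder-side kernels above assume.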
void Bgra64ToP016(uint8_t *dpBgra, int nBgraPitch, uint8_t *dpP016, int nP016Pitch, int nWidth, int nHeight, int iMatrix) {
SetMatRgb2Yuv(iMatrix);
RgbToYuvKernel<ushort2, BGRA64, ulonglong2>
<<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>>
(dpBgra, nBgraPitch, dpP016, nP016Pitch, nWidth, nHeight);
}
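// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original sample): one plausible
// way to drive the converters above from host code. The helper name, the
// tightly packed pitches and the BT.709 choice are assumptions made for this
// example only; error checking is omitted for brevity.
void ExampleConvertNv12ToBgra(uint8_t *dpNv12, uint8_t *dpBgra, int nWidth, int nHeight) {
int nNv12Pitch = nWidth; // assumed tight pitch: NV12 occupies nWidth x (nHeight * 3 / 2) bytes
int nBgraPitch = nWidth * 4; // assumed tight pitch: BGRA32 is 4 bytes per pixel
Nv12ToColor32<BGRA32>(dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight, ColorSpaceStandard_BT709);
cudaDeviceSynchronize(); // the launch above goes to the default stream
}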
|
78921fffbd648314038b292276c660d4914fc6ba.hip
|
// !!! This is a file automatically generated by hipify!!!
/*****
Exercise 1 of the mini-course
Rafael Sturaro Bernardelli - 2017
-- 90% copied from the CUDA Samples
******/
#include <hip/hip_runtime.h>
#include <iostream>
#include <tiny_helper_cuda.h>
using namespace std;
//! vectorAdd: implements the addition kernel
/*!
A: input vector 1
B: input vector 2
C: result of the addition
numElements: number of elements in each vector
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
//IMPLEMENT!! compute the vector index
int i = blockDim.x * blockIdx.x + threadIdx.x; // filled in with the standard CUDA Samples index
if (i < numElements)
{
//IMPLEMENT!! perform the sum
C[i] = A[i] + B[i]; // filled in with the element-wise addition
}
}
int main(void)
{
int numElements = 20000;
size_t size = numElements * sizeof(float);
// allocate the vectors on the host
float *h_A = (float *)malloc(size);
float *h_B = (float *)malloc(size);
float *h_C = (float *)malloc(size);
// check that the allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
cerr << "Error allocating the vectors on the host" << endl;
exit(EXIT_FAILURE);
}
// initialize with random values
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// allocate the vectors on the device
float *d_A, *d_B, *d_C;
checkCudaErrors(hipMalloc((void **)&d_A, size));
checkCudaErrors(hipMalloc((void **)&d_B, size));
checkCudaErrors(hipMalloc((void **)&d_C, size));
// copy the contents from the host to the device
checkCudaErrors(hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice));
// Run the kernel
//IMPLEMENT!! determine dimGrid and dimBlock
dim3 dimBlock(256); // filled in with a typical block size
dim3 dimGrid((numElements + dimBlock.x - 1) / dimBlock.x); // enough blocks to cover all elements
hipLaunchKernelGGL(( vectorAdd), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, numElements);
getLastCudaError("vectorAdd kernel");
// copy the result back to the host
checkCudaErrors(hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost));
// check that the sum is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
cerr << "Erro! A soma no verifica!" << endl,
exit(EXIT_FAILURE);
}
}
cout << "A soma est correta! Passa no teste." << endl;
// libera a memria da placa
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
// free the host memory
free(h_A);
free(h_B);
free(h_C);
cout << "Fim" << endl;
return 0;
}
|
78921fffbd648314038b292276c660d4914fc6ba.cu
|
/*****
Exercise 1 of the mini-course
Rafael Sturaro Bernardelli - 2017
-- 90% copied from the CUDA Samples
******/
#include <cuda.h>
#include <iostream>
#include <tiny_helper_cuda.h>
using namespace std;
//! vectorAdd: implements the addition kernel
/*!
A: input vector 1
B: input vector 2
C: result of the addition
numElements: number of elements in each vector
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
//IMPLEMENT!! compute the vector index
int i = blockDim.x * blockIdx.x + threadIdx.x; // filled in with the standard CUDA Samples index
if (i < numElements)
{
//IMPLEMENT!! perform the sum
C[i] = A[i] + B[i]; // filled in with the element-wise addition
}
}
int main(void)
{
int numElements = 20000;
size_t size = numElements * sizeof(float);
// allocate the vectors on the host
float *h_A = (float *)malloc(size);
float *h_B = (float *)malloc(size);
float *h_C = (float *)malloc(size);
// check that the allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
cerr << "Error allocating the vectors on the host" << endl;
exit(EXIT_FAILURE);
}
// initialize with random values
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// allocate the vectors on the device
float *d_A, *d_B, *d_C;
checkCudaErrors(cudaMalloc((void **)&d_A, size));
checkCudaErrors(cudaMalloc((void **)&d_B, size));
checkCudaErrors(cudaMalloc((void **)&d_C, size));
// copy the contents from the host to the device
checkCudaErrors(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));
// Run the kernel
//IMPLEMENT!! determine dimGrid and dimBlock
dim3 dimBlock(256); // filled in with a typical block size
dim3 dimGrid((numElements + dimBlock.x - 1) / dimBlock.x); // enough blocks to cover all elements
vectorAdd<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, numElements);
getLastCudaError("vectorAdd kernel");
// copy the result back to the host
checkCudaErrors(cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost));
// check that the sum is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
cerr << "Erro! A soma não verifica!" << endl,
exit(EXIT_FAILURE);
}
}
cout << "A soma está correta! Passa no teste." << endl;
// libera a memória da placa
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
// free the host memory
free(h_A);
free(h_B);
free(h_C);
cout << "Fim" << endl;
return 0;
}
|
bc778ecf75d5a9d396033d62f4f923b5dd601a20.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "dali/kernels/test/kernel_poc_test.h"
namespace dali {
namespace kernels {
// Performs elementwise MAD (multiply-add).
template <typename Input1, typename Input2, typename Output>
__global__ void
ElementwiseMAD(size_t n, Output *o, const Input1 *i1, const Input2 *i2, float alpha) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
o[idx] = i1[idx] * alpha + i2[idx];
}
// Performs elementwise MAD (multiply-add).
template <typename Input1, typename Input2, typename Output>
struct MADKernelGPU {
static KernelRequirements GetRequirements(
KernelContext &context,
const InListGPU<Input1, 3> &i1,
const InListGPU<Input2, 3> &i2,
float A) {
KernelRequirements req;
req.output_shapes = { i1.shape };
return req;
}
static void Run(
KernelContext &context,
const OutListGPU<Output, 3> &o,
const InListGPU<Input1, 3> &i1,
const InListGPU<Input2, 3> &i2,
float A) {
auto n = i1.num_elements();
assert(i2.num_elements() == n);
assert(o.num_elements() == n);
size_t block = 1024;
size_t grid = (n + block - 1) / block;
hipLaunchKernelGGL(( ElementwiseMAD), dim3(grid), dim3(block), 0, context.gpu.stream, n, o.data, i1.data, i2.data, A);
}
};
template <typename Kernel_>
class KernelPoC_GPU : public ::testing::Test, public KernelPoCFixture<StorageGPU, Kernel_> {
};
using PoC_MAD_GPU = ::testing::Types<
MADKernelGPU<float, float, float>,
MADKernelGPU<int, float, float>,
MADKernelGPU<float, int, float>,
MADKernelGPU<int, int, int>
>;
TYPED_TEST_SUITE(KernelPoC_GPU, PoC_MAD_GPU);
TYPED_TEST(KernelPoC_GPU, All) {
this->RunImpl();
}
} // namespace kernels
} // namespace dali
|
bc778ecf75d5a9d396033d62f4f923b5dd601a20.cu
|
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "dali/kernels/test/kernel_poc_test.h"
namespace dali {
namespace kernels {
// Performs elementwise MAD (multiply-add).
template <typename Input1, typename Input2, typename Output>
__global__ void
ElementwiseMAD(size_t n, Output *o, const Input1 *i1, const Input2 *i2, float alpha) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
o[idx] = i1[idx] * alpha + i2[idx];
}
// Performs elementwise MAD (multiply-add).
template <typename Input1, typename Input2, typename Output>
struct MADKernelGPU {
static KernelRequirements GetRequirements(
KernelContext &context,
const InListGPU<Input1, 3> &i1,
const InListGPU<Input2, 3> &i2,
float A) {
KernelRequirements req;
req.output_shapes = { i1.shape };
return req;
}
static void Run(
KernelContext &context,
const OutListGPU<Output, 3> &o,
const InListGPU<Input1, 3> &i1,
const InListGPU<Input2, 3> &i2,
float A) {
auto n = i1.num_elements();
assert(i2.num_elements() == n);
assert(o.num_elements() == n);
size_t block = 1024;
size_t grid = (n + block - 1) / block;
ElementwiseMAD<<<grid, block, 0, context.gpu.stream>>>(n, o.data, i1.data, i2.data, A);
}
};
template <typename Kernel_>
class KernelPoC_GPU : public ::testing::Test, public KernelPoCFixture<StorageGPU, Kernel_> {
};
using PoC_MAD_GPU = ::testing::Types<
MADKernelGPU<float, float, float>,
MADKernelGPU<int, float, float>,
MADKernelGPU<float, int, float>,
MADKernelGPU<int, int, int>
>;
TYPED_TEST_SUITE(KernelPoC_GPU, PoC_MAD_GPU);
TYPED_TEST(KernelPoC_GPU, All) {
this->RunImpl();
}
} // namespace kernels
} // namespace dali
|
6da6cf56ce4aa8188f751031c4bc12c01df9d78c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <unittest/unittest.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
#ifdef THRUST_TEST_DEVICE_SIDE
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2>
__global__
void inclusive_scan_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result)
{
thrust::inclusive_scan(exec, first, last, result);
}
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2>
__global__
void exclusive_scan_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result)
{
thrust::exclusive_scan(exec, first, last, result);
}
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename T>
__global__
void exclusive_scan_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result, T init)
{
thrust::exclusive_scan(exec, first, last, result, init);
}
template<typename T, typename ExecutionPolicy>
void TestScanDevice(ExecutionPolicy exec, const size_t n)
{
thrust::host_vector<T> h_input = unittest::random_integers<T>(n);
thrust::device_vector<T> d_input = h_input;
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::inclusive_scan(h_input.begin(), h_input.end(), h_output.begin());
hipLaunchKernelGGL(( inclusive_scan_kernel), dim3(1),dim3(1), 0, 0, exec, d_input.begin(), d_input.end(), d_output.begin());
{
hipError_t const err = hipDeviceSynchronize();
ASSERT_EQUAL(hipSuccess, err);
}
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan(h_input.begin(), h_input.end(), h_output.begin());
hipLaunchKernelGGL(( exclusive_scan_kernel), dim3(1),dim3(1), 0, 0, exec, d_input.begin(), d_input.end(), d_output.begin());
{
hipError_t const err = hipDeviceSynchronize();
ASSERT_EQUAL(hipSuccess, err);
}
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan(h_input.begin(), h_input.end(), h_output.begin(), (T) 11);
hipLaunchKernelGGL(( exclusive_scan_kernel), dim3(1),dim3(1), 0, 0, exec, d_input.begin(), d_input.end(), d_output.begin(), (T) 11);
{
hipError_t const err = hipDeviceSynchronize();
ASSERT_EQUAL(hipSuccess, err);
}
ASSERT_EQUAL(d_output, h_output);
// in-place scans
h_output = h_input;
d_output = d_input;
thrust::inclusive_scan(h_output.begin(), h_output.end(), h_output.begin());
hipLaunchKernelGGL(( inclusive_scan_kernel), dim3(1),dim3(1), 0, 0, exec, d_output.begin(), d_output.end(), d_output.begin());
{
hipError_t const err = hipDeviceSynchronize();
ASSERT_EQUAL(hipSuccess, err);
}
ASSERT_EQUAL(d_output, h_output);
h_output = h_input;
d_output = d_input;
thrust::exclusive_scan(h_output.begin(), h_output.end(), h_output.begin());
hipLaunchKernelGGL(( exclusive_scan_kernel), dim3(1),dim3(1), 0, 0, exec, d_output.begin(), d_output.end(), d_output.begin());
{
hipError_t const err = hipDeviceSynchronize();
ASSERT_EQUAL(hipSuccess, err);
}
ASSERT_EQUAL(d_output, h_output);
}
template<typename T>
struct TestScanDeviceSeq
{
void operator()(const size_t n)
{
TestScanDevice<T>(thrust::seq, n);
}
};
VariableUnitTest<TestScanDeviceSeq, IntegralTypes> TestScanDeviceSeqInstance;
template<typename T>
struct TestScanDeviceDevice
{
void operator()(const size_t n)
{
TestScanDevice<T>(thrust::device, n);
}
};
VariableUnitTest<TestScanDeviceDevice, IntegralTypes> TestScanDeviceDeviceInstance;
#endif
void TestScanCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::value_type T;
Vector::iterator iter;
Vector input(5);
Vector result(5);
Vector output(5);
input[0] = 1; input[1] = 3; input[2] = -2; input[3] = 4; input[4] = -5;
Vector input_copy(input);
hipStream_t s;
hipStreamCreate(&s);
// inclusive scan
iter = thrust::inclusive_scan(thrust::hip::par.on(s), input.begin(), input.end(), output.begin());
hipStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(std::size_t(iter - output.begin()), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan
iter = thrust::exclusive_scan(thrust::hip::par.on(s), input.begin(), input.end(), output.begin(), 0);
hipStreamSynchronize(s);
result[0] = 0; result[1] = 1; result[2] = 4; result[3] = 2; result[4] = 6;
ASSERT_EQUAL(std::size_t(iter - output.begin()), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan with init
iter = thrust::exclusive_scan(thrust::hip::par.on(s), input.begin(), input.end(), output.begin(), 3);
hipStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(std::size_t(iter - output.begin()), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// inclusive scan with op
iter = thrust::inclusive_scan(thrust::hip::par.on(s), input.begin(), input.end(), output.begin(), thrust::plus<T>());
hipStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(std::size_t(iter - output.begin()), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan with init and op
iter = thrust::exclusive_scan(thrust::hip::par.on(s), input.begin(), input.end(), output.begin(), 3, thrust::plus<T>());
hipStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(std::size_t(iter - output.begin()), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// inplace inclusive scan
input = input_copy;
iter = thrust::inclusive_scan(thrust::hip::par.on(s), input.begin(), input.end(), input.begin());
hipStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(std::size_t(iter - input.begin()), input.size());
ASSERT_EQUAL(input, result);
// inplace exclusive scan with init
input = input_copy;
iter = thrust::exclusive_scan(thrust::hip::par.on(s), input.begin(), input.end(), input.begin(), 3);
hipStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(std::size_t(iter - input.begin()), input.size());
ASSERT_EQUAL(input, result);
// inplace exclusive scan with implicit init=0
input = input_copy;
iter = thrust::exclusive_scan(thrust::hip::par.on(s), input.begin(), input.end(), input.begin());
hipStreamSynchronize(s);
result[0] = 0; result[1] = 1; result[2] = 4; result[3] = 2; result[4] = 6;
ASSERT_EQUAL(std::size_t(iter - input.begin()), input.size());
ASSERT_EQUAL(input, result);
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestScanCudaStreams);
template <typename T>
struct const_ref_plus_mod3
{
T * table;
const_ref_plus_mod3(T * table) : table(table) {}
__host__ __device__
const T& operator()(T a, T b)
{
return table[(int) (a + b)];
}
};
static void TestInclusiveScanWithConstAccumulator(void)
{
// add numbers modulo 3 with external lookup table
thrust::device_vector<int> data(7);
data[0] = 0;
data[1] = 1;
data[2] = 2;
data[3] = 1;
data[4] = 2;
data[5] = 0;
data[6] = 1;
thrust::device_vector<int> table(6);
table[0] = 0;
table[1] = 1;
table[2] = 2;
table[3] = 0;
table[4] = 1;
table[5] = 2;
thrust::inclusive_scan(data.begin(), data.end(), data.begin(), const_ref_plus_mod3<int>(thrust::raw_pointer_cast(&table[0])));
ASSERT_EQUAL(data[0], 0);
ASSERT_EQUAL(data[1], 1);
ASSERT_EQUAL(data[2], 0);
ASSERT_EQUAL(data[3], 1);
ASSERT_EQUAL(data[4], 0);
ASSERT_EQUAL(data[5], 0);
ASSERT_EQUAL(data[6], 1);
}
DECLARE_UNITTEST(TestInclusiveScanWithConstAccumulator);
|
6da6cf56ce4aa8188f751031c4bc12c01df9d78c.cu
|
#include <cstdio>
#include <unittest/unittest.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
#ifdef THRUST_TEST_DEVICE_SIDE
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2>
__global__
void inclusive_scan_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result)
{
thrust::inclusive_scan(exec, first, last, result);
}
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2>
__global__
void exclusive_scan_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result)
{
thrust::exclusive_scan(exec, first, last, result);
}
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename T>
__global__
void exclusive_scan_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result, T init)
{
thrust::exclusive_scan(exec, first, last, result, init);
}
template<typename T, typename ExecutionPolicy>
void TestScanDevice(ExecutionPolicy exec, const size_t n)
{
thrust::host_vector<T> h_input = unittest::random_integers<T>(n);
thrust::device_vector<T> d_input = h_input;
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::inclusive_scan(h_input.begin(), h_input.end(), h_output.begin());
inclusive_scan_kernel<<<1,1>>>(exec, d_input.begin(), d_input.end(), d_output.begin());
{
cudaError_t const err = cudaDeviceSynchronize();
ASSERT_EQUAL(cudaSuccess, err);
}
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan(h_input.begin(), h_input.end(), h_output.begin());
exclusive_scan_kernel<<<1,1>>>(exec, d_input.begin(), d_input.end(), d_output.begin());
{
cudaError_t const err = cudaDeviceSynchronize();
ASSERT_EQUAL(cudaSuccess, err);
}
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan(h_input.begin(), h_input.end(), h_output.begin(), (T) 11);
exclusive_scan_kernel<<<1,1>>>(exec, d_input.begin(), d_input.end(), d_output.begin(), (T) 11);
{
cudaError_t const err = cudaDeviceSynchronize();
ASSERT_EQUAL(cudaSuccess, err);
}
ASSERT_EQUAL(d_output, h_output);
// in-place scans
h_output = h_input;
d_output = d_input;
thrust::inclusive_scan(h_output.begin(), h_output.end(), h_output.begin());
inclusive_scan_kernel<<<1,1>>>(exec, d_output.begin(), d_output.end(), d_output.begin());
{
cudaError_t const err = cudaDeviceSynchronize();
ASSERT_EQUAL(cudaSuccess, err);
}
ASSERT_EQUAL(d_output, h_output);
h_output = h_input;
d_output = d_input;
thrust::exclusive_scan(h_output.begin(), h_output.end(), h_output.begin());
exclusive_scan_kernel<<<1,1>>>(exec, d_output.begin(), d_output.end(), d_output.begin());
{
cudaError_t const err = cudaDeviceSynchronize();
ASSERT_EQUAL(cudaSuccess, err);
}
ASSERT_EQUAL(d_output, h_output);
}
template<typename T>
struct TestScanDeviceSeq
{
void operator()(const size_t n)
{
TestScanDevice<T>(thrust::seq, n);
}
};
VariableUnitTest<TestScanDeviceSeq, IntegralTypes> TestScanDeviceSeqInstance;
template<typename T>
struct TestScanDeviceDevice
{
void operator()(const size_t n)
{
TestScanDevice<T>(thrust::device, n);
}
};
VariableUnitTest<TestScanDeviceDevice, IntegralTypes> TestScanDeviceDeviceInstance;
#endif
void TestScanCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::value_type T;
Vector::iterator iter;
Vector input(5);
Vector result(5);
Vector output(5);
input[0] = 1; input[1] = 3; input[2] = -2; input[3] = 4; input[4] = -5;
Vector input_copy(input);
cudaStream_t s;
cudaStreamCreate(&s);
// inclusive scan
iter = thrust::inclusive_scan(thrust::cuda::par.on(s), input.begin(), input.end(), output.begin());
cudaStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(std::size_t(iter - output.begin()), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan
iter = thrust::exclusive_scan(thrust::cuda::par.on(s), input.begin(), input.end(), output.begin(), 0);
cudaStreamSynchronize(s);
result[0] = 0; result[1] = 1; result[2] = 4; result[3] = 2; result[4] = 6;
ASSERT_EQUAL(std::size_t(iter - output.begin()), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan with init
iter = thrust::exclusive_scan(thrust::cuda::par.on(s), input.begin(), input.end(), output.begin(), 3);
cudaStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(std::size_t(iter - output.begin()), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// inclusive scan with op
iter = thrust::inclusive_scan(thrust::cuda::par.on(s), input.begin(), input.end(), output.begin(), thrust::plus<T>());
cudaStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(std::size_t(iter - output.begin()), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan with init and op
iter = thrust::exclusive_scan(thrust::cuda::par.on(s), input.begin(), input.end(), output.begin(), 3, thrust::plus<T>());
cudaStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(std::size_t(iter - output.begin()), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// inplace inclusive scan
input = input_copy;
iter = thrust::inclusive_scan(thrust::cuda::par.on(s), input.begin(), input.end(), input.begin());
cudaStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(std::size_t(iter - input.begin()), input.size());
ASSERT_EQUAL(input, result);
// inplace exclusive scan with init
input = input_copy;
iter = thrust::exclusive_scan(thrust::cuda::par.on(s), input.begin(), input.end(), input.begin(), 3);
cudaStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(std::size_t(iter - input.begin()), input.size());
ASSERT_EQUAL(input, result);
// inplace exclusive scan with implicit init=0
input = input_copy;
iter = thrust::exclusive_scan(thrust::cuda::par.on(s), input.begin(), input.end(), input.begin());
cudaStreamSynchronize(s);
result[0] = 0; result[1] = 1; result[2] = 4; result[3] = 2; result[4] = 6;
ASSERT_EQUAL(std::size_t(iter - input.begin()), input.size());
ASSERT_EQUAL(input, result);
cudaStreamDestroy(s);
}
DECLARE_UNITTEST(TestScanCudaStreams);
template <typename T>
struct const_ref_plus_mod3
{
T * table;
const_ref_plus_mod3(T * table) : table(table) {}
__host__ __device__
const T& operator()(T a, T b)
{
return table[(int) (a + b)];
}
};
static void TestInclusiveScanWithConstAccumulator(void)
{
// add numbers modulo 3 with external lookup table
thrust::device_vector<int> data(7);
data[0] = 0;
data[1] = 1;
data[2] = 2;
data[3] = 1;
data[4] = 2;
data[5] = 0;
data[6] = 1;
thrust::device_vector<int> table(6);
table[0] = 0;
table[1] = 1;
table[2] = 2;
table[3] = 0;
table[4] = 1;
table[5] = 2;
thrust::inclusive_scan(data.begin(), data.end(), data.begin(), const_ref_plus_mod3<int>(thrust::raw_pointer_cast(&table[0])));
ASSERT_EQUAL(data[0], 0);
ASSERT_EQUAL(data[1], 1);
ASSERT_EQUAL(data[2], 0);
ASSERT_EQUAL(data[3], 1);
ASSERT_EQUAL(data[4], 0);
ASSERT_EQUAL(data[5], 0);
ASSERT_EQUAL(data[6], 1);
}
DECLARE_UNITTEST(TestInclusiveScanWithConstAccumulator);
|
4863cd09f8ce81c8fcb4ed1b683a06dd5b848a3f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include "cuProxy.cuh"
#include "Vector.h"
template<typename T>
__device__
inline T constexpr pow2(const T v) noexcept { return v * v; }
template<class _Ty>
__device__ __host__ __forceinline__
int binarySearch(const _Ty* __restrict__ orderedList, int lowerbound, int upperbound, const _Ty& key) {
while (upperbound > lowerbound) {
int mid = (lowerbound + upperbound) >> 1;
if (mid == lowerbound) {
return orderedList[mid] == key ? lowerbound : -lowerbound;
}
else {
if (orderedList[mid] > key)
upperbound = mid;
else if (orderedList[mid] < key)
lowerbound = mid;
else
return mid;
}
}
return orderedList[lowerbound] == key ? lowerbound : -lowerbound;
}
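// Host-side sanity-check sketch (illustrative, not part of the original file):
// binarySearch is __host__ __device__, so it can be exercised directly on the CPU.
// The data below is made up for the example.
static inline void binarySearch_selfcheck() {
const int64_t sorted[] = { 2, 5, 7, 11, 13 };
// present key: returns the index of 7, i.e. 2
int hit = binarySearch(sorted, 0, 5, (int64_t)7);
// absent key: the function signals a miss by returning the (negated) lower bound
int miss = binarySearch(sorted, 0, 5, (int64_t)8);
(void)hit; (void)miss;
}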
__global__
void update_memorypool(int* current_memoryblock, int* current_offset, const int poolsize) {
*current_memoryblock++;
*current_offset -= poolsize;
}
__device__ __forceinline__
bool constraints1(const Vector3& point) {
return true;
}
__device__ __forceinline__
bool constraints2(const Vector3& point) {
return true;
}
__global__
void cuLSH(
/*lv.1 search*/
const GPU_Lsh_Func *funcs, const GPU_HashTable *hashtable, const int64_t *table_contents, const int* segments_in_table,
const GPU_Segments *segments, int* temp, const Vector3 *f_streamlines, const int* lineoffsets, const int n_lines,
const int n_pts, const int set_size, const int set_associative,
/*lv.2 search*/
const GPU_SegmentsLv2 *segs_lv2, const short* lv2_buckets,
/*memory_pool*/
int** memory_pool, int64_t* memory_pool_index, int* current_memoryblock, int* current_offset, int* _idxs, const int poolsize,
int* finished
) {
extern __shared__ unsigned char sh_tmp[];
const int tmp_block_size = (set_associative - 1) * set_size;
int _idx = blockIdx.x;
temp += (_idx) * tmp_block_size;
int i = _idxs[_idx] ? _idxs[_idx] : _idx;
constexpr int get_tag = 0xf;
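// Layout of each temp[] entry, as reconstructed from the bit manipulation below
// (an interpretation, not documented in the original source):
// bits 0-3 : tag = line % set_associative
// bits 4-26 : segment index found in the hash table
// bits 27-30: number of ways currently occupied in this set
// bit 31 : 0x80000000 marks an empty/invalidated slot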
bool overflew = 0;
//bool sgn = false;
for (; i < n_pts; i += gridDim.x) {
int cnt_result = 0;
//lv.1 search
#pragma region lv.1 search
for (int t = 0; t < L; t++)
{
int64_t fingerprint1 = 0, fingerprint2 = 0;
for (int f = 0; f < K; f++) {
const GPU_Lsh_Func curr_func = funcs[hashtable[t].LSHFuncs[f]];
const int n_buckets = curr_func.n_buckets;
const int func_val = curr_func(f_streamlines[i]);
int64_t tmp_fp1 = hashtable[t].r1[f] * func_val;
int64_t tmp_fp2 = hashtable[t].r1[f] * func_val;
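// The two lines below fold the 64-bit products back into the range of Prime.
// This only works if Prime == 2^32 - 5 (defined elsewhere): then 2^32 = Prime + 5,
// so hi * 2^32 + lo == 5 * hi + lo (mod Prime); the subsequent "- Prime" steps
// finish the reduction when a partial sum overflows 32 bits.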
tmp_fp1 = 5 * (tmp_fp1 >> 32ll) + (tmp_fp1 & 0xffffffffll);
tmp_fp2 = 5 * (tmp_fp2 >> 32ll) + (tmp_fp2 & 0xffffffffll);
fingerprint1 += (tmp_fp1 >> 32ll) ? (tmp_fp1 - Prime) : tmp_fp1;
fingerprint2 += (tmp_fp2 >> 32ll) ? (tmp_fp2 - Prime) : tmp_fp2;
fingerprint1 = (fingerprint1 >> 32ll) ? (fingerprint1 - Prime) : fingerprint1;
fingerprint2 = (fingerprint2 >> 32ll) ? (fingerprint2 - Prime) : fingerprint2;
}
fingerprint1 %= TABLESIZE;
fingerprint2 %= Prime;
const int table_search_begin = hashtable[t].table_offsets[fingerprint1],
table_search_end = hashtable[t].table_offsets[fingerprint1 + 1];
int found = binarySearch(table_contents, table_search_begin, table_search_end, fingerprint2);
if (found > 0) {
const unsigned line = segments[found].line;
const float dist = segments[found].centroid.sqDist(f_streamlines[i]);
if (dist < 1.f && constraints1(segments[found].centroid))
{
const int position = line / set_associative;
const int tag = line % set_associative;
const int current_set = (temp[position] &0x7fffffff) >> 27;
constexpr int set_increment = 1 << 27;
if (current_set < set_associative)//max of 16 way set-associative; availible slots
{
bool exists = false;
for (int j = 0; j < current_set; j++) {
const int this_segment = ((temp[position + j * set_size] & 0x07ffffff) >> 4);
if ((temp[position + j * set_size] & get_tag) == tag) { // parenthesized: & binds looser than ==
if (dist < segments[this_segment].centroid.sqDist(f_streamlines[i]))
{
temp[position + j * set_size] &= 0xf800000f;
temp[position + j * set_size] |= (found << 4);
exists = true;
break;
}
}
}
if (!exists) {
temp[position] += set_increment;// total_sets ++
temp[position + (current_set + 1) * set_size] = found << 4 | tag;
}
}
}
}
}
#pragma endregion
#pragma region lv.2 search
for (int j = 0; j < tmp_block_size; ++j) {
if (temp[j] != 0x80000000) {
const int this_tag = temp[j] & get_tag;
const int this_segment = ((temp[j] & 0x07ffffff) >> 4);
const int this_line = (j << 4) + this_tag; // parenthesized: + binds tighter than <<
const GPU_SegmentsLv2& this_seglv2 = segs_lv2[this_segment];
const float projection = (f_streamlines[i] - this_seglv2.origin).project(this_seglv2.projector) / this_seglv2.width;
int key = projection;
if (key < 0)
key = 0;
else if (key > this_seglv2.length)
key = this_seglv2.length;
key += this_seglv2.bucket_pos_offset;
const int nearest_point = lv2_buckets[key];
if (!constraints2(f_streamlines[lineoffsets[this_line] + nearest_point]))
temp[j] = 0x80000000;
else
++cnt_result;
}
}
#pragma endregion
#pragma region storing
int this_offset = atomicAdd(current_offset, cnt_result);
int this_memoryblock = *current_memoryblock;
int this_end = this_offset + cnt_result;
int curr_end, this_count = 0;
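// memory_pool_index packs three fields into one 64-bit word (reconstructed from
// the shifts below, assuming poolsize and cnt_result fit in 27 bits):
// bits 0-26 : number of results for this point
// bits 27-53: starting offset inside the memory block
// bits 54+ : index of the memory block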
memory_pool_index[i] =
((int64_t)this_memoryblock << 54) | ((int64_t)this_offset << 27) | cnt_result; // cast to int64_t before shifting past 32 bits
if (this_end > poolsize) {
if (this_offset > poolsize)
{
this_offset -= poolsize;
curr_end = this_end - poolsize;
++this_memoryblock;
overflew = true;
}
else {
curr_end = poolsize;
overflew = true;
}
}
else
curr_end = this_end;
for (int j = 0; j < tmp_block_size; ++j) {
if (temp[j] != 0x80000000) {
++this_count;
memory_pool[this_memoryblock][this_offset++] = temp[j];
if (this_offset >= curr_end && overflew) {
if (this_count >= cnt_result)
break;
this_count = 0;
curr_end = this_end - poolsize;
++this_memoryblock;
}
temp[j] = 0x80000000;
}
}
if (overflew)
break;
#pragma endregion
}
_idxs[_idx] = i;
atomicAnd(finished, i >= n_pts);
}
__device__
unsigned int findRange(const int* orderedList, int lowerbound, int upperbound, const int key) {
int mid = lowerbound; // initialized so the post-loop read is defined when the range is already narrow
while (lowerbound + 1 < upperbound) {
mid = (lowerbound + upperbound) >> 1;
if (orderedList[mid] < key)
lowerbound = mid;
else if (orderedList[mid] > key)
upperbound = mid;
else
break;
}
if (orderedList[mid] != key)
return 0xffffffff;
int upe = mid, lowe = mid;
while (lowerbound < lowe - 1) {
mid = (lowerbound + lowe) >> 1;
if (orderedList[mid] < key)
lowerbound = mid;
else
lowe = mid;
}
while (upperbound > upe + 1) {
mid = (upperbound + upe) >> 1;
if (orderedList[mid] > key)
upperbound = mid;
else
upe = mid;
}
return lowerbound | ((upperbound - lowerbound)<<20);
}
__global__
void cuLSH_lv1(
/*lv.1 search*/
const GPU_Lsh_Func *funcs, const GPU_HashTable *hashtable, const int64_t *table_contents, const unsigned int* segments_in_table,
const GPU_Segments *segments, int* temp, const Vector3 *f_streamlines, const int* lineoffsets, const int n_lines,
const int n_pts, const int n_segments, unsigned int** projections
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (; i < n_pts; i += gridDim.x * blockDim.x) {
for (int t = 0; t < L; t++)
{
int64_t fingerprint1 = 0, fingerprint2 = 0;
for (int f = 0; f < K; f++) {
const GPU_Lsh_Func curr_func = funcs[hashtable[t].LSHFuncs[f]];
const int n_buckets = curr_func.n_buckets;
const int func_val = curr_func(f_streamlines[i]);
int64_t tmp_fp1 = hashtable[t].r1[f] * func_val;
int64_t tmp_fp2 = hashtable[t].r1[f] * func_val;
tmp_fp1 = tmp_fp1 % TABLESIZE;
tmp_fp2 = tmp_fp2 % Prime;
fingerprint1 += tmp_fp1;
fingerprint2 += tmp_fp2;
fingerprint1 %= TABLESIZE;
fingerprint2 %= Prime;
}
fingerprint1 %= TABLESIZE;
fingerprint2 %= Prime;
const int table_search_begin = hashtable[t].table_offsets[fingerprint1],
table_search_end = hashtable[t].table_offsets[fingerprint1 + 1];
int found = binarySearch(table_contents + t*n_segments, table_search_begin, table_search_end, fingerprint2);
if (found == -1)
projections[t][i] = -1;
else
projections[t][i] = segments_in_table[found];//Segments that has the same fingerprints (1&2)
}
}
}
__global__
void cuLSH_lv2(
/*environments*/
const GPU_Segments *segments, unsigned char* temp, const Vector3 *f_streamlines, const int* lineoffsets, const int n_lines,
const int n_pts, const int set_size, const int set_associative, const unsigned int** projections,
/*lv.2 search*/
const GPU_SegmentsLv2 *segs_lv2, const short* lv2_buckets,
/*memory_pool*/
int** memory_pool, int64_t* memory_pool_index, int* current_memoryblock, int* current_offset, int* _idxs, const int poolsize,
int* finished
) {
extern __shared__ unsigned char sh_tmp[];
unsigned char* ptr_tmp = sh_tmp;
const int tmp_block_size = (set_associative - 1) * set_size;
int _idx = blockIdx.x * blockDim.x + threadIdx.x;
temp += (_idx)* tmp_block_size;
int i = _idxs[_idx] ? _idxs[_idx] : _idx;
constexpr int get_tag = 0xf;
	const unsigned int cache_page_size = 384;//49152 (bytes of shared memory per block) /128 (blocks) /1 (byte per segment)
auto get_cache = [&temp, &ptr_tmp, &cache_page_size](const int _Index) -> unsigned char&/*constexpr*/
{ return (_Index < cache_page_size) ? ptr_tmp[_Index] : (temp)[_Index - cache_page_size]; };
bool overflew = 0;
//bool sgn = false;
for (; i < n_pts; i += gridDim.x*blockDim.x) {
int cnt_result = 0;
#pragma region lv.2 search
for (int j = 0; j < tmp_block_size; ++j) {
if (temp[j] != 0x80000000) {
const int this_tag = temp[j] & get_tag;
const int this_segment = ((temp[j] & 0x07ffffff) >> 4);
				const int this_line = (j << 4) + this_tag;
const GPU_SegmentsLv2& this_seglv2 = segs_lv2[this_segment];
const float projection = (f_streamlines[i] - this_seglv2.origin).project(this_seglv2.projector) / this_seglv2.width;
int key = projection;
if (key < 0)
key = 0;
else if (key > this_seglv2.length)
key = this_seglv2.length;
key += this_seglv2.bucket_pos_offset;
const int nearest_point = lv2_buckets[key];
if (!constraints2(f_streamlines[lineoffsets[this_line] + nearest_point]))
temp[j] = 0x80000000;
else
{
++cnt_result;
}
}
}
#pragma endregion
#pragma region storage
int this_offset = atomicAdd(current_offset, cnt_result);
int this_memoryblock = *current_memoryblock;
int this_end = this_offset + cnt_result;
int curr_end, this_count = 0;
		memory_pool_index[i] =
			((int64_t)this_memoryblock << 54) | ((int64_t)this_offset << 27) | cnt_result;
if (this_end > poolsize) {
if (this_offset > poolsize)
{
this_offset -= poolsize;
curr_end = this_end - poolsize;
++this_memoryblock;
overflew = true;
}
else {
curr_end = poolsize;
overflew = true;
}
}
else
curr_end = this_end;
for (int j = 0; j < tmp_block_size; ++j) {
if (temp[j] != 0x80000000) {
++this_count;
memory_pool[this_memoryblock][this_offset++] = temp[j];
if (this_offset >= curr_end && overflew) {
if (this_count >= cnt_result)
break;
this_count = 0;
curr_end = this_end - poolsize;
++this_memoryblock;
}
temp[j] = 0x80000000;
}
}
if (overflew)
break;
#pragma endregion
}
_idxs[_idx] = i;
atomicAnd(finished, i >= n_pts);
}
__global__
void parallelized_memory_allocation_test(
int** memory_pool, int64_t* memory_pool_index, int* current_memoryblock, int* current_offset, int n,
int* _idxs, const int poolsize, hiprandStateMRG32k3a_t *state, int* finished
) {
const int _idx = threadIdx.x + blockIdx.x * blockDim.x;
int idx = _idxs[_idx] ? _idxs[_idx] : _idx;//resume last work
bool overflew = 0;
for (; idx < n; idx += blockDim.x * gridDim.x) {
const int result_count = hiprand_uniform(state) * 32.f;
int this_offset = atomicAdd(current_offset, result_count);
int this_memoryblock = *current_memoryblock;
int this_end = this_offset + result_count;
		memory_pool_index[idx] =
			((int64_t)this_memoryblock << 54) | ((int64_t)this_offset << 27) | result_count;
if (this_end > poolsize)
{
for (; this_offset < poolsize; ++this_offset) {
memory_pool[this_memoryblock][this_offset] = idx * 10000 + this_offset;
}
this_offset = 0;
this_end -= poolsize;
++this_memoryblock;
overflew = true;
}
for (; this_offset < this_end; ++this_offset) {
memory_pool[this_memoryblock][this_offset] = idx * 10000 + this_offset;
}
if (overflew)
break;
}
_idxs[_idx] = idx;
atomicAnd(finished, idx >= n);
}
//__global__
//void cuLineHashing(//Lv.2 search
//
// int *__restrict__ results, int n_results,
// const GPU_SegmentsLv2 *segs_lv2, const float *f_stremlines, const int* lineoffsets,
// const short* lv2_buckets, const int n_lines, const int n_pts
//
//) {
//
// int i = threadIdx.x;
// for (; i < n_lines; i+= blockDim.x) {
// int j = blockIdx.x;
// for (; j < lineoffsets[i + 1]; j += gridDim.x) {
//
// int ptnum = lineoffsets[i] + j;
//
// for (int k = 0; k < maxNN; k++)
// if (results[ptnum*maxNN + k] > 0)
// {
// const int this_seg = results[ptnum * maxNN + k];
// const int ptoffset = segs_lv2[this_seg].bucket_pos_offset;
// const int bucket_begin = segs_lv2[this_seg].bucket_pos_offset;
// float projection = 0;
//#pragma unroll
// for (int _dim = 0; _dim < 3; _dim++)
// projection +=
// (f_stremlines[ptnum * 3 + _dim] - (segs_lv2[this_seg].origin)[_dim]) * segs_lv2[this_seg].projector[_dim];
//
// int bucket = ::floor(projection);
// if (projection < 0)
// bucket = 0;
// else if (projection > segs_lv2[this_seg].width - 1)
// bucket = segs_lv2[this_seg].width - 1;
//
// results[ptnum * maxNN + k] = segs_lv2[this_seg].line << 16 + (ptoffset + lv2_buckets[bucket_begin + bucket]);//n_lines < 65535 && pt_on_line < 65535
// }
// else
// break;
//
//
// }
// }
//}
__global__
void cuLineHashing_mp(
int ** memory_pool
) {
}
__global__
void cuHeapify(
_in_ _out_ float *variation, _in_ _out_ float*distances, _in_ _out_ float *points,
const int n_lines, const int n_pts
) {
}
__global__
void cuSimilarity(
_out_ float *variation, _out_ float* distances, _out_ int*points,
_in_ const float *f_streamlines, _in_ const int * lineoffsets, _in_ const int* results,
const int n_lines, const int n_pts
) {
int i = threadIdx.x;
for (; i < n_lines; i += blockDim.x) {
int j = blockIdx.x;
for (; j < lineoffsets[i + 1]; j += gridDim.x) {
const int ptnum = lineoffsets[i] + j;
for (int k = 0; k < maxNN; k++)
if (results[ptnum*maxNN + k] != -1) {
const unsigned int targetline = ((unsigned)results[ptnum*maxNN + k]) >> 16;
const unsigned int targetpt_on_line = ((unsigned)results[ptnum*maxNN + k] & 0xffff);
const unsigned int target_ptnum = lineoffsets[targetline] + targetpt_on_line;
					int begin = lineoffsets[i] + ((j > similarity_window) ? (j - similarity_window) : 0);
int end = lineoffsets[i] + j + similarity_window;
end = (end >= lineoffsets[i + 1]) ? lineoffsets[i + 1] - 1 : end;
int forward = ptnum - begin;
int backward = end - ptnum;
forward = __macro_min(targetpt_on_line, forward);
backward = __macro_min(lineoffsets[targetline + 1] - lineoffsets[targetline] - targetpt_on_line - 1, backward);
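					// What follows appears to compute, over a window of up to similarity_window
					// points on either side, the mean squared deviation of the pointwise
					// distances from the center distance, i.e. how much the separation between
					// the two streamlines varies around this neighbor pair.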
float center_dist = 0;
#pragma unroll
for (int _dim = 0; _dim < 3; _dim++)
center_dist += pow2(f_streamlines[ptnum*3 + _dim] - f_streamlines[target_ptnum*3 + _dim]);
center_dist = sqrtf(center_dist);
float _variation = 0;
int start_this = ptnum - forward, start_target = target_ptnum - forward;
for (; start_this < ptnum; start_this++, start_target++) {
float _dist = 0;
#pragma unroll
for (int _dim = 0; _dim < 3; _dim++)
_dist += pow2(f_streamlines[start_this * 3 + _dim] - f_streamlines[start_target * 3 + _dim]);
_variation += pow2(center_dist - sqrtf(_dist));
}
for (; start_this < ptnum + backward; start_this++, start_target++) {
float _dist = 0;
#pragma unroll
for (int _dim = 0; _dim < 3; _dim++)
_dist += pow2(f_streamlines[start_this * 3 + _dim] - f_streamlines[start_target * 3 + _dim]);
_variation += pow2(center_dist - sqrtf(_dist));
}
const int interval = backward + forward - 1;
if (interval > 0)
_variation /= interval;
else
_variation = 0;
distances[ptnum*maxNN + k] = center_dist;
variation[ptnum * maxNN + k] = _variation;
}
else break;
}
}
}
namespace cudadevice_variables {
GPU_SegmentsLv2* segslv2; //Lv.2 hash projector
GPU_Segments* segs;//centroid + line No. for Lv.1 LSH
float* l2buckets;
GPU_HashTable *d_hash;
GPU_Lsh_Func *d_funcs;
float* d_streamlines; // FileIO:: f_streamlines[0]
int* d_lineoffsets;// Streamline::sizes;
//Memory pool
int *d_cof,//current offset
*d_cmb,
*d_fin,
*d_mp,
*d_tmp;
int64_t* d_idxs;
	const int poolsize = 134217728;//2^27 (128M) 4-byte words = 512MB
const int tmp_size = 4096;
const int set_associative = 2; // 2 pre allocated sets of tmp pages, 1 dynamically allocated tmp
//scale variables
int n_streamlines, n_points, n_segments;
}
//---------------------------------------------------------------------
//--------------------- NVCC Compiled Host functions. ---------------------
void cudaInit(
Vector3 *f_streamlines, int* lineoffsets, GPU_Segments* segments,
GPU_Lsh_Func *lsh_funcs, GPU_HashTable *hash_table,
GPU_SegmentsLv2 *segslv2, float* l2buckets
)
{
}
void hipLaunch() {
}
void cudaFinalize() {
}
|
4863cd09f8ce81c8fcb4ed1b683a06dd5b848a3f.cu
|
#include <stdint.h>
#include "cuProxy.cuh"
#include "Vector.h"
template<typename T>
__device__
inline T constexpr pow2(const T v) noexcept { return v * v; }
template<class _Ty>
__device__ __host__ __forceinline__
int binarySearch(const _Ty* __restrict__ orderedList, int lowerbound, int upperbound, const _Ty& key) {
while (upperbound > lowerbound) {
int mid = (lowerbound + upperbound) >> 1;
if (mid == lowerbound) {
return orderedList[mid] == key ? lowerbound : -lowerbound;
}
else {
if (orderedList[mid] > key)
upperbound = mid;
else if (orderedList[mid] < key)
lowerbound = mid;
else
return mid;
}
}
return orderedList[lowerbound] == key ? lowerbound : -lowerbound;
}
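// Presumably meant to be launched with a single thread once a pool overflow has been
// detected: it advances the pool to the next memory block and rebases the running offset.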
__global__
void update_memorypool(int* current_memoryblock, int* current_offset, const int poolsize) {
	(*current_memoryblock)++;
*current_offset -= poolsize;
}
__device__ __forceinline__
bool constraints1(const Vector3& point) {
return true;
}
__device__ __forceinline__
bool constraints2(const Vector3& point) {
return true;
}
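// constraints1/constraints2 are placeholder predicates (always true); they appear to be
// hooks for caller-specific spatial filters on candidate centroids and points.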
__global__
void cuLSH(
/*lv.1 search*/
const GPU_Lsh_Func *funcs, const GPU_HashTable *hashtable, const int64_t *table_contents, const int* segments_in_table,
const GPU_Segments *segments, int* temp, const Vector3 *f_streamlines, const int* lineoffsets, const int n_lines,
const int n_pts, const int set_size, const int set_associative,
/*lv.2 search*/
const GPU_SegmentsLv2 *segs_lv2, const short* lv2_buckets,
/*memory_pool*/
int** memory_pool, int64_t* memory_pool_index, int* current_memoryblock, int* current_offset, int* _idxs, const int poolsize,
int* finished
) {
extern __shared__ unsigned char sh_tmp[];
const int tmp_block_size = (set_associative - 1) * set_size;
int _idx = blockIdx.x;
temp += (_idx) * tmp_block_size;
int i = _idxs[_idx] ? _idxs[_idx] : _idx;
constexpr int get_tag = 0xf;
bool overflew = 0;
//bool sgn = false;
for (; i < n_pts; i += gridDim.x) {
int cnt_result = 0;
//lv.1 search
#pragma region lv.1 search
for (int t = 0; t < L; t++)
{
int64_t fingerprint1 = 0, fingerprint2 = 0;
for (int f = 0; f < K; f++) {
const GPU_Lsh_Func curr_func = funcs[hashtable[t].LSHFuncs[f]];
const int n_buckets = curr_func.n_buckets;
const int func_val = curr_func(f_streamlines[i]);
int64_t tmp_fp1 = hashtable[t].r1[f] * func_val;
int64_t tmp_fp2 = hashtable[t].r1[f] * func_val;
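				// Assuming Prime == 2^32 - 5, the next two lines fold the 64-bit products
				// using 2^32 ≡ 5 (mod 2^32 - 5): x = hi*2^32 + lo ≡ 5*hi + lo.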
tmp_fp1 = 5 * (tmp_fp1 >> 32ll) + (tmp_fp1 & 0xffffffffll);
tmp_fp2 = 5 * (tmp_fp2 >> 32ll) + (tmp_fp2 & 0xffffffffll);
fingerprint1 += (tmp_fp1 >> 32ll) ? (tmp_fp1 - Prime) : tmp_fp1;
fingerprint2 += (tmp_fp2 >> 32ll) ? (tmp_fp2 - Prime) : tmp_fp2;
fingerprint1 = (fingerprint1 >> 32ll) ? (fingerprint1 - Prime) : fingerprint1;
fingerprint2 = (fingerprint2 >> 32ll) ? (fingerprint2 - Prime) : fingerprint2;
}
fingerprint1 %= TABLESIZE;
fingerprint2 %= Prime;
const int table_search_begin = hashtable[t].table_offsets[fingerprint1],
table_search_end = hashtable[t].table_offsets[fingerprint1 + 1];
int found = binarySearch(table_contents, table_search_begin, table_search_end, fingerprint2);
if (found > 0) {
const unsigned line = segments[found].line;
const float dist = segments[found].centroid.sqDist(f_streamlines[i]);
if (dist < 1.f && constraints1(segments[found].centroid))
{
const int position = line / set_associative;
const int tag = line % set_associative;
const int current_set = (temp[position] &0x7fffffff) >> 27;
constexpr int set_increment = 1 << 27;
				if (current_set < set_associative)//max of 16-way set-associative; available slots
{
bool exists = false;
for (int j = 0; j < current_set; j++) {
const int this_segment = ((temp[position + j * set_size] & 0x07ffffff) >> 4);
						if ((temp[position + j * set_size] & get_tag) == tag) {
if (dist < segments[this_segment].centroid.sqDist(f_streamlines[i]))
{
temp[position + j * set_size] &= 0xf800000f;
temp[position + j * set_size] |= (found << 4);
exists = true;
break;
}
}
}
if (!exists) {
temp[position] += set_increment;// total_sets ++
temp[position + (current_set + 1) * set_size] = found << 4 | tag;
}
}
}
}
}
#pragma endregion
#pragma region lv.2 search
for (int j = 0; j < tmp_block_size; ++j) {
if (temp[j] != 0x80000000) {
const int this_tag = temp[j] & get_tag;
const int this_segment = ((temp[j] & 0x07ffffff) >> 4);
				const int this_line = (j << 4) + this_tag;
const GPU_SegmentsLv2& this_seglv2 = segs_lv2[this_segment];
const float projection = (f_streamlines[i] - this_seglv2.origin).project(this_seglv2.projector) / this_seglv2.width;
int key = projection;
if (key < 0)
key = 0;
else if (key > this_seglv2.length)
key = this_seglv2.length;
key += this_seglv2.bucket_pos_offset;
const int nearest_point = lv2_buckets[key];
if (!constraints2(f_streamlines[lineoffsets[this_line] + nearest_point]))
temp[j] = 0x80000000;
else
++cnt_result;
}
}
#pragma endregion
#pragma region storing
int this_offset = atomicAdd(current_offset, cnt_result);
int this_memoryblock = *current_memoryblock;
int this_end = this_offset + cnt_result;
int curr_end, this_count = 0;
		memory_pool_index[i] =
			((int64_t)this_memoryblock << 54) | ((int64_t)this_offset << 27) | cnt_result;
if (this_end > poolsize) {
if (this_offset > poolsize)
{
this_offset -= poolsize;
curr_end = this_end - poolsize;
++this_memoryblock;
overflew = true;
}
else {
curr_end = poolsize;
overflew = true;
}
}
else
curr_end = this_end;
for (int j = 0; j < tmp_block_size; ++j) {
if (temp[j] != 0x80000000) {
++this_count;
memory_pool[this_memoryblock][this_offset++] = temp[j];
if (this_offset >= curr_end && overflew) {
if (this_count >= cnt_result)
break;
this_count = 0;
curr_end = this_end - poolsize;
++this_memoryblock;
}
temp[j] = 0x80000000;
}
}
if (overflew)
break;
#pragma endregion
}
_idxs[_idx] = i;
atomicAnd(finished, i >= n_pts);
}
__device__
unsigned int findRange(const int* orderedList, int lowerbound, int upperbound, const int key) {
	int mid = lowerbound;
while (lowerbound + 1 < upperbound) {
mid = (lowerbound + upperbound) >> 1;
if (orderedList[mid] < key)
lowerbound = mid;
else if (orderedList[mid] > key)
upperbound = mid;
else
break;
}
if (orderedList[mid] != key)
return 0xffffffff;
int upe = mid, lowe = mid;
while (lowerbound < lowe - 1) {
mid = (lowerbound + lowe) >> 1;
if (orderedList[mid] < key)
lowerbound = mid;
else
lowe = mid;
}
while (upperbound > upe + 1) {
mid = (upperbound + upe) >> 1;
if (orderedList[mid] > key)
upperbound = mid;
else
upe = mid;
}
return lowerbound | ((upperbound - lowerbound)<<20);
}
__global__
void cuLSH_lv1(
/*lv.1 search*/
const GPU_Lsh_Func *funcs, const GPU_HashTable *hashtable, const int64_t *table_contents, const unsigned int* segments_in_table,
const GPU_Segments *segments, int* temp, const Vector3 *f_streamlines, const int* lineoffsets, const int n_lines,
const int n_pts, const int n_segments, unsigned int** projections
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (; i < n_pts; i += gridDim.x * blockDim.x) {
for (int t = 0; t < L; t++)
{
int64_t fingerprint1 = 0, fingerprint2 = 0;
for (int f = 0; f < K; f++) {
const GPU_Lsh_Func curr_func = funcs[hashtable[t].LSHFuncs[f]];
const int n_buckets = curr_func.n_buckets;
const int func_val = curr_func(f_streamlines[i]);
int64_t tmp_fp1 = hashtable[t].r1[f] * func_val;
int64_t tmp_fp2 = hashtable[t].r1[f] * func_val;
tmp_fp1 = tmp_fp1 % TABLESIZE;
tmp_fp2 = tmp_fp2 % Prime;
fingerprint1 += tmp_fp1;
fingerprint2 += tmp_fp2;
fingerprint1 %= TABLESIZE;
fingerprint2 %= Prime;
}
fingerprint1 %= TABLESIZE;
fingerprint2 %= Prime;
const int table_search_begin = hashtable[t].table_offsets[fingerprint1],
table_search_end = hashtable[t].table_offsets[fingerprint1 + 1];
int found = binarySearch(table_contents + t*n_segments, table_search_begin, table_search_end, fingerprint2);
if (found == -1)
projections[t][i] = -1;
else
projections[t][i] = segments_in_table[found];//Segments that has the same fingerprints (1&2)
}
}
}
__global__
void cuLSH_lv2(
/*environments*/
const GPU_Segments *segments, unsigned char* temp, const Vector3 *f_streamlines, const int* lineoffsets, const int n_lines,
const int n_pts, const int set_size, const int set_associative, const unsigned int** projections,
/*lv.2 search*/
const GPU_SegmentsLv2 *segs_lv2, const short* lv2_buckets,
/*memory_pool*/
int** memory_pool, int64_t* memory_pool_index, int* current_memoryblock, int* current_offset, int* _idxs, const int poolsize,
int* finished
) {
extern __shared__ unsigned char sh_tmp[];
unsigned char* ptr_tmp = sh_tmp;
const int tmp_block_size = (set_associative - 1) * set_size;
int _idx = blockIdx.x * blockDim.x + threadIdx.x;
temp += (_idx)* tmp_block_size;
int i = _idxs[_idx] ? _idxs[_idx] : _idx;
constexpr int get_tag = 0xf;
	const unsigned int cache_page_size = 384;//49152 (bytes of shared memory per block) /128 (blocks) /1 (byte per segment)
auto get_cache = [&temp, &ptr_tmp, &cache_page_size](const int _Index) -> unsigned char&/*constexpr*/
{ return (_Index < cache_page_size) ? ptr_tmp[_Index] : (temp)[_Index - cache_page_size]; };
bool overflew = 0;
//bool sgn = false;
for (; i < n_pts; i += gridDim.x*blockDim.x) {
int cnt_result = 0;
#pragma region lv.2 search
for (int j = 0; j < tmp_block_size; ++j) {
if (temp[j] != 0x80000000) {
const int this_tag = temp[j] & get_tag;
const int this_segment = ((temp[j] & 0x07ffffff) >> 4);
			const int this_line = (j << 4) + this_tag;
const GPU_SegmentsLv2& this_seglv2 = segs_lv2[this_segment];
const float projection = (f_streamlines[i] - this_seglv2.origin).project(this_seglv2.projector) / this_seglv2.width;
int key = projection;
if (key < 0)
key = 0;
else if (key > this_seglv2.length)
key = this_seglv2.length;
key += this_seglv2.bucket_pos_offset;
const int nearest_point = lv2_buckets[key];
if (!constraints2(f_streamlines[lineoffsets[this_line] + nearest_point]))
temp[j] = 0x80000000;
else
{
++cnt_result;
}
}
}
#pragma endregion
#pragma region storage
int this_offset = atomicAdd(current_offset, cnt_result);
int this_memoryblock = *current_memoryblock;
int this_end = this_offset + cnt_result;
int curr_end, this_count = 0;
		memory_pool_index[i] =
			((int64_t)this_memoryblock << 54) | ((int64_t)this_offset << 27) | cnt_result;
if (this_end > poolsize) {
if (this_offset > poolsize)
{
this_offset -= poolsize;
curr_end = this_end - poolsize;
++this_memoryblock;
overflew = true;
}
else {
curr_end = poolsize;
overflew = true;
}
}
else
curr_end = this_end;
for (int j = 0; j < tmp_block_size; ++j) {
if (temp[j] != 0x80000000) {
++this_count;
memory_pool[this_memoryblock][this_offset++] = temp[j];
if (this_offset >= curr_end && overflew) {
if (this_count >= cnt_result)
break;
this_count = 0;
curr_end = this_end - poolsize;
++this_memoryblock;
}
temp[j] = 0x80000000;
}
}
if (overflew)
break;
#pragma endregion
}
_idxs[_idx] = i;
atomicAnd(finished, i >= n_pts);
}
__global__
void parallelized_memory_allocation_test(
int** memory_pool, int64_t* memory_pool_index, int* current_memoryblock, int* current_offset, int n,
int* _idxs, const int poolsize, curandStateMRG32k3a_t *state, int* finished
) {
const int _idx = threadIdx.x + blockIdx.x * blockDim.x;
int idx = _idxs[_idx] ? _idxs[_idx] : _idx;//resume last work
bool overflew = 0;
for (; idx < n; idx += blockDim.x * gridDim.x) {
const int result_count = curand_uniform(state) * 32.f;
int this_offset = atomicAdd(current_offset, result_count);
int this_memoryblock = *current_memoryblock;
int this_end = this_offset + result_count;
		memory_pool_index[idx] =
			((int64_t)this_memoryblock << 54) | ((int64_t)this_offset << 27) | result_count;
if (this_end > poolsize)
{
for (; this_offset < poolsize; ++this_offset) {
memory_pool[this_memoryblock][this_offset] = idx * 10000 + this_offset;
}
this_offset = 0;
this_end -= poolsize;
++this_memoryblock;
overflew = true;
}
for (; this_offset < this_end; ++this_offset) {
memory_pool[this_memoryblock][this_offset] = idx * 10000 + this_offset;
}
if (overflew)
break;
}
_idxs[_idx] = idx;
atomicAnd(finished, idx >= n);
}
//__global__
//void cuLineHashing(//Lv.2 search
//
// int *__restrict__ results, int n_results,
// const GPU_SegmentsLv2 *segs_lv2, const float *f_stremlines, const int* lineoffsets,
// const short* lv2_buckets, const int n_lines, const int n_pts
//
//) {
//
// int i = threadIdx.x;
// for (; i < n_lines; i+= blockDim.x) {
// int j = blockIdx.x;
// for (; j < lineoffsets[i + 1]; j += gridDim.x) {
//
// int ptnum = lineoffsets[i] + j;
//
// for (int k = 0; k < maxNN; k++)
// if (results[ptnum*maxNN + k] > 0)
// {
// const int this_seg = results[ptnum * maxNN + k];
// const int ptoffset = segs_lv2[this_seg].bucket_pos_offset;
// const int bucket_begin = segs_lv2[this_seg].bucket_pos_offset;
// float projection = 0;
//#pragma unroll
// for (int _dim = 0; _dim < 3; _dim++)
// projection +=
// (f_stremlines[ptnum * 3 + _dim] - (segs_lv2[this_seg].origin)[_dim]) * segs_lv2[this_seg].projector[_dim];
//
// int bucket = std::floor(projection);
// if (projection < 0)
// bucket = 0;
// else if (projection > segs_lv2[this_seg].width - 1)
// bucket = segs_lv2[this_seg].width - 1;
//
// results[ptnum * maxNN + k] = segs_lv2[this_seg].line << 16 + (ptoffset + lv2_buckets[bucket_begin + bucket]);//n_lines < 65535 && pt_on_line < 65535
// }
// else
// break;
//
//
// }
// }
//}
__global__
void cuLineHashing_mp(
int ** memory_pool
) {
}
__global__
void cuHeapify(
_in_ _out_ float *variation, _in_ _out_ float*distances, _in_ _out_ float *points,
const int n_lines, const int n_pts
) {
}
__global__
void cuSimilarity(
_out_ float *variation, _out_ float* distances, _out_ int*points,
_in_ const float *f_streamlines, _in_ const int * lineoffsets, _in_ const int* results,
const int n_lines, const int n_pts
) {
int i = threadIdx.x;
for (; i < n_lines; i += blockDim.x) {
int j = blockIdx.x;
for (; j < lineoffsets[i + 1]; j += gridDim.x) {
const int ptnum = lineoffsets[i] + j;
for (int k = 0; k < maxNN; k++)
if (results[ptnum*maxNN + k] != -1) {
const unsigned int targetline = ((unsigned)results[ptnum*maxNN + k]) >> 16;
const unsigned int targetpt_on_line = ((unsigned)results[ptnum*maxNN + k] & 0xffff);
const unsigned int target_ptnum = lineoffsets[targetline] + targetpt_on_line;
					int begin = lineoffsets[i] + ((j > similarity_window) ? (j - similarity_window) : 0);
int end = lineoffsets[i] + j + similarity_window;
end = (end >= lineoffsets[i + 1]) ? lineoffsets[i + 1] - 1 : end;
int forward = ptnum - begin;
int backward = end - ptnum;
forward = __macro_min(targetpt_on_line, forward);
backward = __macro_min(lineoffsets[targetline + 1] - lineoffsets[targetline] - targetpt_on_line - 1, backward);
float center_dist = 0;
#pragma unroll
for (int _dim = 0; _dim < 3; _dim++)
center_dist += pow2(f_streamlines[ptnum*3 + _dim] - f_streamlines[target_ptnum*3 + _dim]);
center_dist = sqrtf(center_dist);
float _variation = 0;
int start_this = ptnum - forward, start_target = target_ptnum - forward;
for (; start_this < ptnum; start_this++, start_target++) {
float _dist = 0;
#pragma unroll
for (int _dim = 0; _dim < 3; _dim++)
_dist += pow2(f_streamlines[start_this * 3 + _dim] - f_streamlines[start_target * 3 + _dim]);
_variation += pow2(center_dist - sqrtf(_dist));
}
for (; start_this < ptnum + backward; start_this++, start_target++) {
float _dist = 0;
#pragma unroll
for (int _dim = 0; _dim < 3; _dim++)
_dist += pow2(f_streamlines[start_this * 3 + _dim] - f_streamlines[start_target * 3 + _dim]);
_variation += pow2(center_dist - sqrtf(_dist));
}
const int interval = backward + forward - 1;
if (interval > 0)
_variation /= interval;
else
_variation = 0;
distances[ptnum*maxNN + k] = center_dist;
variation[ptnum * maxNN + k] = _variation;
}
else break;
}
}
}
namespace cudadevice_variables {
GPU_SegmentsLv2* segslv2; //Lv.2 hash projector
GPU_Segments* segs;//centroid + line No. for Lv.1 LSH
float* l2buckets;
GPU_HashTable *d_hash;
GPU_Lsh_Func *d_funcs;
float* d_streamlines; // FileIO:: f_streamlines[0]
int* d_lineoffsets;// Streamline::sizes;
//Memory pool
int *d_cof,//current offset
*d_cmb,
*d_fin,
*d_mp,
*d_tmp;
int64_t* d_idxs;
	const int poolsize = 134217728;//2^27 (128M) 4-byte words = 512MB
const int tmp_size = 4096;
const int set_associative = 2; // 2 pre allocated sets of tmp pages, 1 dynamically allocated tmp
//scale variables
int n_streamlines, n_points, n_segments;
}
//---------------------------------------------------------------------
//--------------------- NVCC Compiled Host functions. ---------------------
void cudaInit(
Vector3 *f_streamlines, int* lineoffsets, GPU_Segments* segments,
GPU_Lsh_Func *lsh_funcs, GPU_HashTable *hash_table,
GPU_SegmentsLv2 *segslv2, float* l2buckets
)
{
}
void cudaLaunch() {
}
void cudaFinalize() {
}
|
22680ef63d6ad92e5997bcac4e4ed7b3f1c53f9d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
//134217728
// Definition of our kernel for the cuadradoV function
__global__ void sumV_kernel_cuda(double *arrayA,double *arrayB , int n){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < n)
arrayA[global_id] = arrayA[global_id] + arrayB[global_id];
}
void checkparams(unsigned long *n, unsigned int *cb);
double dwalltime();
__global__ void sumV_kernel_cuda(double *d_vecA,double *d_vecB, long n, unsigned long dist){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id + dist < n)
d_vecA[global_id + dist] = d_vecA[global_id + dist] + d_vecB[global_id + dist];
}
int main(int argc, char *argv[]){
/*
if (argc != 3){
    printf("Missing arguments: N, CUDA_BLK\n");
return 0;
}
unsigned long N = atoi (argv[1]);
unsigned int CUDA_BLK = atoi(argv[2]);*/
unsigned long N = 107107840;
unsigned int CUDA_BLK = 32;
unsigned long max_N = N;
checkparams(&max_N,&CUDA_BLK);
double *vecA,*vecB,*d_vecA,*d_vecB,timetick;
unsigned int i;
hipError_t error;
unsigned long numBytes =sizeof(double)*N ;
struct hipDeviceProp_t capabilities;
hipGetDeviceProperties (&capabilities, 0);
vecA = (double *)malloc(numBytes);
vecB = (double *)malloc(numBytes);
for (i = 0; i < N; i++){
vecA[i] = i;
vecB[i] = i;
}
hipMalloc((void **) &d_vecA, numBytes);
hipMalloc((void **) &d_vecB, numBytes);
hipMemcpy(d_vecA, vecA, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_vecB, vecB, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
  // One-dimensional block of threads (*cb* threads)
  dim3 dimBlock(32);
  // One-dimensional grid (*ceil(n/cb)* blocks)
  dim3 dimGrid((max_N + dimBlock.x - 1) / dimBlock.x);
long aux_N = N;
timetick = dwalltime();
int rep = 0;
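  // checkparams() may have clamped max_N below N, so the kernel is launched
  // repeatedly, each pass handling the next max_N-element chunk through the
  // dist = max_N*rep offset, until the whole vector has been processed.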
while(aux_N > 0){
printf("%lu\n",aux_N);
hipLaunchKernelGGL(( sumV_kernel_cuda), dim3(dimGrid), dim3(dimBlock), 0, 0, d_vecA, d_vecB, N, max_N*rep);
aux_N = aux_N - max_N;
rep++;
}
hipDeviceSynchronize();
  printf("-> GPU execution time %f\n", dwalltime() - timetick);
  error = hipGetLastError();
  // Move the result: GPU -> CPU
  timetick = dwalltime();
  hipMemcpy(vecA, d_vecA, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
  printf("-> GPU ==>> CPU copy time %f\n", dwalltime() - timetick);
for(i= 0; i < 20; i++){
printf("%f|",vecA[i]);
}
printf("\n");
printf("error code: %d\n",error);
  printf("\n%lu||||%u\n",(max_N + dimBlock.x - 1) / dimBlock.x,CUDA_BLK);
  hipFree (d_vecA);
  hipFree (d_vecB);
free(vecA);
free(vecB);
return 0;
}
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
void checkparams(unsigned long *n, unsigned int *cb){
struct hipDeviceProp_t capabilities;
  // If the total number of threads is smaller than the block size, shrink the block
if (*cb > *n)
*cb = *n;
hipGetDeviceProperties (&capabilities, 0);
if (*cb > capabilities.maxThreadsDim[0]) {
*cb = capabilities.maxThreadsDim[0];
    printf("->Threads/block changed to %d (max per block for dev)\n\n",
	   *cb);
}
if (((*n + *cb - 1) / *cb) > capabilities.maxGridSize[0]) {
*cb = 2 * (*n - 1) / (capabilities.maxGridSize[0] - 1);
if (*cb > capabilities.maxThreadsDim[0]) {
*cb = capabilities.maxThreadsDim[0];
      printf("->Threads/block changed to %d (max per block for dev)\n",
	     *cb);
if (*n > (capabilities.maxGridSize[0] * *cb)) {
*n = capabilities.maxGridSize[0] * *cb;
	printf("->Total number of threads changed to %lu (max per grid for dev)\n\n", *n);
} else {
printf("\n");
}
} else {
      printf("->Threads/block changed to %d (%d max blocks/grid for dev)\n\n",
	     *cb, capabilities.maxGridSize[0]);
}
}
}
|
22680ef63d6ad92e5997bcac4e4ed7b3f1c53f9d.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
//134217728
// Definition of our kernel for the cuadradoV function
__global__ void sumV_kernel_cuda(double *arrayA,double *arrayB , int n){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < n)
arrayA[global_id] = arrayA[global_id] + arrayB[global_id];
}
void checkparams(unsigned long *n, unsigned int *cb);
double dwalltime();
__global__ void sumV_kernel_cuda(double *d_vecA,double *d_vecB, long n, unsigned long dist){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id + dist < n)
d_vecA[global_id + dist] = d_vecA[global_id + dist] + d_vecB[global_id + dist];
}
int main(int argc, char *argv[]){
/*
if (argc != 3){
    printf("Missing arguments: N, CUDA_BLK\n");
return 0;
}
unsigned long N = atoi (argv[1]);
unsigned int CUDA_BLK = atoi(argv[2]);*/
unsigned long N = 107107840;
unsigned int CUDA_BLK = 32;
unsigned long max_N = N;
checkparams(&max_N,&CUDA_BLK);
double *vecA,*vecB,*d_vecA,*d_vecB,timetick;
unsigned int i;
cudaError_t error;
unsigned long numBytes =sizeof(double)*N ;
struct cudaDeviceProp capabilities;
cudaGetDeviceProperties (&capabilities, 0);
vecA = (double *)malloc(numBytes);
vecB = (double *)malloc(numBytes);
for (i = 0; i < N; i++){
vecA[i] = i;
vecB[i] = i;
}
cudaMalloc((void **) &d_vecA, numBytes);
cudaMalloc((void **) &d_vecB, numBytes);
cudaMemcpy(d_vecA, vecA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_vecB, vecB, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
  // One-dimensional block of threads (*cb* threads)
  dim3 dimBlock(32);
  // One-dimensional grid (*ceil(n/cb)* blocks)
  dim3 dimGrid((max_N + dimBlock.x - 1) / dimBlock.x);
long aux_N = N;
timetick = dwalltime();
int rep = 0;
while(aux_N > 0){
printf("%lu\n",aux_N);
sumV_kernel_cuda<<<dimGrid, dimBlock>>>(d_vecA, d_vecB, N, max_N*rep);
aux_N = aux_N - max_N;
rep++;
}
cudaThreadSynchronize();
  printf("-> GPU execution time %f\n", dwalltime() - timetick);
  error = cudaGetLastError();
  // Move the result: GPU -> CPU
  timetick = dwalltime();
  cudaMemcpy(vecA, d_vecA, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
  printf("-> GPU ==>> CPU copy time %f\n", dwalltime() - timetick);
for(i= 0; i < 20; i++){
printf("%f|",vecA[i]);
}
printf("\n");
printf("error code: %d\n",error);
  printf("\n%lu||||%u\n",(max_N + dimBlock.x - 1) / dimBlock.x,CUDA_BLK);
  cudaFree (d_vecA);
  cudaFree (d_vecB);
free(vecA);
free(vecB);
return 0;
}
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
void checkparams(unsigned long *n, unsigned int *cb){
struct cudaDeviceProp capabilities;
  // If the total number of threads is smaller than the block size, shrink the block
if (*cb > *n)
*cb = *n;
cudaGetDeviceProperties (&capabilities, 0);
if (*cb > capabilities.maxThreadsDim[0]) {
*cb = capabilities.maxThreadsDim[0];
    printf("->Threads/block changed to %d (max per block for dev)\n\n",
	   *cb);
}
if (((*n + *cb - 1) / *cb) > capabilities.maxGridSize[0]) {
*cb = 2 * (*n - 1) / (capabilities.maxGridSize[0] - 1);
if (*cb > capabilities.maxThreadsDim[0]) {
*cb = capabilities.maxThreadsDim[0];
      printf("->Threads/block changed to %d (max per block for dev)\n",
	     *cb);
if (*n > (capabilities.maxGridSize[0] * *cb)) {
*n = capabilities.maxGridSize[0] * *cb;
	printf("->Total number of threads changed to %lu (max per grid for dev)\n\n", *n);
} else {
printf("\n");
}
} else {
      printf("->Threads/block changed to %d (%d max blocks/grid for dev)\n\n",
	     *cb, capabilities.maxGridSize[0]);
}
}
}
|
1154a01ad1e36d769420caa2bd15114da9089743.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) || defined(_MSC_VER)
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
#include <windows.h>
#include <chrono>
#include "getopt.h"
# pragma warning(disable:4819)
#else
#include <getopt.h>
#endif
#include <fstream>
#include <iostream>
#include <hip/hip_runtime.h>
#include "nvTiff_utils.h"
#include "cudamacro.h"
#include <nvTiff.h>
#define CHECK_NVTIFF(call) \
{ \
nvtiffStatus_t _e = (call); \
if (_e != NVTIFF_STATUS_SUCCESS) \
{ \
std::cerr<< "nvTiff failure: '#" << _e<<std::endl; \
exit(EXIT_FAILURE); \
} \
}
//#define LIBTIFF_TEST
#ifdef LIBTIFF_TEST
#include <tiffio.h>
#endif
#define MAX_STR_LEN (256)
// quick and dirty BMP file writer
static void writeBMPFile(const char *filename, unsigned char *chan, int LD, int WIDTH, int HEIGHT, int BPP, int IS_GREYSCALE) {
unsigned int headers[13];
FILE * outfile;
int extrabytes;
int paddedsize;
int x; int y; int n;
int red, green, blue;
extrabytes = 4 - ((WIDTH * 3) % 4); // How many bytes of padding to add to each
// horizontal line - the size of which must
// be a multiple of 4 bytes.
if (extrabytes == 4)
extrabytes = 0;
paddedsize = ((WIDTH * 3) + extrabytes) * HEIGHT;
// Headers...
// Note that the "BM" identifier in bytes 0 and 1 is NOT included in these "headers".
headers[0] = paddedsize + 54; // bfSize (whole file size)
headers[1] = 0; // bfReserved (both)
headers[2] = 54; // bfOffbits
headers[3] = 40; // biSize
headers[4] = WIDTH; // biWidth
headers[5] = HEIGHT; // biHeight
// Would have biPlanes and biBitCount in position 6, but they're shorts.
// It's easier to write them out separately (see below) than pretend
// they're a single int, especially with endian issues...
headers[7] = 0; // biCompression
headers[8] = paddedsize; // biSizeImage
headers[9] = 0; // biXPelsPerMeter
headers[10] = 0; // biYPelsPerMeter
headers[11] = 0; // biClrUsed
headers[12] = 0; // biClrImportant
outfile = Fopen(filename, "wb");
//
// Headers begin...
// When printing ints and shorts, we write out 1 character at a time to avoid endian issues.
//
fprintf(outfile, "BM");
for (n = 0; n <= 5; n++)
{
fprintf(outfile, "%c", headers[n] & 0x000000FF);
fprintf(outfile, "%c", (headers[n] & 0x0000FF00) >> 8);
fprintf(outfile, "%c", (headers[n] & 0x00FF0000) >> 16);
fprintf(outfile, "%c", (headers[n] & (unsigned int) 0xFF000000) >> 24);
}
// These next 4 characters are for the biPlanes and biBitCount fields.
fprintf(outfile, "%c", 1);
fprintf(outfile, "%c", 0);
fprintf(outfile, "%c", 24);
fprintf(outfile, "%c", 0);
for (n = 7; n <= 12; n++)
{
fprintf(outfile, "%c", headers[n] & 0x000000FF);
fprintf(outfile, "%c", (headers[n] & 0x0000FF00) >> 8);
fprintf(outfile, "%c", (headers[n] & 0x00FF0000) >> 16);
fprintf(outfile, "%c", (headers[n] & (unsigned int) 0xFF000000) >> 24);
}
//
// Headers done, now write the data...
//
for (y = HEIGHT - 1; y >= 0; y--) // BMP image format is written from bottom to top...
{
for (x = 0; x <= WIDTH - 1; x++)
{
/*
red = reduce(redcount[x][y] + COLOUR_OFFSET) * red_multiplier;
green = reduce(greencount[x][y] + COLOUR_OFFSET) * green_multiplier;
blue = reduce(bluecount[x][y] + COLOUR_OFFSET) * blue_multiplier;
*/
if (!IS_GREYSCALE) {
red = chan[0 + y*LD*BPP + BPP*x];
green = chan[1 + y*LD*BPP + BPP*x];
blue = chan[2 + y*LD*BPP + BPP*x];
} else {
red = chan[0 + y*LD*BPP + BPP*x];
green = red;
blue = red;
}
if (red > 255) red = 255; if (red < 0) red = 0;
if (green > 255) green = 255; if (green < 0) green = 0;
if (blue > 255) blue = 255; if (blue < 0) blue = 0;
// Also, it's written in (b,g,r) format...
fprintf(outfile, "%c", blue);
fprintf(outfile, "%c", green);
fprintf(outfile, "%c", red);
}
if (extrabytes) // See above - BMP lines must be of lengths divisible by 4.
{
for (n = 1; n <= extrabytes; n++)
{
fprintf(outfile, "%c", 0);
}
}
}
fclose(outfile);
return;
}
void writePPM(const char * filename, unsigned char *chan, int LD, int WIDTH, int HEIGHT, int BPP, int NUMCOMP)
{
std::ofstream rOutputStream(filename);
if (!rOutputStream)
{
std::cerr << "Cannot open output file: " << filename << std::endl;
return;
}
if( NUMCOMP ==4)
{
rOutputStream << "P7\n";
rOutputStream << "#nvTIFF\n";
rOutputStream << "WIDTH "<<WIDTH<<"\n";
rOutputStream << "HEIGHT "<<HEIGHT<<"\n";
rOutputStream << "DEPTH "<<NUMCOMP<<"\n";
rOutputStream << "MAXVAL "<<(1<<BPP)-1<<"\n";
rOutputStream << "TUPLTYPE RGB_ALPHA\n";
rOutputStream << "ENDHDR\n";
}
else
{
rOutputStream << "P6\n";
rOutputStream << "#nvTIFF\n";
rOutputStream << WIDTH << " " << HEIGHT << "\n";
rOutputStream << (1<<BPP)-1<<"\n";
}
for(int y = 0; y < HEIGHT; y++)
{
for(int x = 0; x < WIDTH; x++)
{
if( BPP == 8)
{
rOutputStream << chan[(y*LD + x)*NUMCOMP];
rOutputStream << chan[(y*LD + x)*NUMCOMP + 1];
rOutputStream << chan[(y*LD + x)*NUMCOMP + 2];
if( NUMCOMP == 4)
{
rOutputStream << chan[(y*LD + x)*NUMCOMP + 3];;
}
}
else
{
int pixel_offset = (y * LD *NUMCOMP*2 + (x*NUMCOMP*2 ));
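				// For samples wider than 8 bits, the high byte is written first: PPM/PAM
				// stores multi-byte samples big-endian, and the raster buffer here is
				// assumed to hold little-endian 16-bit samples.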
for( int c = 0; c < NUMCOMP; c++)
{
rOutputStream << chan[pixel_offset + 2 * c +1]<<chan[pixel_offset + 2*c];
}
}
}
}
return;
}
static void usage(const char *pname) {
fprintf(stdout,
"Usage:\n"
"%s [options] -f|--file <TIFF_FILE>\n"
"\n"
"General options:\n"
"\n"
"\t-d DEVICE_ID\n"
"\t--device DEVICE_ID\n"
"\t\tSpecifies the GPU to use for images decoding/encoding.\n"
"\t\tDefault: device 0 is used.\n"
"\n"
"\t-v\n"
"\t--verbose\n"
"\t\tPrints some information about the decoded TIFF file.\n"
"\n"
"\t-h\n"
"\t--help\n"
"\t\tPrints this help\n"
"\n"
"Decoding options:\n"
"\n"
"\t-f TIFF_FILE\n"
"\t--file TIFF_FILE\n"
"\t\tSpecifies the TIFF file to decode. The code supports both single and multi-image\n"
"\t\ttiff files with the following limitations: \n"
"\t\t * color space must be either Grayscale (PhotometricInterp.=1) or RGB (=2) \n"
"\t\t * image data compressed with LZW (Compression=5) or uncompressed \n"
"\t\t * pixel components stored in \"chunky\" format (RGB..., PlanarConfiguration=1)\n"
"\t\t for RGB images \n"
"\t\t * image data must be organized in Strips, not Tiles \n"
"\t\t * pixels of RGB images must be represented with at most 4 components \n"
"\t\t * each component must be represented exactly with: \n"
"\t\t * 8 bits for LZW compressed images \n"
"\t\t * 8, 16 or 32 bits for uncompressed images \n"
"\t\t * all images in the file must have the same properties \n"
"\n"
"\t-b BEG_FRM\n"
"\t--frame-beg BEG_FRM\n"
"\t\tSpecifies the image id in the input TIFF file to start decoding from. The image\n"
"\t\tid must be a value between 0 and the total number of images in the file minus 1.\n"
"\t\tValues less than 0 are clamped to 0.\n"
"\t\tDefault: 0\n"
"\n"
"\t-e END_FRM\n"
"\t--frame-end END_FRM\n"
"\t\tSpecifies the image id in the input TIFF file to stop decoding at (included).\n"
"\t\tThe image id must be a value between 0 and the total number of images in the\n"
"\t\tfile minus 1. Values greater than num_images-1 are clamped to num_images-1.\n"
"\t\tDefault: num_images-1.\n"
"\n"
"\t-m\n"
"\t--memtype TYPE\n"
"\t\tSpecifies the type of memory used to hold the TIFF file content: pinned or\n"
"\t\tpageable. Pinned memory is used if 'p' is specified. Pageable memory is used if\n"
"\t\t'r' is specified. In case of pinned memory, file content is not copied to\n"
"\t\tdevice memory before the decoding process (with a resulting performance impact)\n"
"\t\tunless the option -c is also specified (see below).\n"
"\t\tDefualt: r (pageable)\n"
"\n"
"\t-c\n"
"\t--copyh2d\n"
"\t\tSpecifies to copy the file data to device memory in case the -m option specifies\n"
"\t\tto use pinned memory. In case of pageable memory this option has no effect.\n"
"\t\tDefault: off.\n"
"\n"
"\t--decode-out NUM_OUT\n"
"\t\tEnables the writing of selected images from the decoded input TIFF file into\n"
"\t\tseparate BMP files for inspection. If no argument is passed, only the first\n"
"\t\timage is written to disk, otherwise the first NUM_OUT images are written.\n"
"\t\tOutput files are named outImage_0.bmp, outImage_1.bmp...\n"
"\t\tDefualt: disabled.\n"
"\n"
"Encoding options:\n"
"\n"
"\t-E\n"
"\t--encode\n"
"\t\tThis option enables the encoding of the raster images obtained by decoding the\n"
"\t\tinput TIFF file. The images are divided into strips, compressed with LZW and,\n"
"\t\toptionally, written into an output TIFF file.\n"
"\t\tDefault: disabled.\n"
"\n"
"\t-r\n"
"\t--rowsxstrip\n"
"\t\tSpecifies the number of consecutive rows to use to divide the images into\n"
"\t\tstrips. Each image is divided in strips of the same size (except possibly the\n"
"\t\tlast strip) and then the strips are compressed as independent byte streams.\n"
"\t\tThis option is ignored if -E is not specified.\n"
"\t\tDefault: 1.\n"
"\n"
"\t-s\n"
"\t--stripalloc\n"
"\t\tSpecifies the initial estimate of the maximum size of compressed strips. If\n"
"\t\tduring compression one or more strips require more space, the compression is\n"
"\t\taborted and restarted automatically with a safe estimate. \n"
"\t\tThis option is ignored if -E is not specified.\n"
"\t\tDefault: the size, in bytes, of a strip in the uncompressed images.\n"
"\n"
"\t--encode-out\n"
"\t\tEnables the writing of the compressed images to an output TIFF file named\n"
"\t\toutFile.tif.\n"
"\t\tDefualt: disabled.\n",
pname);
exit(EXIT_FAILURE);
}
int main(int argc, char **argv) {
int devId = 0;
char *fname = NULL;
int verbose = 0;
int decWriteOutN = 0;
int frameBeg = INT_MIN;
int frameEnd = INT_MAX;
int decodeRange = 0;
int memType = NVTIFF_MEM_REG;
int doH2DFileCopy = 0;
int doEncode = 0;
int encRowsPerStrip = 1;
unsigned long long encStripAllocSize = 0;
int encWriteOut = 0;
int och;
while(1) {
int option_index = 0;
static struct option long_options[] = {
{ "file", required_argument, 0, 'f'},
{ "device", required_argument, 0, 'd'},
{"decode-out", optional_argument, 0, 1},
{ "frame-beg", required_argument, 0, 'b'},
{ "frame-end", required_argument, 0, 'e'},
{ "memtype", required_argument, 0, 'm'},
{ "copyh2d", required_argument, 0, 'c'},
{ "verbose", no_argument, 0, 'v'},
{ "encode", no_argument, 0, 'E'},
{"rowsxstrip", required_argument, 0, 'r'},
{"stripalloc", required_argument, 0, 's'},
{"encode-out", optional_argument, 0, 2},
{ "help", no_argument, 0, 'h'},
{ 0, 0, 0, 0}
};
och = getopt_long(argc, argv, "f:d:vo::hb:e:m:cEr:s:", long_options, &option_index);
if (och == -1) break;
switch (och) {
case 0:// handles long opts with non-NULL flag field
break;
case 'd':
devId = atoi(optarg);
break;
case 'f':
fname = strdup(optarg);
break;
case 'b':
frameBeg = atoi(optarg);
decodeRange = 1;
break;
case 'e':
frameEnd = atoi(optarg);
decodeRange = 1;
break;
case 'm':
if (optarg[0] != 'r' && optarg[0] != 'p') {
fprintf(stderr, "Unknown memory type specified (%c)!\n", optarg[0]);
usage(argv[0]);
}
memType = (optarg[0] == 'r') ? NVTIFF_MEM_REG : NVTIFF_MEM_PIN;
break;
case 'c':
doH2DFileCopy = 1;
break;
case 'v':
verbose++;
break;
case 1:
decWriteOutN = 1;
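				// getopt_long only attaches an optional argument written as --decode-out=N;
				// a separate "--decode-out N" leaves optarg NULL, so the next argv word is
				// consumed manually here when it does not look like another option.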
if(!optarg &&
argv[optind] != NULL &&
argv[optind][0] != '-') {
decWriteOutN = atoi(argv[optind++]);
}
break;
case 'E':
doEncode = 1;
break;
case 'r':
encRowsPerStrip = atoi(optarg);
break;
case 's':
encStripAllocSize = atoi(optarg);
break;
case 2:
encWriteOut = 1;
break;
case 'h':
case '?':
usage(argv[0]);
default:
fprintf(stderr, "unknown option: %c\n", och);
usage(argv[0]);
}
}
if (!fname) {
fprintf(stderr, "Please specify a TIFF file with the -f option!\n");
usage(argv[0]);
}
if (frameBeg > frameEnd) {
fprintf(stderr, "Invalid frame range!\n");
usage(argv[0]);
}
CHECK_CUDA(hipSetDevice(devId));
hipDeviceProp_t props;
printf("\nUsing GPU:\n");
CHECK_CUDA(hipGetDeviceProperties(&props, devId));
printf("\t%2d (%s, %d SMs, %d th/SM max, CC %d.%d, ECC %s)\n",
devId, props.name, props.multiProcessorCount,
props.maxThreadsPerMultiProcessor,
props.major, props.minor,
props.ECCEnabled?"on":"off");
printf("\n");
// dummy allocation to initialize subsystems
unsigned char *dummy;
CHECK_CUDA(hipMalloc(&dummy, 1024*1024*10));
CHECK_CUDA(hipFree(dummy));
hipStream_t stream;
CHECK_CUDA(hipStreamCreate(&stream));
if (verbose > 1) {
nvTiffDumpRaw(fname);
}
nvtiffStream_t tiff_stream;
nvtiffDecoder_t decoder;
nvtiffFileInfo_t file_info;
CHECK_NVTIFF(nvtiffStreamCreate(&tiff_stream));
CHECK_NVTIFF(nvtiffDecoderCreate(&decoder,
nullptr, nullptr, 0));
CHECK_NVTIFF(nvtiffStreamParseFromFile(fname, tiff_stream));
CHECK_NVTIFF(nvtiffStreamGetFileInfo(tiff_stream, &file_info));
// BEGIN work (possibly) overlapped with H2D copy of the file data
if (verbose) {
CHECK_NVTIFF(nvtiffStreamPrint(tiff_stream));
}
frameBeg = fmax(frameBeg, 0);
frameEnd = fmin(frameEnd, file_info.num_images-1);
const int nDecode = frameEnd-frameBeg+1;
// allocate device memory for images
unsigned char **imageOut_d = NULL;
const size_t imageSize = sizeof(**imageOut_d)*file_info.image_width *
file_info.image_height *
(file_info.bits_per_pixel/8);
imageOut_d = (unsigned char **)Malloc(sizeof(*imageOut_d)*nDecode);
for(unsigned int i = 0; i < nDecode; i++) {
CHECK_CUDA(hipMalloc(imageOut_d+i, imageSize));
}
printf("Decoding %u, %s %ux%u images [%d, %d], from file %s... ",
nDecode,
file_info.photometric_int == NVTIFF_PHOTOMETRIC_RGB ? "RGB" : "Grayscale",
file_info.image_width,
file_info.image_height,
frameBeg,
frameEnd,
fname);
fflush(stdout);
double __t = Wtime();
if (!decodeRange) {
CHECK_NVTIFF(nvtiffDecode(tiff_stream, decoder, imageOut_d, stream));
} else {
CHECK_NVTIFF(nvtiffDecodeRange(tiff_stream, decoder, frameBeg, nDecode, imageOut_d, stream));
}
CHECK_CUDA(hipStreamSynchronize(stream));
__t = Wtime()-__t;
printf("done in %lf secs\n\n", __t);
if (decWriteOutN) {
unsigned char *imageOut_h = (unsigned char *)Malloc(sizeof(*imageOut_h)*imageSize);
const unsigned int nout = fmin(decWriteOutN, nDecode);
printf("\tWriting images for the first %d subfile(s)...\n", nout);
fflush(stdout);
__t = Wtime();
for(unsigned int i = 0; i < nout; i++) {
CHECK_CUDA(hipMemcpy(imageOut_h, imageOut_d[i], imageSize, hipMemcpyDeviceToHost));
char outfname[MAX_STR_LEN];
const int isgreyScale = (file_info.photometric_int == NVTIFF_PHOTOMETRIC_MINISWHITE) ||
(file_info.photometric_int == NVTIFF_PHOTOMETRIC_MINISBLACK);
//void writePPM(const char * filename, unsigned char *chan, int LD, int WIDTH, int HEIGHT, int BPP, int NUMCOMP)
if(file_info.bits_per_sample[0] == 16)
{
snprintf(outfname, MAX_STR_LEN, "outImage_%d.ppm", i);
writePPM(outfname,
imageOut_h,
file_info.image_width,
file_info.image_width,
file_info.image_height,
file_info.bits_per_sample[0], file_info.samples_per_pixel);
}
else
if (!isgreyScale || (isgreyScale && file_info.bits_per_pixel == 8)) {
snprintf(outfname, MAX_STR_LEN, "outImage_%d.bmp", i);
printf("\t\timage %u... BMP format\n", i);
writeBMPFile(outfname,
imageOut_h,
file_info.image_width,
file_info.image_width,
file_info.image_height,
//tiffData->subFiles[i].samplesPerPixel,
file_info.bits_per_pixel/8,
isgreyScale);
} else {
snprintf(outfname, MAX_STR_LEN, "outImage_%d.raw", i);
printf("\t\timage %u... RAW format\n", i);
FILE *f = Fopen(outfname, "w");
Fwrite(imageOut_h, imageSize, 1, f);
fclose(f);
}
}
__t = Wtime()-__t;
printf("\t...done in %lf secs\n\n", __t);
free(imageOut_h);
}
#ifdef LIBTIFF_TEST
TIFF* tif = TIFFOpen(fname, "r");
if (tif) {
		// we already know that all subfiles have the same properties
uint32_t *raster;
raster = (uint32_t *)_TIFFmalloc(tiffData->subFiles[0].ncol*tiffData->subFiles[0].nrow * sizeof (uint32_t));
printf("\tDecoding with libTIFF... "); fflush(stdout);
double __t = Wtime();
for(int i = 0; i < tiffData->nSubFiles; i++) {
if (!TIFFReadRGBAImage(tif,
tiffData->subFiles[i].ncol,
tiffData->subFiles[i].nrow,
raster, 0)) {
fprintf(stderr, "Error while decoding image %d with libTiff\n", i);
break;
}
TIFFReadDirectory(tif);
}
__t = Wtime()-__t;
printf("done in %lf secs\n\n", __t);
_TIFFfree(raster);
TIFFClose(tif);
}
#endif
if (doEncode) {
#if 0
unsigned char *tmp = (unsigned char *)Malloc(imageSize);
for(int i = 0; i < imageSize; i++) {
tmp[i] = rand()%256;
}
CHECK_CUDA(hipMemcpy(imageOut_d[0], tmp, imageSize, hipMemcpyHostToDevice));
free(tmp);
#endif
unsigned int nrow = file_info.image_height;
unsigned int ncol = file_info.image_width;
unsigned int photometricInt = (unsigned int)file_info.photometric_int;
unsigned int planarConf = (unsigned int)file_info.planar_config;
unsigned short pixelSize = file_info.bits_per_pixel/8;
unsigned short samplesPerPixel = file_info.samples_per_pixel;
unsigned short sampleFormat = file_info.sample_format[0];
unsigned short *bitsPerSample = (unsigned short *)Malloc(sizeof(*bitsPerSample)*samplesPerPixel);
memcpy(bitsPerSample,
file_info.bits_per_sample,
sizeof(*bitsPerSample)*samplesPerPixel);
CHECK_NVTIFF(nvtiffStreamDestroy(tiff_stream));
CHECK_NVTIFF(nvtiffDecoderDestroy(decoder, stream));
tiff_stream = NULL;
decoder = NULL;
unsigned int nSubFiles = nDecode;
unsigned int nStripOut = DIV_UP(nrow, encRowsPerStrip);
unsigned int totStrips = nSubFiles*nStripOut;
unsigned long long *stripSize_d = NULL;
unsigned long long *stripOffs_d = NULL;
unsigned char *stripData_d = NULL;
if (encStripAllocSize <= 0) {
encStripAllocSize = encRowsPerStrip*ncol*(pixelSize);
}
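		// Worst-case staging buffers: one size/offset entry per strip across all images,
		// plus encStripAllocSize bytes of compressed output per strip; if a strip
		// overflows this estimate, the encode loop below reallocates and retries.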
CHECK_CUDA(hipMalloc(&stripSize_d, sizeof(*stripSize_d)*totStrips));
CHECK_CUDA(hipMalloc(&stripOffs_d, sizeof(*stripOffs_d)*totStrips));
CHECK_CUDA(hipMalloc(&stripData_d, sizeof(*stripData_d)*totStrips*encStripAllocSize));
nvTiffEncodeCtx_t *ctx = nvTiffEncodeCtxCreate(devId, nSubFiles, nStripOut);
printf("Encoding %u, %s %ux%u images using %d rows per strip and %llu bytes per strip... ",
nDecode,
photometricInt == 2 ? "RGB" : "Grayscale",
ncol,
nrow,
encRowsPerStrip,
encStripAllocSize);
fflush(stdout);
int rv;
__t = Wtime();
do {
rv = nvTiffEncode(ctx,
nrow,
ncol,
pixelSize,
encRowsPerStrip,
nSubFiles,
imageOut_d,
encStripAllocSize,
stripSize_d,
stripOffs_d,
stripData_d,
stream);
if (rv != NVTIFF_ENCODE_SUCCESS) {
printf("error, while encoding images!\n");
exit(EXIT_FAILURE);
}
rv = nvTiffEncodeFinalize(ctx, stream);
if (rv != NVTIFF_ENCODE_SUCCESS) {
if (rv == NVTIFF_ENCODE_COMP_OVERFLOW) {
printf("overflow, using %llu bytes per strip...", ctx->stripSizeMax);
// * free ctx mem
// * reallocate a larger stripData_d buffer
// * init a new ctx and retry
// * retry compression
encStripAllocSize = ctx->stripSizeMax;
nvTiffEncodeCtxDestroy(ctx);
CHECK_CUDA(hipFree(stripData_d));
CHECK_CUDA(hipMalloc(&stripData_d, sizeof(*stripData_d)*totStrips*encStripAllocSize));
ctx = nvTiffEncodeCtxCreate(devId, nSubFiles, nStripOut);
} else {
printf("error, while finalizing compressed images!\n");
exit(EXIT_FAILURE);
}
}
} while(rv == NVTIFF_ENCODE_COMP_OVERFLOW);
CHECK_CUDA(hipStreamSynchronize(stream));
__t = Wtime()-__t;
printf("done in %lf secs (compr. ratio: %.2lfx)\n\n",
__t, double(imageSize)*nSubFiles/ctx->stripSizeTot);
//printf("Total size of compressed strips: %llu bytes\n", ctx->stripSizeTot);
if (encWriteOut) {
unsigned long long *stripSize_h = (unsigned long long *)Malloc(sizeof(*stripSize_h)*totStrips);
CHECK_CUDA(hipMemcpy(stripSize_h,
stripSize_d,
sizeof(*stripSize_h)*totStrips,
hipMemcpyDeviceToHost));
unsigned long long *stripOffs_h = (unsigned long long *)Malloc(sizeof(*stripOffs_h)*totStrips);
CHECK_CUDA(hipMemcpy(stripOffs_h,
stripOffs_d,
sizeof(*stripOffs_h)*totStrips,
hipMemcpyDeviceToHost));
unsigned char *stripData_h = (unsigned char *)Malloc(sizeof(*stripData_h)*ctx->stripSizeTot);
CHECK_CUDA(hipMemcpy(stripData_h,
stripData_d,
ctx->stripSizeTot,
hipMemcpyDeviceToHost));
#if 0
FILE *fp = Fopen("stripData.txt", "w");
size_t stripSize = sizeof(*stripData_h)*encRowsPerStrip*ncol*pixelSize;
for(unsigned int i = 0; i < nSubFiles; i++) {
fprintf(fp, "compressed image %d:\n", i);
for(unsigned int j = 0; j < nStripOut; j++) {
unsigned long long off = stripOffs_h[i*nStripOut + j];
unsigned long long len = stripSize_h[i*nStripOut + j];
fprintf(fp, "\tstrip %5u, size: %6llu bytes (ratio: %5.2lfx), "
"fingerprint: %02X %02X %02X %02X ... %02X %02X %02X %02X\n",
j, len, double(stripSize)/len,
stripData_h[off + 0],
stripData_h[off + 1],
stripData_h[off + 2],
stripData_h[off + 3],
stripData_h[off + len-4],
stripData_h[off + len-3],
stripData_h[off + len-2],
stripData_h[off + len-1]);
}
fprintf(fp, "\n");
}
fclose(fp);
#endif
printf("\tWriting %u compressed images to TIFF file... ", nDecode); fflush(stdout);
__t = Wtime();
nvTiffWriteFile("outFile.tif",
VER_REG_TIFF,
nSubFiles,
nrow,
ncol,
encRowsPerStrip,
samplesPerPixel,
bitsPerSample,
photometricInt,
planarConf,
stripSize_h,
stripOffs_h,
stripData_h,
sampleFormat);
__t = Wtime()-__t;
printf("done in %lf secs\n\n", __t);
free(stripSize_h);
free(stripOffs_h);
free(stripData_h);
}
#ifdef LIBTIFF_TEST
tif = TIFFOpen("libTiffOut.tif", "w");
if (tif) {
unsigned char **imageOut_h = (unsigned char **)Malloc(sizeof(*imageOut_h)*nDecode);
for(unsigned int i = 0; i < nDecode; i++) {
imageOut_h[i] = (unsigned char *)Malloc(sizeof(*imageOut_h)*imageSize);
CHECK_CUDA(hipMemcpy(imageOut_h[i],
imageOut_d[i],
imageSize,
hipMemcpyDeviceToHost));
}
size_t stripSize = sizeof(**imageOut_h)*encRowsPerStrip*ncol*pixelSize;
printf("\tEncoding with libTIFF... "); fflush(stdout);
__t = Wtime();
for(unsigned int i = 0; i < nDecode; i++) {
TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, nrow);
TIFFSetField(tif, TIFFTAG_IMAGELENGTH, ncol);
TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8);
TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_LZW);
TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, photometricInt);
TIFFSetField(tif, TIFFTAG_FILLORDER, 1);
TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel);
TIFFSetField(tif, TIFFTAG_PLANARCONFIG, planarConf);
TIFFSetField(tif, TIFFTAG_ROWSPERSTRIP, encRowsPerStrip);
for(unsigned int j = 0; j < nStripOut; j++) {
unsigned int currStripSize = stripSize;
if (j == nStripOut-1) {
currStripSize = imageSize - j*stripSize;
}
if (-1 == TIFFWriteEncodedStrip(tif,
j,
imageOut_h[i]+j*stripSize,
currStripSize)) {
fprintf(stderr, "Error while encoding image %d with libTiff\n", i);
break;
}
}
			// need to find a way to have libTiff encode in memory
			// without writing the last directory to disk
			// after each TIFFWriteDirectory() call
TIFFWriteDirectory(tif);
//TIFFRewriteDirectory(tif);
}
__t = Wtime()-__t;
printf("done in %lf secs\n\n", __t);
TIFFClose(tif);
}
#endif
CHECK_CUDA(hipFree(stripSize_d));
CHECK_CUDA(hipFree(stripOffs_d));
CHECK_CUDA(hipFree(stripData_d));
free(bitsPerSample);
nvTiffEncodeCtxDestroy(ctx);
}
// cleanup
for(unsigned int i = 0; i < nDecode; i++) {
CHECK_CUDA(hipFree(imageOut_d[i]));
}
free(imageOut_d);
free(fname);
if(tiff_stream) {
CHECK_NVTIFF(nvtiffStreamDestroy(tiff_stream));
}
if(decoder){
CHECK_NVTIFF(nvtiffDecoderDestroy(decoder, stream));
}
CHECK_CUDA(hipStreamDestroy(stream));
CHECK_CUDA(hipDeviceReset());
return 0;
}
|
1154a01ad1e36d769420caa2bd15114da9089743.cu
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) || defined(_MSC_VER)
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
#include <windows.h>
#include <chrono>
#include "getopt.h"
# pragma warning(disable:4819)
#else
#include <getopt.h>
#endif
#include <fstream>
#include <iostream>
#include <cuda_runtime.h>
#include "nvTiff_utils.h"
#include "cudamacro.h"
#include <nvTiff.h>
#define CHECK_NVTIFF(call) \
{ \
nvtiffStatus_t _e = (call); \
if (_e != NVTIFF_STATUS_SUCCESS) \
{ \
std::cerr<< "nvTiff failure: '#" << _e<<std::endl; \
exit(EXIT_FAILURE); \
} \
}
//#define LIBTIFF_TEST
#ifdef LIBTIFF_TEST
#include <tiffio.h>
#endif
#define MAX_STR_LEN (256)
// quick and dirty BMP file writer
static void writeBMPFile(const char *filename, unsigned char *chan, int LD, int WIDTH, int HEIGHT, int BPP, int IS_GREYSCALE) {
unsigned int headers[13];
FILE * outfile;
int extrabytes;
int paddedsize;
int x; int y; int n;
int red, green, blue;
extrabytes = 4 - ((WIDTH * 3) % 4); // How many bytes of padding to add to each
// horizontal line - the size of which must
// be a multiple of 4 bytes.
if (extrabytes == 4)
extrabytes = 0;
paddedsize = ((WIDTH * 3) + extrabytes) * HEIGHT;
// Headers...
// Note that the "BM" identifier in bytes 0 and 1 is NOT included in these "headers".
headers[0] = paddedsize + 54; // bfSize (whole file size)
headers[1] = 0; // bfReserved (both)
headers[2] = 54; // bfOffbits
headers[3] = 40; // biSize
headers[4] = WIDTH; // biWidth
headers[5] = HEIGHT; // biHeight
// Would have biPlanes and biBitCount in position 6, but they're shorts.
// It's easier to write them out separately (see below) than pretend
// they're a single int, especially with endian issues...
headers[7] = 0; // biCompression
headers[8] = paddedsize; // biSizeImage
headers[9] = 0; // biXPelsPerMeter
headers[10] = 0; // biYPelsPerMeter
headers[11] = 0; // biClrUsed
headers[12] = 0; // biClrImportant
outfile = Fopen(filename, "wb");
//
// Headers begin...
// When printing ints and shorts, we write out 1 character at a time to avoid endian issues.
//
fprintf(outfile, "BM");
for (n = 0; n <= 5; n++)
{
fprintf(outfile, "%c", headers[n] & 0x000000FF);
fprintf(outfile, "%c", (headers[n] & 0x0000FF00) >> 8);
fprintf(outfile, "%c", (headers[n] & 0x00FF0000) >> 16);
fprintf(outfile, "%c", (headers[n] & (unsigned int) 0xFF000000) >> 24);
}
// These next 4 characters are for the biPlanes and biBitCount fields.
fprintf(outfile, "%c", 1);
fprintf(outfile, "%c", 0);
fprintf(outfile, "%c", 24);
fprintf(outfile, "%c", 0);
for (n = 7; n <= 12; n++)
{
fprintf(outfile, "%c", headers[n] & 0x000000FF);
fprintf(outfile, "%c", (headers[n] & 0x0000FF00) >> 8);
fprintf(outfile, "%c", (headers[n] & 0x00FF0000) >> 16);
fprintf(outfile, "%c", (headers[n] & (unsigned int) 0xFF000000) >> 24);
}
//
// Headers done, now write the data...
//
for (y = HEIGHT - 1; y >= 0; y--) // BMP image format is written from bottom to top...
{
for (x = 0; x <= WIDTH - 1; x++)
{
/*
red = reduce(redcount[x][y] + COLOUR_OFFSET) * red_multiplier;
green = reduce(greencount[x][y] + COLOUR_OFFSET) * green_multiplier;
blue = reduce(bluecount[x][y] + COLOUR_OFFSET) * blue_multiplier;
*/
if (!IS_GREYSCALE) {
red = chan[0 + y*LD*BPP + BPP*x];
green = chan[1 + y*LD*BPP + BPP*x];
blue = chan[2 + y*LD*BPP + BPP*x];
} else {
red = chan[0 + y*LD*BPP + BPP*x];
green = red;
blue = red;
}
if (red > 255) red = 255; if (red < 0) red = 0;
if (green > 255) green = 255; if (green < 0) green = 0;
if (blue > 255) blue = 255; if (blue < 0) blue = 0;
// Also, it's written in (b,g,r) format...
fprintf(outfile, "%c", blue);
fprintf(outfile, "%c", green);
fprintf(outfile, "%c", red);
}
if (extrabytes) // See above - BMP lines must be of lengths divisible by 4.
{
for (n = 1; n <= extrabytes; n++)
{
fprintf(outfile, "%c", 0);
}
}
}
fclose(outfile);
return;
}
void writePPM(const char * filename, unsigned char *chan, int LD, int WIDTH, int HEIGHT, int BPP, int NUMCOMP)
{
std::ofstream rOutputStream(filename);
if (!rOutputStream)
{
std::cerr << "Cannot open output file: " << filename << std::endl;
return;
}
if( NUMCOMP ==4)
{
rOutputStream << "P7\n";
rOutputStream << "#nvTIFF\n";
rOutputStream << "WIDTH "<<WIDTH<<"\n";
rOutputStream << "HEIGHT "<<HEIGHT<<"\n";
rOutputStream << "DEPTH "<<NUMCOMP<<"\n";
rOutputStream << "MAXVAL "<<(1<<BPP)-1<<"\n";
rOutputStream << "TUPLTYPE RGB_ALPHA\n";
rOutputStream << "ENDHDR\n";
}
else
{
rOutputStream << "P6\n";
rOutputStream << "#nvTIFF\n";
rOutputStream << WIDTH << " " << HEIGHT << "\n";
rOutputStream << (1<<BPP)-1<<"\n";
}
for(int y = 0; y < HEIGHT; y++)
{
for(int x = 0; x < WIDTH; x++)
{
if( BPP == 8)
{
rOutputStream << chan[(y*LD + x)*NUMCOMP];
rOutputStream << chan[(y*LD + x)*NUMCOMP + 1];
rOutputStream << chan[(y*LD + x)*NUMCOMP + 2];
if( NUMCOMP == 4)
{
                    rOutputStream << chan[(y*LD + x)*NUMCOMP + 3];
}
}
else
{
int pixel_offset = (y * LD *NUMCOMP*2 + (x*NUMCOMP*2 ));
for( int c = 0; c < NUMCOMP; c++)
{
rOutputStream << chan[pixel_offset + 2 * c +1]<<chan[pixel_offset + 2*c];
}
}
}
}
return;
}
static void usage(const char *pname) {
fprintf(stdout,
"Usage:\n"
"%s [options] -f|--file <TIFF_FILE>\n"
"\n"
"General options:\n"
"\n"
"\t-d DEVICE_ID\n"
"\t--device DEVICE_ID\n"
"\t\tSpecifies the GPU to use for images decoding/encoding.\n"
"\t\tDefault: device 0 is used.\n"
"\n"
"\t-v\n"
"\t--verbose\n"
"\t\tPrints some information about the decoded TIFF file.\n"
"\n"
"\t-h\n"
"\t--help\n"
"\t\tPrints this help\n"
"\n"
"Decoding options:\n"
"\n"
"\t-f TIFF_FILE\n"
"\t--file TIFF_FILE\n"
"\t\tSpecifies the TIFF file to decode. The code supports both single and multi-image\n"
"\t\ttiff files with the following limitations: \n"
"\t\t * color space must be either Grayscale (PhotometricInterp.=1) or RGB (=2) \n"
"\t\t * image data compressed with LZW (Compression=5) or uncompressed \n"
"\t\t * pixel components stored in \"chunky\" format (RGB..., PlanarConfiguration=1)\n"
"\t\t for RGB images \n"
"\t\t * image data must be organized in Strips, not Tiles \n"
"\t\t * pixels of RGB images must be represented with at most 4 components \n"
"\t\t * each component must be represented exactly with: \n"
"\t\t * 8 bits for LZW compressed images \n"
"\t\t * 8, 16 or 32 bits for uncompressed images \n"
"\t\t * all images in the file must have the same properties \n"
"\n"
"\t-b BEG_FRM\n"
"\t--frame-beg BEG_FRM\n"
"\t\tSpecifies the image id in the input TIFF file to start decoding from. The image\n"
"\t\tid must be a value between 0 and the total number of images in the file minus 1.\n"
"\t\tValues less than 0 are clamped to 0.\n"
"\t\tDefault: 0\n"
"\n"
"\t-e END_FRM\n"
"\t--frame-end END_FRM\n"
"\t\tSpecifies the image id in the input TIFF file to stop decoding at (included).\n"
"\t\tThe image id must be a value between 0 and the total number of images in the\n"
"\t\tfile minus 1. Values greater than num_images-1 are clamped to num_images-1.\n"
"\t\tDefault: num_images-1.\n"
"\n"
"\t-m\n"
"\t--memtype TYPE\n"
"\t\tSpecifies the type of memory used to hold the TIFF file content: pinned or\n"
"\t\tpageable. Pinned memory is used if 'p' is specified. Pageable memory is used if\n"
"\t\t'r' is specified. In case of pinned memory, file content is not copied to\n"
"\t\tdevice memory before the decoding process (with a resulting performance impact)\n"
"\t\tunless the option -c is also specified (see below).\n"
"\t\tDefualt: r (pageable)\n"
"\n"
"\t-c\n"
"\t--copyh2d\n"
"\t\tSpecifies to copy the file data to device memory in case the -m option specifies\n"
"\t\tto use pinned memory. In case of pageable memory this option has no effect.\n"
"\t\tDefault: off.\n"
"\n"
"\t--decode-out NUM_OUT\n"
"\t\tEnables the writing of selected images from the decoded input TIFF file into\n"
"\t\tseparate BMP files for inspection. If no argument is passed, only the first\n"
"\t\timage is written to disk, otherwise the first NUM_OUT images are written.\n"
"\t\tOutput files are named outImage_0.bmp, outImage_1.bmp...\n"
"\t\tDefualt: disabled.\n"
"\n"
"Encoding options:\n"
"\n"
"\t-E\n"
"\t--encode\n"
"\t\tThis option enables the encoding of the raster images obtained by decoding the\n"
"\t\tinput TIFF file. The images are divided into strips, compressed with LZW and,\n"
"\t\toptionally, written into an output TIFF file.\n"
"\t\tDefault: disabled.\n"
"\n"
"\t-r\n"
"\t--rowsxstrip\n"
"\t\tSpecifies the number of consecutive rows to use to divide the images into\n"
"\t\tstrips. Each image is divided in strips of the same size (except possibly the\n"
"\t\tlast strip) and then the strips are compressed as independent byte streams.\n"
"\t\tThis option is ignored if -E is not specified.\n"
"\t\tDefault: 1.\n"
"\n"
"\t-s\n"
"\t--stripalloc\n"
"\t\tSpecifies the initial estimate of the maximum size of compressed strips. If\n"
"\t\tduring compression one or more strips require more space, the compression is\n"
"\t\taborted and restarted automatically with a safe estimate. \n"
"\t\tThis option is ignored if -E is not specified.\n"
"\t\tDefault: the size, in bytes, of a strip in the uncompressed images.\n"
"\n"
"\t--encode-out\n"
"\t\tEnables the writing of the compressed images to an output TIFF file named\n"
"\t\toutFile.tif.\n"
"\t\tDefualt: disabled.\n",
pname);
exit(EXIT_FAILURE);
}
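// Illustrative invocation (binary name assumed; any TIFF file meeting the
// constraints listed in the usage text works):
//   ./nvTiffSample -f input.tif -E -r 8 --encode-out
// decodes input.tif, re-encodes it with 8 rows per strip and writes the
// compressed images to outFile.tif.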
int main(int argc, char **argv) {
int devId = 0;
char *fname = NULL;
int verbose = 0;
int decWriteOutN = 0;
int frameBeg = INT_MIN;
int frameEnd = INT_MAX;
int decodeRange = 0;
int memType = NVTIFF_MEM_REG;
int doH2DFileCopy = 0;
int doEncode = 0;
int encRowsPerStrip = 1;
unsigned long long encStripAllocSize = 0;
int encWriteOut = 0;
int och;
while(1) {
int option_index = 0;
static struct option long_options[] = {
{ "file", required_argument, 0, 'f'},
{ "device", required_argument, 0, 'd'},
{"decode-out", optional_argument, 0, 1},
{ "frame-beg", required_argument, 0, 'b'},
{ "frame-end", required_argument, 0, 'e'},
{ "memtype", required_argument, 0, 'm'},
{ "copyh2d", required_argument, 0, 'c'},
{ "verbose", no_argument, 0, 'v'},
{ "encode", no_argument, 0, 'E'},
{"rowsxstrip", required_argument, 0, 'r'},
{"stripalloc", required_argument, 0, 's'},
{"encode-out", optional_argument, 0, 2},
{ "help", no_argument, 0, 'h'},
{ 0, 0, 0, 0}
};
och = getopt_long(argc, argv, "f:d:vo::hb:e:m:cEr:s:", long_options, &option_index);
if (och == -1) break;
switch (och) {
case 0:// handles long opts with non-NULL flag field
break;
case 'd':
devId = atoi(optarg);
break;
case 'f':
fname = strdup(optarg);
break;
case 'b':
frameBeg = atoi(optarg);
decodeRange = 1;
break;
case 'e':
frameEnd = atoi(optarg);
decodeRange = 1;
break;
case 'm':
if (optarg[0] != 'r' && optarg[0] != 'p') {
fprintf(stderr, "Unknown memory type specified (%c)!\n", optarg[0]);
usage(argv[0]);
}
memType = (optarg[0] == 'r') ? NVTIFF_MEM_REG : NVTIFF_MEM_PIN;
break;
case 'c':
doH2DFileCopy = 1;
break;
case 'v':
verbose++;
break;
case 1:
decWriteOutN = 1;
if(!optarg &&
argv[optind] != NULL &&
argv[optind][0] != '-') {
decWriteOutN = atoi(argv[optind++]);
}
break;
case 'E':
doEncode = 1;
break;
case 'r':
encRowsPerStrip = atoi(optarg);
break;
case 's':
encStripAllocSize = atoi(optarg);
break;
case 2:
encWriteOut = 1;
break;
case 'h':
case '?':
usage(argv[0]);
default:
fprintf(stderr, "unknown option: %c\n", och);
usage(argv[0]);
}
}
if (!fname) {
fprintf(stderr, "Please specify a TIFF file with the -f option!\n");
usage(argv[0]);
}
if (frameBeg > frameEnd) {
fprintf(stderr, "Invalid frame range!\n");
usage(argv[0]);
}
CHECK_CUDA(cudaSetDevice(devId));
cudaDeviceProp props;
printf("\nUsing GPU:\n");
CHECK_CUDA(cudaGetDeviceProperties(&props, devId));
printf("\t%2d (%s, %d SMs, %d th/SM max, CC %d.%d, ECC %s)\n",
devId, props.name, props.multiProcessorCount,
props.maxThreadsPerMultiProcessor,
props.major, props.minor,
props.ECCEnabled?"on":"off");
printf("\n");
// dummy allocation to initialize subsystems
unsigned char *dummy;
CHECK_CUDA(cudaMalloc(&dummy, 1024*1024*10));
CHECK_CUDA(cudaFree(dummy));
cudaStream_t stream;
CHECK_CUDA(cudaStreamCreate(&stream));
if (verbose > 1) {
nvTiffDumpRaw(fname);
}
nvtiffStream_t tiff_stream;
nvtiffDecoder_t decoder;
nvtiffFileInfo_t file_info;
CHECK_NVTIFF(nvtiffStreamCreate(&tiff_stream));
CHECK_NVTIFF(nvtiffDecoderCreate(&decoder,
nullptr, nullptr, 0));
CHECK_NVTIFF(nvtiffStreamParseFromFile(fname, tiff_stream));
CHECK_NVTIFF(nvtiffStreamGetFileInfo(tiff_stream, &file_info));
// BEGIN work (possibly) overlapped with H2D copy of the file data
if (verbose) {
CHECK_NVTIFF(nvtiffStreamPrint(tiff_stream));
}
frameBeg = fmax(frameBeg, 0);
frameEnd = fmin(frameEnd, file_info.num_images-1);
const int nDecode = frameEnd-frameBeg+1;
// allocate device memory for images
unsigned char **imageOut_d = NULL;
const size_t imageSize = sizeof(**imageOut_d)*file_info.image_width *
file_info.image_height *
(file_info.bits_per_pixel/8);
imageOut_d = (unsigned char **)Malloc(sizeof(*imageOut_d)*nDecode);
for(unsigned int i = 0; i < nDecode; i++) {
CHECK_CUDA(cudaMalloc(imageOut_d+i, imageSize));
}
printf("Decoding %u, %s %ux%u images [%d, %d], from file %s... ",
nDecode,
file_info.photometric_int == NVTIFF_PHOTOMETRIC_RGB ? "RGB" : "Grayscale",
file_info.image_width,
file_info.image_height,
frameBeg,
frameEnd,
fname);
fflush(stdout);
double __t = Wtime();
if (!decodeRange) {
CHECK_NVTIFF(nvtiffDecode(tiff_stream, decoder, imageOut_d, stream));
} else {
CHECK_NVTIFF(nvtiffDecodeRange(tiff_stream, decoder, frameBeg, nDecode, imageOut_d, stream));
}
CHECK_CUDA(cudaStreamSynchronize(stream));
__t = Wtime()-__t;
printf("done in %lf secs\n\n", __t);
if (decWriteOutN) {
unsigned char *imageOut_h = (unsigned char *)Malloc(sizeof(*imageOut_h)*imageSize);
const unsigned int nout = fmin(decWriteOutN, nDecode);
printf("\tWriting images for the first %d subfile(s)...\n", nout);
fflush(stdout);
__t = Wtime();
for(unsigned int i = 0; i < nout; i++) {
CHECK_CUDA(cudaMemcpy(imageOut_h, imageOut_d[i], imageSize, cudaMemcpyDeviceToHost));
char outfname[MAX_STR_LEN];
const int isgreyScale = (file_info.photometric_int == NVTIFF_PHOTOMETRIC_MINISWHITE) ||
(file_info.photometric_int == NVTIFF_PHOTOMETRIC_MINISBLACK);
//void writePPM(const char * filename, unsigned char *chan, int LD, int WIDTH, int HEIGHT, int BPP, int NUMCOMP)
if(file_info.bits_per_sample[0] == 16)
{
snprintf(outfname, MAX_STR_LEN, "outImage_%d.ppm", i);
writePPM(outfname,
imageOut_h,
file_info.image_width,
file_info.image_width,
file_info.image_height,
file_info.bits_per_sample[0], file_info.samples_per_pixel);
}
else
if (!isgreyScale || (isgreyScale && file_info.bits_per_pixel == 8)) {
snprintf(outfname, MAX_STR_LEN, "outImage_%d.bmp", i);
printf("\t\timage %u... BMP format\n", i);
writeBMPFile(outfname,
imageOut_h,
file_info.image_width,
file_info.image_width,
file_info.image_height,
//tiffData->subFiles[i].samplesPerPixel,
file_info.bits_per_pixel/8,
isgreyScale);
} else {
snprintf(outfname, MAX_STR_LEN, "outImage_%d.raw", i);
printf("\t\timage %u... RAW format\n", i);
FILE *f = Fopen(outfname, "w");
Fwrite(imageOut_h, imageSize, 1, f);
fclose(f);
}
}
__t = Wtime()-__t;
printf("\t...done in %lf secs\n\n", __t);
free(imageOut_h);
}
#ifdef LIBTIFF_TEST
TIFF* tif = TIFFOpen(fname, "r");
if (tif) {
        // we already know that all subfiles have the same properties
uint32_t *raster;
raster = (uint32_t *)_TIFFmalloc(tiffData->subFiles[0].ncol*tiffData->subFiles[0].nrow * sizeof (uint32_t));
printf("\tDecoding with libTIFF... "); fflush(stdout);
double __t = Wtime();
for(int i = 0; i < tiffData->nSubFiles; i++) {
if (!TIFFReadRGBAImage(tif,
tiffData->subFiles[i].ncol,
tiffData->subFiles[i].nrow,
raster, 0)) {
fprintf(stderr, "Error while decoding image %d with libTiff\n", i);
break;
}
TIFFReadDirectory(tif);
}
__t = Wtime()-__t;
printf("done in %lf secs\n\n", __t);
_TIFFfree(raster);
TIFFClose(tif);
}
#endif
if (doEncode) {
#if 0
unsigned char *tmp = (unsigned char *)Malloc(imageSize);
for(int i = 0; i < imageSize; i++) {
tmp[i] = rand()%256;
}
CHECK_CUDA(cudaMemcpy(imageOut_d[0], tmp, imageSize, cudaMemcpyHostToDevice));
free(tmp);
#endif
unsigned int nrow = file_info.image_height;
unsigned int ncol = file_info.image_width;
unsigned int photometricInt = (unsigned int)file_info.photometric_int;
unsigned int planarConf = (unsigned int)file_info.planar_config;
unsigned short pixelSize = file_info.bits_per_pixel/8;
unsigned short samplesPerPixel = file_info.samples_per_pixel;
unsigned short sampleFormat = file_info.sample_format[0];
unsigned short *bitsPerSample = (unsigned short *)Malloc(sizeof(*bitsPerSample)*samplesPerPixel);
memcpy(bitsPerSample,
file_info.bits_per_sample,
sizeof(*bitsPerSample)*samplesPerPixel);
CHECK_NVTIFF(nvtiffStreamDestroy(tiff_stream));
CHECK_NVTIFF(nvtiffDecoderDestroy(decoder, stream));
tiff_stream = NULL;
decoder = NULL;
unsigned int nSubFiles = nDecode;
unsigned int nStripOut = DIV_UP(nrow, encRowsPerStrip);
unsigned int totStrips = nSubFiles*nStripOut;
unsigned long long *stripSize_d = NULL;
unsigned long long *stripOffs_d = NULL;
unsigned char *stripData_d = NULL;
if (encStripAllocSize <= 0) {
encStripAllocSize = encRowsPerStrip*ncol*(pixelSize);
}
CHECK_CUDA(cudaMalloc(&stripSize_d, sizeof(*stripSize_d)*totStrips));
CHECK_CUDA(cudaMalloc(&stripOffs_d, sizeof(*stripOffs_d)*totStrips));
CHECK_CUDA(cudaMalloc(&stripData_d, sizeof(*stripData_d)*totStrips*encStripAllocSize));
nvTiffEncodeCtx_t *ctx = nvTiffEncodeCtxCreate(devId, nSubFiles, nStripOut);
printf("Encoding %u, %s %ux%u images using %d rows per strip and %llu bytes per strip... ",
nDecode,
photometricInt == 2 ? "RGB" : "Grayscale",
ncol,
nrow,
encRowsPerStrip,
encStripAllocSize);
fflush(stdout);
int rv;
__t = Wtime();
do {
rv = nvTiffEncode(ctx,
nrow,
ncol,
pixelSize,
encRowsPerStrip,
nSubFiles,
imageOut_d,
encStripAllocSize,
stripSize_d,
stripOffs_d,
stripData_d,
stream);
if (rv != NVTIFF_ENCODE_SUCCESS) {
printf("error, while encoding images!\n");
exit(EXIT_FAILURE);
}
rv = nvTiffEncodeFinalize(ctx, stream);
if (rv != NVTIFF_ENCODE_SUCCESS) {
if (rv == NVTIFF_ENCODE_COMP_OVERFLOW) {
printf("overflow, using %llu bytes per strip...", ctx->stripSizeMax);
// * free ctx mem
// * reallocate a larger stripData_d buffer
// * init a new ctx and retry
// * retry compression
encStripAllocSize = ctx->stripSizeMax;
nvTiffEncodeCtxDestroy(ctx);
CHECK_CUDA(cudaFree(stripData_d));
CHECK_CUDA(cudaMalloc(&stripData_d, sizeof(*stripData_d)*totStrips*encStripAllocSize));
ctx = nvTiffEncodeCtxCreate(devId, nSubFiles, nStripOut);
} else {
printf("error, while finalizing compressed images!\n");
exit(EXIT_FAILURE);
}
}
} while(rv == NVTIFF_ENCODE_COMP_OVERFLOW);
CHECK_CUDA(cudaStreamSynchronize(stream));
__t = Wtime()-__t;
printf("done in %lf secs (compr. ratio: %.2lfx)\n\n",
__t, double(imageSize)*nSubFiles/ctx->stripSizeTot);
//printf("Total size of compressed strips: %llu bytes\n", ctx->stripSizeTot);
if (encWriteOut) {
unsigned long long *stripSize_h = (unsigned long long *)Malloc(sizeof(*stripSize_h)*totStrips);
CHECK_CUDA(cudaMemcpy(stripSize_h,
stripSize_d,
sizeof(*stripSize_h)*totStrips,
cudaMemcpyDeviceToHost));
unsigned long long *stripOffs_h = (unsigned long long *)Malloc(sizeof(*stripOffs_h)*totStrips);
CHECK_CUDA(cudaMemcpy(stripOffs_h,
stripOffs_d,
sizeof(*stripOffs_h)*totStrips,
cudaMemcpyDeviceToHost));
unsigned char *stripData_h = (unsigned char *)Malloc(sizeof(*stripData_h)*ctx->stripSizeTot);
CHECK_CUDA(cudaMemcpy(stripData_h,
stripData_d,
ctx->stripSizeTot,
cudaMemcpyDeviceToHost));
#if 0
FILE *fp = Fopen("stripData.txt", "w");
size_t stripSize = sizeof(*stripData_h)*encRowsPerStrip*ncol*pixelSize;
for(unsigned int i = 0; i < nSubFiles; i++) {
fprintf(fp, "compressed image %d:\n", i);
for(unsigned int j = 0; j < nStripOut; j++) {
unsigned long long off = stripOffs_h[i*nStripOut + j];
unsigned long long len = stripSize_h[i*nStripOut + j];
fprintf(fp, "\tstrip %5u, size: %6llu bytes (ratio: %5.2lfx), "
"fingerprint: %02X %02X %02X %02X ... %02X %02X %02X %02X\n",
j, len, double(stripSize)/len,
stripData_h[off + 0],
stripData_h[off + 1],
stripData_h[off + 2],
stripData_h[off + 3],
stripData_h[off + len-4],
stripData_h[off + len-3],
stripData_h[off + len-2],
stripData_h[off + len-1]);
}
fprintf(fp, "\n");
}
fclose(fp);
#endif
printf("\tWriting %u compressed images to TIFF file... ", nDecode); fflush(stdout);
__t = Wtime();
nvTiffWriteFile("outFile.tif",
VER_REG_TIFF,
nSubFiles,
nrow,
ncol,
encRowsPerStrip,
samplesPerPixel,
bitsPerSample,
photometricInt,
planarConf,
stripSize_h,
stripOffs_h,
stripData_h,
sampleFormat);
__t = Wtime()-__t;
printf("done in %lf secs\n\n", __t);
free(stripSize_h);
free(stripOffs_h);
free(stripData_h);
}
#ifdef LIBTIFF_TEST
tif = TIFFOpen("libTiffOut.tif", "w");
if (tif) {
unsigned char **imageOut_h = (unsigned char **)Malloc(sizeof(*imageOut_h)*nDecode);
for(unsigned int i = 0; i < nDecode; i++) {
imageOut_h[i] = (unsigned char *)Malloc(sizeof(*imageOut_h)*imageSize);
CHECK_CUDA(cudaMemcpy(imageOut_h[i],
imageOut_d[i],
imageSize,
cudaMemcpyDeviceToHost));
}
size_t stripSize = sizeof(**imageOut_h)*encRowsPerStrip*ncol*pixelSize;
printf("\tEncoding with libTIFF... "); fflush(stdout);
__t = Wtime();
for(unsigned int i = 0; i < nDecode; i++) {
            TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, ncol);  // ImageWidth = number of columns
            TIFFSetField(tif, TIFFTAG_IMAGELENGTH, nrow); // ImageLength = number of rows
TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8);
TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_LZW);
TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, photometricInt);
TIFFSetField(tif, TIFFTAG_FILLORDER, 1);
TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel);
TIFFSetField(tif, TIFFTAG_PLANARCONFIG, planarConf);
TIFFSetField(tif, TIFFTAG_ROWSPERSTRIP, encRowsPerStrip);
for(unsigned int j = 0; j < nStripOut; j++) {
unsigned int currStripSize = stripSize;
if (j == nStripOut-1) {
currStripSize = imageSize - j*stripSize;
}
if (-1 == TIFFWriteEncodedStrip(tif,
j,
imageOut_h[i]+j*stripSize,
currStripSize)) {
fprintf(stderr, "Error while encoding image %d with libTiff\n", i);
break;
}
}
            // need to find a way to have libTiff encode in
            // memory without writing the last directory to disk
            // after each TIFFWriteDirectory() call
TIFFWriteDirectory(tif);
//TIFFRewriteDirectory(tif);
}
__t = Wtime()-__t;
printf("done in %lf secs\n\n", __t);
TIFFClose(tif);
}
#endif
CHECK_CUDA(cudaFree(stripSize_d));
CHECK_CUDA(cudaFree(stripOffs_d));
CHECK_CUDA(cudaFree(stripData_d));
free(bitsPerSample);
nvTiffEncodeCtxDestroy(ctx);
}
// cleanup
for(unsigned int i = 0; i < nDecode; i++) {
CHECK_CUDA(cudaFree(imageOut_d[i]));
}
free(imageOut_d);
free(fname);
if(tiff_stream) {
CHECK_NVTIFF(nvtiffStreamDestroy(tiff_stream));
}
if(decoder){
CHECK_NVTIFF(nvtiffDecoderDestroy(decoder, stream));
}
CHECK_CUDA(cudaStreamDestroy(stream));
CHECK_CUDA(cudaDeviceReset());
return 0;
}
|
323df6e9e614588743aa8535de3ca5791b7473e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "sort.hpp"
#include "checkCudaErrors.hpp"
#include "cudaMemory.hpp"
#include "functions.hpp"
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <limits>
#include <utility>
#include "scan.hpp"
#include "reduce.hpp"
// -------------------- GPU Parallel Sort (thrust) --------------------
void thrustGPUsort(
unsigned int* const d_in,
const unsigned int length
) {
thrust::sort(thrust::device, d_in, d_in + length);
}
void thrustGPUsort(
std::pair<unsigned int, unsigned int>* const d_in,
const unsigned int length
) {
thrust::sort(
thrust::device,
(thrust::pair<unsigned int, unsigned int>*) d_in,
(thrust::pair<unsigned int, unsigned int>*) d_in + length
);
}
// -------------------- GPU Parallel Sort (Bitonic Sort & Merge Sort) --------------------
// memory address; ( ) splitter network ( ) half_cleaner network
// 0 ---------------------------------------------------------------------------------------------------------...
// idx=1-> | | | <-idx=1 | | | | | | | ...
// 1 ---------------------------------------------------------------------------------------------------------...
// idx=1-> | | <-idx=2 | | | | | | | | | | ...
// 2 ---------------------------------------------------------------------------------------------------------...
// idx=2-> | | | <-idx=2 | | | | | | | | | | | | | ...
// 3 ---------------------------------------------------------------------------------------------------------...
// | | | | | | | | | | | | ...
// 4 ---------------------------------------------------------------------------------------------------------...
// idx=3-> | | | <-idx=3 | | | | | | | | | | | | | | | ...
// 5 ---------------------------------------------------------------------------------------------------------...
// idx=3-> | | <-idx=4 | | | | | | | | | | | | | | ...
// 6 ---------------------------------------------------------------------------------------------------------...
// idx=4-> | | | <-idx=4 | | | | | | | | | | | | | ...
// 7 ---------------------------------------------------------------------------------------------------------...
//
// i: |<- 1 ->| |<----- 2 ---->| |<------------ 4 -------->| |<-------------------- 8 ------------------->|
// j: x xxx <1> xxxxxxx <-2-> <1> xxxxxxxxxxxxxxx <-- 4 --> <-2-> <1>
// i is the number of threads used to sort a chunk of 2*i elements (i doubles at every stage)
template <typename T>
__device__ __forceinline__ void splitterKeys(
T* sh_keys,
unsigned int idx,
unsigned int i
) {
unsigned int mask = i - 1;
unsigned int lsbIdx = (idx&(i - 1)); // first log_2(i) least significant bits from idx
unsigned int address1 = ((idx - lsbIdx) << 1) + lsbIdx; // move all bits of idx with position > log_2(i) by one to the left
unsigned int address2 = address1^(mask + i); // flip all bits <= log_2(i)
if ( sh_keys[address1] > sh_keys[address2] )
thrust::swap(sh_keys[address1], sh_keys[address2]);
}
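// Worked example of the address arithmetic above (illustrative): for i = 4 the
// threads idx = 4..7 handle the 8-element block at addresses 8..15; with idx = 5,
// lsbIdx = 5 & 3 = 1, address1 = ((5 - 1) << 1) + 1 = 9 and address2 = 9 ^ (3 + 4) = 14,
// i.e. the splitter compares the mirrored pair (9, 14); the full set of pairs for
// this block is (8,15), (9,14), (10,13), (11,12).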
template <typename K, typename V>
__device__ __forceinline__ void splitterKeyValues(
K* sh_keys,
V* sh_vals,
unsigned int idx,
unsigned int i
) {
unsigned int mask = i - 1;
unsigned int lsbIdx = (idx&(i - 1)); // first log_2(i) least significant bits from idx
unsigned int address1 = ((idx - lsbIdx) << 1) + lsbIdx; // move all bits of idx with position > log_2(i) by one to the left
unsigned int address2 = address1^(mask + i); // flip all bits <= log_2(i)
if ( sh_keys[address1] > sh_keys[address2] ) {
thrust::swap(sh_keys[address1], sh_keys[address2]);
thrust::swap(sh_vals[address1], sh_vals[address2]);
}
}
template <typename T>
__device__ __forceinline__ void halfCleanerKeys(
T* sh_keys,
unsigned int idx,
unsigned int j
) {
unsigned int lsbIdx = (idx&(j - 1)); // first log_2(j) least significant bits from idx
unsigned int address1 = ((idx - lsbIdx) << 1) + lsbIdx; // move all bits of idx with position > log_2(j) by one to the left
unsigned int address2 = address1 + j;
if ( sh_keys[address1] > sh_keys[address2] )
thrust::swap(sh_keys[address1], sh_keys[address2]);
}
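// Illustrative example: for j = 4 and idx = 5, lsbIdx = 1, address1 = 9 and
// address2 = 9 + 4 = 13, so the half-cleaner compares the fixed-distance pair
// (9, 13); threads idx = 4..7 cover the pairs (8,12), (9,13), (10,14), (11,15).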
template <typename K, typename V>
__device__ __forceinline__ void halfCleanerKeyValues(
K* sh_keys,
V* sh_vals,
unsigned int idx,
unsigned int j
) {
unsigned int lsbIdx = (idx&(j - 1)); // first log_2(j) least significant bits from idx
unsigned int address1 = ((idx - lsbIdx) << 1) + lsbIdx; // move all bits of idx with position > log_2(j) by one to the left
unsigned int address2 = address1 + j;
if ( sh_keys[address1] > sh_keys[address2] ) {
thrust::swap(sh_keys[address1], sh_keys[address2]);
thrust::swap(sh_vals[address1], sh_vals[address2]);
}
}
template <typename T>
__global__ void kernelBitonicSort2048(
T* const d_in,
const unsigned int length,
const T T_max
) {
    // this is needed for dynamically allocated shared memory, otherwise the extern declarations would have name conflicts
extern __shared__ __align__(sizeof(T)) unsigned char sh_mem[];
T* sh_data = reinterpret_cast<T*>(sh_mem);
unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int idx = threadIdx.x;
unsigned int bDim = blockDim.x;
// load data and perform the first comparison swap (i.e. i=1)
if (2*absIdx + 1 < length) {
sh_data[2*idx ] = min( d_in[2*absIdx], d_in[2*absIdx + 1] );
sh_data[2*idx + 1] = max( d_in[2*absIdx], d_in[2*absIdx + 1] );
} else if (2*absIdx == length - 1) {
sh_data[2*idx ] = d_in[2*absIdx];
sh_data[2*idx + 1] = T_max;
} else {
sh_data[2*idx ] = T_max;
sh_data[2*idx + 1] = T_max;
}
__syncthreads();
unsigned int i, j;
for (i = 2; i <= warpSize; i <<= 1) { // warps are synchronised
splitterKeys(sh_data, idx, i);
for (j = i>>1; j > 0; j >>= 1) { // warps are synchronised
halfCleanerKeys(sh_data, idx, j);
}
}
__syncthreads();
for ( ; i <= bDim; i <<= 1) {
splitterKeys(sh_data, idx, i);
__syncthreads();
for (j = i>>1; j > warpSize; j >>= 1) {
halfCleanerKeys(sh_data, idx, j);
__syncthreads();
}
for ( ; j > 0; j >>= 1) { // warps are synchronised
halfCleanerKeys(sh_data, idx, j);
}
__syncthreads();
}
if (2*absIdx < length)
d_in[2*absIdx] = sh_data[2*idx];
if (2*absIdx + 1 < length)
d_in[2*absIdx + 1] = sh_data[2*idx + 1];
}
template <typename T>
__device__ __forceinline__ unsigned int getPosition(
const T element,
const T* const d_data,
unsigned int first,
unsigned int last,
bool equal
) {
unsigned int mid = first + (last - first)/2;
while (mid != first) {
if (element < d_data[mid])
last = mid;
else
first = mid;
mid = first + (last - first)/2;
}
if (equal)
return (element <= d_data[first])? first : last;
else
return (element < d_data[first])? first : last;
}
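// Note: getPosition behaves like std::upper_bound when equal == false and like
// std::lower_bound when equal == true; kernelParallelMerge below uses the two
// variants for the even (left) and odd (right) chunks respectively, so elements
// with equal keys coming from the two chunks are assigned distinct output
// positions and the scatter never writes the same slot twice.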
template <typename T>
__global__ void kernelParallelMerge(
const T* const d_in,
const unsigned int length,
const unsigned int len_sorted_chunk,
const unsigned int exponent,
T* const d_out
) {
unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (absIdx >= length)
return;
unsigned int chunckIdx = absIdx>>exponent;
unsigned int chunckFirst = chunckIdx<<exponent;
unsigned int mergedFirst, searchFirst, searchLast, newPosition;
bool equal;
if ((chunckIdx&1) == 0) {
mergedFirst = chunckFirst;
searchFirst = chunckFirst + len_sorted_chunk;
searchLast = min(searchFirst + len_sorted_chunk, length);
equal = false;
} else {
mergedFirst = chunckFirst - len_sorted_chunk;
searchFirst = mergedFirst;
searchLast = chunckFirst;
equal = true;
}
if (searchFirst >= length)
return;
newPosition = absIdx - chunckFirst;
newPosition += getPosition(d_in[absIdx], d_in, searchFirst, searchLast, equal) - searchFirst;
newPosition += mergedFirst;
d_out[newPosition] = d_in[absIdx];
}
template <typename T>
void pGPUsort(
T* d_in,
const unsigned int length,
const T T_max
) {
dim3 blockDim(1024, 1, 1);
unsigned int gridX = ui_ceilDiv(length, 2*blockDim.x);
dim3 gridDim(gridX, 1, 1);
hipLaunchKernelGGL(( kernelBitonicSort2048), dim3(gridDim), dim3(blockDim), 2*blockDim.x*sizeof(T), 0, d_in, length, T_max);
gridX = ui_ceilDiv(length, blockDim.x);
gridDim.x = gridX;
T* d_inter;
allocCudaMem((void**) &d_inter, length*sizeof(T)); // gpuMemFree((void**) &d_inter);
T * d_1 = d_in, * d_2 = d_inter;
unsigned int exponent = 11; // 2^11 = 2048
for (unsigned int lenSortedChunk = 2048; lenSortedChunk < length; lenSortedChunk <<= 1) {
hipLaunchKernelGGL(( kernelParallelMerge), dim3(gridDim), dim3(blockDim), 0, 0, d_1, length, lenSortedChunk, exponent, d_2);
std::swap(d_1, d_2);
exponent++;
}
memcpyGPUtoGPU((void*) d_1, (void*) d_in, length*sizeof(T));
gpuMemFree((void**) &d_inter);
}
void parallelGPUsort(
unsigned int* const d_in,
const unsigned int length
) {
unsigned int T_max = std::numeric_limits<unsigned int>::max();
pGPUsort(d_in, length, T_max);
}
void parallelGPUsort(
std::pair<unsigned int, unsigned int>* const d_in,
const unsigned int length
) {
unsigned int UI_MAX = std::numeric_limits<unsigned int>::max();
auto T_max = thrust::pair<unsigned int, unsigned int>(UI_MAX, UI_MAX);
pGPUsort( (thrust::pair<unsigned int, unsigned int>*) d_in, length, T_max );
}
// -------------------- GPU Parallel Radix Sort --------------------
__device__ __forceinline__ unsigned int getKey(
unsigned int key
) {
return key;
}
__device__ __forceinline__ unsigned int getKey(
thrust::pair<unsigned int, unsigned int> element
) {
return element.first;
}
template <typename T>
__global__ void kernelSortTile(
T* const d_in,
const unsigned int length,
const T T_max,
const unsigned int r_shift,
const unsigned int full_mask,
const unsigned int n_bins,
unsigned int* const d_hist,
unsigned int* const d_offsets
) {
unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int idx = threadIdx.x;
unsigned int bDim = blockDim.x;
unsigned int hbDim = bDim>>1;
unsigned int gDim = gridDim.x;
    // this is needed for dynamically allocated shared memory, otherwise the extern declarations would have name conflicts
extern __shared__ __align__(sizeof(T)) unsigned char sh_mem[];
T* sh_data = reinterpret_cast<T*>(sh_mem);
T* sh_vals = sh_data; // length = bDim
unsigned int* sh_keys = (unsigned int*) &sh_vals[bDim]; // length = bDim
unsigned int* sh_hist = (unsigned int*) &sh_keys[bDim]; // length = n_bins
if (idx < n_bins) // NOTE: this works only in case when (n_bins < bDim), else sh_hist will contain rubbish
sh_hist[idx] = 0U;
unsigned int key;
T val;
if (absIdx < length)
val = d_in[absIdx];
else
val = T_max;
key = getKey(val);
key = (key >> r_shift) & full_mask;
__syncthreads();
atomicAdd(&sh_hist[key], 1);
key = (key << 16) + idx;
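    // Packing the bin into the high 16 bits and the tile-local index into the
    // low 16 bits makes all keys distinct, so the bitonic sort below orders
    // equal bins by original position (a stable per-tile sort); this requires
    // blockDim.x <= 65536, which holds for the 128-thread blocks used here.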
sh_keys[idx] = key;
sh_vals[idx] = val;
if(idx >= hbDim)
return;
__syncthreads();
// Bitonic sort
unsigned int i, j;
for (i = 1; i <= warpSize; i <<= 1) { // warps are synchronised
splitterKeyValues(sh_keys, sh_vals, idx, i);
for (j = i>>1; j > 0; j >>= 1) { // warps are synchronised
halfCleanerKeyValues(sh_keys, sh_vals, idx, j);
}
}
__syncthreads();
for ( ; i <= hbDim; i <<= 1) {
splitterKeyValues(sh_keys, sh_vals, idx, i);
__syncthreads();
for (j = i>>1; j > warpSize; j >>= 1) {
halfCleanerKeyValues(sh_keys, sh_vals, idx, j);
__syncthreads();
}
for ( ; j > 0; j >>= 1) { // warps are synchronised
halfCleanerKeyValues(sh_keys, sh_vals, idx, j);
}
__syncthreads();
}
// Copy data to global memory
if (absIdx + hbDim < length)
d_in[absIdx + hbDim] = sh_vals[idx + hbDim];
if (absIdx < length)
d_in[absIdx] = sh_vals[idx];
// NOTE: this works only in case when (n_bins < warpSize < bDim/2)
if (idx >= n_bins)
return;
d_hist[blockIdx.x + idx*gDim] = sh_hist[idx];
    // warp-synchronous inclusive scan of [sh_hist, sh_hist + n_bins)
for (unsigned int i = 1; i < n_bins-1; i <<= 1) {
if (idx >= i) {
sh_hist[idx] += sh_hist[idx - i];
}
}
d_offsets[blockIdx.x*n_bins + idx] = sh_hist[idx];
}
template <typename T>
__global__ void kernelMoveElements(
T* const d_in,
const unsigned int length,
const unsigned int r_shift,
const unsigned int full_mask,
const unsigned int n_bins,
const unsigned int* d_scan,
const unsigned int* const d_offsets,
T* const d_out
) {
    // this is needed for dynamically allocated shared memory, otherwise the extern declarations would have name conflicts
extern __shared__ __align__(sizeof(unsigned int)) unsigned char sh_mem[];
unsigned int* sh_hist = reinterpret_cast<unsigned int*>(sh_mem);
unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int idx = threadIdx.x;
unsigned int gDim = gridDim.x;
if (absIdx >= length)
return;
if (idx == 0)
sh_hist[0] = 0;
if (idx < n_bins-1) {
sh_hist[idx + 1] = d_offsets[blockIdx.x*n_bins + idx];
}
T val = d_in[absIdx];
unsigned int key = getKey(val);
unsigned int bin = (key >> r_shift) & full_mask;
__syncthreads();
unsigned int newPosition = d_scan[blockIdx.x + bin*gDim] + idx - sh_hist[bin];
d_out[newPosition] = val;
}
template <typename T>
unsigned int getAllBits(
T* const d_in,
const unsigned int length
) {
unsigned int allBits;
if ( sizeof(T) == sizeof(unsigned int) )
allBits = parallelGPUreduceBitOr( (unsigned int*)d_in, length );
else
allBits = parallelGPUreduceBitOr( (std::pair<unsigned int, unsigned int>*)d_in, length );
return allBits;
}
template <typename T>
void pGPUradixsort(
T* const d_in,
const unsigned int length,
const T T_max
) {
dim3 blockDim(128, 1, 1);
unsigned int gridX = ui_ceilDiv(length, blockDim.x);
dim3 gridDim(gridX, 1, 1);
unsigned int n_bits = 5; // works only for n_bins < warpSize
    unsigned int n_bins = 1 << n_bits; // 2^n_bits
unsigned int fullMask = (1 << n_bits) - 1;
unsigned int sh_len1 = (blockDim.x)*sizeof(T) + (blockDim.x + n_bins)*sizeof(unsigned int);
unsigned int sh_len2 = n_bins*sizeof(unsigned int);
unsigned int allBits = getAllBits(d_in, length);
unsigned int bitpos = 0;
while (allBits != 0) {
bitpos += 1;
allBits >>= 1;
}
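    // bitpos now holds the number of significant key bits (index of the highest
    // set bit of the OR of all keys, plus one); all higher digits are zero and
    // are skipped by the radix loop below.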
unsigned int * d_hist, * d_offsets;
allocCudaMem((void**) &d_hist, (gridDim.x*n_bins + 1)*sizeof(unsigned int)); // gpuMemFree((void**) &d_hist);
allocCudaMem((void**) &d_offsets, (gridDim.x*n_bins )*sizeof(unsigned int)); // gpuMemFree((void**) &d_offsets);
T* d_tmp;
allocCudaMem((void**) &d_tmp, length*sizeof(T)); // gpuMemFree((void**) &d_tmp);
T* d_1 = d_in, * d_2 = d_tmp;
for (unsigned int r_shift = 0; r_shift < bitpos; r_shift += n_bits) {
if (bitpos - r_shift < n_bits) {
n_bits = bitpos - r_shift;
n_bins = 1 << n_bits;
fullMask = (1 << n_bits) - 1;
sh_len1 = (blockDim.x)*sizeof(T) + (blockDim.x + n_bins)*sizeof(unsigned int);
sh_len2 = n_bins*sizeof(unsigned int);
}
hipLaunchKernelGGL(( kernelSortTile), dim3(gridDim), dim3(blockDim), sh_len1, 0, d_1, length, T_max, r_shift, fullMask, n_bins, d_hist+1, d_offsets);
parallelGPUscan(d_hist+1, n_bins*gridDim.x, d_hist+1);
hipLaunchKernelGGL(( kernelMoveElements), dim3(gridDim), dim3(blockDim), sh_len2, 0, d_1, length, r_shift, fullMask, n_bins, d_hist, d_offsets, d_2);
std::swap(d_1, d_2);
}
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
if (d_1 != d_in)
memcpyGPUtoGPU((void*) d_1, (void*) d_in, length*sizeof(T));
gpuMemFree((void**) &d_hist);
gpuMemFree((void**) &d_offsets);
gpuMemFree((void**) &d_tmp);
}
void parallelGPUradixsort(
unsigned int* const d_in,
const unsigned int length
) {
unsigned int UI_MAX = std::numeric_limits<unsigned int>::max();
pGPUradixsort(d_in, length, UI_MAX);
}
void parallelGPUradixsort(
std::pair<unsigned int, unsigned int>* const d_in,
const unsigned int length
) {
unsigned int UI_MAX = std::numeric_limits<unsigned int>::max();
auto T_max = thrust::pair<unsigned int, unsigned int>(UI_MAX, UI_MAX);
pGPUradixsort( (thrust::pair<unsigned int, unsigned int>*)d_in, length, T_max );
}
|
323df6e9e614588743aa8535de3ca5791b7473e1.cu
|
#include "sort.hpp"
#include "checkCudaErrors.hpp"
#include "cudaMemory.hpp"
#include "functions.hpp"
#include <cuda.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <limits>
#include <utility>
#include "scan.hpp"
#include "reduce.hpp"
// -------------------- GPU Parallel Sort (thrust) --------------------
void thrustGPUsort(
unsigned int* const d_in,
const unsigned int length
) {
thrust::sort(thrust::device, d_in, d_in + length);
}
void thrustGPUsort(
std::pair<unsigned int, unsigned int>* const d_in,
const unsigned int length
) {
thrust::sort(
thrust::device,
(thrust::pair<unsigned int, unsigned int>*) d_in,
(thrust::pair<unsigned int, unsigned int>*) d_in + length
);
}
// -------------------- GPU Parallel Sort (Bitonic Sort & Merge Sort) --------------------
// ⇩ memory address; (⇩ ⇩ ⇩ ⇩) splitter network (⇩ ⇩ ⇩ ⇩) half_cleaner network
// 0 -----◯--------------◯------◯----------------◯----------◯------◯-----------◯------------------◯----------◯------◯---...
// idx=1-> | | | <-idx=1 | | | | | | | ...
// 1 -----◯----------------◯----◯------------------◯----------◯----◯-------------◯------------------◯----------◯----◯---...
// idx=1-> | | <-idx=2 | | | | | | | | | | ...
// 2 -----◯----------------◯----◯--------------------◯------◯------◯---------------◯------------------◯------◯------◯---...
// idx=2-> | | | <-idx=2 | | | | | | | | | | | | | ...
// 3 -----◯--------------◯------◯----------------------◯------◯----◯-----------------◯------------------◯------◯----◯---...
// | | | | | | | | | | | | ...
// 4 -----◯--------------◯------◯----------------------◯----◯------◯-------------------◯----------◯----------◯------◯---...
// idx=3-> | | | <-idx=3 | | | | | | | | | | | | | | | ...
// 5 -----◯----------------◯----◯--------------------◯--------◯----◯---------------------◯----------◯----------◯----◯---...
// idx=3-> | | <-idx=4 | | | | | | | | | | | | | | ...
// 6 -----◯----------------◯----◯------------------◯--------◯------◯-----------------------◯----------◯------◯------◯---...
// idx=4-> | | | <-idx=4 | | | | | | | | | | | | | ...
// 7 -----◯--------------◯------◯----------------◯------------◯----◯-------------------------◯----------◯------◯----◯---...
// ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮ ⋮
// i: |<- 1 ->| |<----- 2 ---->| |<------------ 4 -------->| |<-------------------- 8 ------------------->|
// j: x xxx <1> xxxxxxx <-2-> <1> xxxxxxxxxxxxxxx <-- 4 --> <-2-> <1>
// i is the number of threads used to sort a chunk of 2*i elements (i doubles at every stage)
template <typename T>
__device__ __forceinline__ void splitterKeys(
T* sh_keys,
unsigned int idx,
unsigned int i
) {
unsigned int mask = i - 1;
unsigned int lsbIdx = (idx&(i - 1)); // first log_2(i) least significant bits from idx
unsigned int address1 = ((idx - lsbIdx) << 1) + lsbIdx; // move all bits of idx with position > log_2(i) by one to the left
unsigned int address2 = address1^(mask + i); // flip all bits <= log_2(i)
if ( sh_keys[address1] > sh_keys[address2] )
thrust::swap(sh_keys[address1], sh_keys[address2]);
}
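// Worked example of the address arithmetic above (illustrative): for i = 4 the
// threads idx = 4..7 handle the 8-element block at addresses 8..15; with idx = 5,
// lsbIdx = 5 & 3 = 1, address1 = ((5 - 1) << 1) + 1 = 9 and address2 = 9 ^ (3 + 4) = 14,
// i.e. the splitter compares the mirrored pair (9, 14); the full set of pairs for
// this block is (8,15), (9,14), (10,13), (11,12).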
template <typename K, typename V>
__device__ __forceinline__ void splitterKeyValues(
K* sh_keys,
V* sh_vals,
unsigned int idx,
unsigned int i
) {
unsigned int mask = i - 1;
unsigned int lsbIdx = (idx&(i - 1)); // first log_2(i) least significant bits from idx
unsigned int address1 = ((idx - lsbIdx) << 1) + lsbIdx; // move all bits of idx with position > log_2(i) by one to the left
unsigned int address2 = address1^(mask + i); // flip all bits <= log_2(i)
if ( sh_keys[address1] > sh_keys[address2] ) {
thrust::swap(sh_keys[address1], sh_keys[address2]);
thrust::swap(sh_vals[address1], sh_vals[address2]);
}
}
template <typename T>
__device__ __forceinline__ void halfCleanerKeys(
T* sh_keys,
unsigned int idx,
unsigned int j
) {
unsigned int lsbIdx = (idx&(j - 1)); // first log_2(j) least significant bits from idx
unsigned int address1 = ((idx - lsbIdx) << 1) + lsbIdx; // move all bits of idx with position > log_2(j) by one to the left
unsigned int address2 = address1 + j;
if ( sh_keys[address1] > sh_keys[address2] )
thrust::swap(sh_keys[address1], sh_keys[address2]);
}
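// Illustrative example: for j = 4 and idx = 5, lsbIdx = 1, address1 = 9 and
// address2 = 9 + 4 = 13, so the half-cleaner compares the fixed-distance pair
// (9, 13); threads idx = 4..7 cover the pairs (8,12), (9,13), (10,14), (11,15).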
template <typename K, typename V>
__device__ __forceinline__ void halfCleanerKeyValues(
K* sh_keys,
V* sh_vals,
unsigned int idx,
unsigned int j
) {
unsigned int lsbIdx = (idx&(j - 1)); // first log_2(j) least significant bits from idx
unsigned int address1 = ((idx - lsbIdx) << 1) + lsbIdx; // move all bits of idx with position > log_2(j) by one to the left
unsigned int address2 = address1 + j;
if ( sh_keys[address1] > sh_keys[address2] ) {
thrust::swap(sh_keys[address1], sh_keys[address2]);
thrust::swap(sh_vals[address1], sh_vals[address2]);
}
}
template <typename T>
__global__ void kernelBitonicSort2048(
T* const d_in,
const unsigned int length,
const T T_max
) {
    // this is needed for dynamically allocated shared memory, otherwise the extern declarations would have name conflicts
extern __shared__ __align__(sizeof(T)) unsigned char sh_mem[];
T* sh_data = reinterpret_cast<T*>(sh_mem);
unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int idx = threadIdx.x;
unsigned int bDim = blockDim.x;
// load data and perform the first comparison swap (i.e. i=1)
if (2*absIdx + 1 < length) {
sh_data[2*idx ] = min( d_in[2*absIdx], d_in[2*absIdx + 1] );
sh_data[2*idx + 1] = max( d_in[2*absIdx], d_in[2*absIdx + 1] );
} else if (2*absIdx == length - 1) {
sh_data[2*idx ] = d_in[2*absIdx];
sh_data[2*idx + 1] = T_max;
} else {
sh_data[2*idx ] = T_max;
sh_data[2*idx + 1] = T_max;
}
__syncthreads();
unsigned int i, j;
for (i = 2; i <= warpSize; i <<= 1) { // warps are synchronised
splitterKeys(sh_data, idx, i);
for (j = i>>1; j > 0; j >>= 1) { // warps are synchronised
halfCleanerKeys(sh_data, idx, j);
}
}
__syncthreads();
for ( ; i <= bDim; i <<= 1) {
splitterKeys(sh_data, idx, i);
__syncthreads();
for (j = i>>1; j > warpSize; j >>= 1) {
halfCleanerKeys(sh_data, idx, j);
__syncthreads();
}
for ( ; j > 0; j >>= 1) { // warps are synchronised
halfCleanerKeys(sh_data, idx, j);
}
__syncthreads();
}
if (2*absIdx < length)
d_in[2*absIdx] = sh_data[2*idx];
if (2*absIdx + 1 < length)
d_in[2*absIdx + 1] = sh_data[2*idx + 1];
}
template <typename T>
__device__ __forceinline__ unsigned int getPosition(
const T element,
const T* const d_data,
unsigned int first,
unsigned int last,
bool equal
) {
unsigned int mid = first + (last - first)/2;
while (mid != first) {
if (element < d_data[mid])
last = mid;
else
first = mid;
mid = first + (last - first)/2;
}
if (equal)
return (element <= d_data[first])? first : last;
else
return (element < d_data[first])? first : last;
}
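// Note: getPosition behaves like std::upper_bound when equal == false and like
// std::lower_bound when equal == true; kernelParallelMerge below uses the two
// variants for the even (left) and odd (right) chunks respectively, so elements
// with equal keys coming from the two chunks are assigned distinct output
// positions and the scatter never writes the same slot twice.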
template <typename T>
__global__ void kernelParallelMerge(
const T* const d_in,
const unsigned int length,
const unsigned int len_sorted_chunk,
const unsigned int exponent,
T* const d_out
) {
unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (absIdx >= length)
return;
unsigned int chunckIdx = absIdx>>exponent;
unsigned int chunckFirst = chunckIdx<<exponent;
unsigned int mergedFirst, searchFirst, searchLast, newPosition;
bool equal;
if ((chunckIdx&1) == 0) {
mergedFirst = chunckFirst;
searchFirst = chunckFirst + len_sorted_chunk;
searchLast = min(searchFirst + len_sorted_chunk, length);
equal = false;
} else {
mergedFirst = chunckFirst - len_sorted_chunk;
searchFirst = mergedFirst;
searchLast = chunckFirst;
equal = true;
}
if (searchFirst >= length)
return;
newPosition = absIdx - chunckFirst;
newPosition += getPosition(d_in[absIdx], d_in, searchFirst, searchLast, equal) - searchFirst;
newPosition += mergedFirst;
d_out[newPosition] = d_in[absIdx];
}
template <typename T>
void pGPUsort(
T* d_in,
const unsigned int length,
const T T_max
) {
dim3 blockDim(1024, 1, 1);
unsigned int gridX = ui_ceilDiv(length, 2*blockDim.x);
dim3 gridDim(gridX, 1, 1);
kernelBitonicSort2048<<<gridDim, blockDim, 2*blockDim.x*sizeof(T)>>>(d_in, length, T_max);
gridX = ui_ceilDiv(length, blockDim.x);
gridDim.x = gridX;
T* d_inter;
allocCudaMem((void**) &d_inter, length*sizeof(T)); // gpuMemFree((void**) &d_inter);
T * d_1 = d_in, * d_2 = d_inter;
unsigned int exponent = 11; // 2^11 = 2048
for (unsigned int lenSortedChunk = 2048; lenSortedChunk < length; lenSortedChunk <<= 1) {
kernelParallelMerge<<<gridDim, blockDim>>>(d_1, length, lenSortedChunk, exponent, d_2);
std::swap(d_1, d_2);
exponent++;
}
memcpyGPUtoGPU((void*) d_1, (void*) d_in, length*sizeof(T));
gpuMemFree((void**) &d_inter);
}
void parallelGPUsort(
unsigned int* const d_in,
const unsigned int length
) {
unsigned int T_max = std::numeric_limits<unsigned int>::max();
pGPUsort(d_in, length, T_max);
}
void parallelGPUsort(
std::pair<unsigned int, unsigned int>* const d_in,
const unsigned int length
) {
unsigned int UI_MAX = std::numeric_limits<unsigned int>::max();
auto T_max = thrust::pair<unsigned int, unsigned int>(UI_MAX, UI_MAX);
pGPUsort( (thrust::pair<unsigned int, unsigned int>*) d_in, length, T_max );
}
// -------------------- GPU Parallel Radix Sort --------------------
__device__ __forceinline__ unsigned int getKey(
unsigned int key
) {
return key;
}
__device__ __forceinline__ unsigned int getKey(
thrust::pair<unsigned int, unsigned int> element
) {
return element.first;
}
template <typename T>
__global__ void kernelSortTile(
T* const d_in,
const unsigned int length,
const T T_max,
const unsigned int r_shift,
const unsigned int full_mask,
const unsigned int n_bins,
unsigned int* const d_hist,
unsigned int* const d_offsets
) {
unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int idx = threadIdx.x;
unsigned int bDim = blockDim.x;
unsigned int hbDim = bDim>>1;
unsigned int gDim = gridDim.x;
    // this is needed for dynamically allocated shared memory, otherwise the extern declarations would have name conflicts
extern __shared__ __align__(sizeof(T)) unsigned char sh_mem[];
T* sh_data = reinterpret_cast<T*>(sh_mem);
T* sh_vals = sh_data; // length = bDim
unsigned int* sh_keys = (unsigned int*) &sh_vals[bDim]; // length = bDim
unsigned int* sh_hist = (unsigned int*) &sh_keys[bDim]; // length = n_bins
if (idx < n_bins) // NOTE: this works only in case when (n_bins < bDim), else sh_hist will contain rubbish
sh_hist[idx] = 0U;
unsigned int key;
T val;
if (absIdx < length)
val = d_in[absIdx];
else
val = T_max;
key = getKey(val);
key = (key >> r_shift) & full_mask;
__syncthreads();
atomicAdd(&sh_hist[key], 1);
key = (key << 16) + idx;
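    // Packing the bin into the high 16 bits and the tile-local index into the
    // low 16 bits makes all keys distinct, so the bitonic sort below orders
    // equal bins by original position (a stable per-tile sort); this requires
    // blockDim.x <= 65536, which holds for the 128-thread blocks used here.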
sh_keys[idx] = key;
sh_vals[idx] = val;
if(idx >= hbDim)
return;
__syncthreads();
// Bitonic sort
unsigned int i, j;
for (i = 1; i <= warpSize; i <<= 1) { // warps are synchronised
splitterKeyValues(sh_keys, sh_vals, idx, i);
for (j = i>>1; j > 0; j >>= 1) { // warps are synchronised
halfCleanerKeyValues(sh_keys, sh_vals, idx, j);
}
}
__syncthreads();
for ( ; i <= hbDim; i <<= 1) {
splitterKeyValues(sh_keys, sh_vals, idx, i);
__syncthreads();
for (j = i>>1; j > warpSize; j >>= 1) {
halfCleanerKeyValues(sh_keys, sh_vals, idx, j);
__syncthreads();
}
for ( ; j > 0; j >>= 1) { // warps are synchronised
halfCleanerKeyValues(sh_keys, sh_vals, idx, j);
}
__syncthreads();
}
// Copy data to global memory
if (absIdx + hbDim < length)
d_in[absIdx + hbDim] = sh_vals[idx + hbDim];
if (absIdx < length)
d_in[absIdx] = sh_vals[idx];
// NOTE: this works only in case when (n_bins < warpSize < bDim/2)
if (idx >= n_bins)
return;
d_hist[blockIdx.x + idx*gDim] = sh_hist[idx];
    // warp-synchronous inclusive scan of [sh_hist, sh_hist + n_bins)
for (unsigned int i = 1; i < n_bins-1; i <<= 1) {
if (idx >= i) {
sh_hist[idx] += sh_hist[idx - i];
}
}
d_offsets[blockIdx.x*n_bins + idx] = sh_hist[idx];
}
template <typename T>
__global__ void kernelMoveElements(
T* const d_in,
const unsigned int length,
const unsigned int r_shift,
const unsigned int full_mask,
const unsigned int n_bins,
const unsigned int* d_scan,
const unsigned int* const d_offsets,
T* const d_out
) {
    // this is needed for dynamically allocated shared memory, otherwise the extern declarations would have name conflicts
extern __shared__ __align__(sizeof(unsigned int)) unsigned char sh_mem[];
unsigned int* sh_hist = reinterpret_cast<unsigned int*>(sh_mem);
unsigned int absIdx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int idx = threadIdx.x;
unsigned int gDim = gridDim.x;
if (absIdx >= length)
return;
if (idx == 0)
sh_hist[0] = 0;
if (idx < n_bins-1) {
sh_hist[idx + 1] = d_offsets[blockIdx.x*n_bins + idx];
}
T val = d_in[absIdx];
unsigned int key = getKey(val);
unsigned int bin = (key >> r_shift) & full_mask;
__syncthreads();
unsigned int newPosition = d_scan[blockIdx.x + bin*gDim] + idx - sh_hist[bin];
d_out[newPosition] = val;
}
template <typename T>
unsigned int getAllBits(
T* const d_in,
const unsigned int length
) {
unsigned int allBits;
if ( sizeof(T) == sizeof(unsigned int) )
allBits = parallelGPUreduceBitOr( (unsigned int*)d_in, length );
else
allBits = parallelGPUreduceBitOr( (std::pair<unsigned int, unsigned int>*)d_in, length );
return allBits;
}
template <typename T>
void pGPUradixsort(
T* const d_in,
const unsigned int length,
const T T_max
) {
dim3 blockDim(128, 1, 1);
unsigned int gridX = ui_ceilDiv(length, blockDim.x);
dim3 gridDim(gridX, 1, 1);
unsigned int n_bits = 5; // works only for n_bins < warpSize
    unsigned int n_bins = 1 << n_bits; // 2^n_bits
unsigned int fullMask = (1 << n_bits) - 1;
unsigned int sh_len1 = (blockDim.x)*sizeof(T) + (blockDim.x + n_bins)*sizeof(unsigned int);
unsigned int sh_len2 = n_bins*sizeof(unsigned int);
unsigned int allBits = getAllBits(d_in, length);
unsigned int bitpos = 0;
while (allBits != 0) {
bitpos += 1;
allBits >>= 1;
}
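    // bitpos now holds the number of significant key bits (index of the highest
    // set bit of the OR of all keys, plus one); all higher digits are zero and
    // are skipped by the radix loop below.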
unsigned int * d_hist, * d_offsets;
allocCudaMem((void**) &d_hist, (gridDim.x*n_bins + 1)*sizeof(unsigned int)); // gpuMemFree((void**) &d_hist);
allocCudaMem((void**) &d_offsets, (gridDim.x*n_bins )*sizeof(unsigned int)); // gpuMemFree((void**) &d_offsets);
T* d_tmp;
allocCudaMem((void**) &d_tmp, length*sizeof(T)); // gpuMemFree((void**) &d_tmp);
T* d_1 = d_in, * d_2 = d_tmp;
for (unsigned int r_shift = 0; r_shift < bitpos; r_shift += n_bits) {
if (bitpos - r_shift < n_bits) {
n_bits = bitpos - r_shift;
n_bins = 1 << n_bits;
fullMask = (1 << n_bits) - 1;
sh_len1 = (blockDim.x)*sizeof(T) + (blockDim.x + n_bins)*sizeof(unsigned int);
sh_len2 = n_bins*sizeof(unsigned int);
}
kernelSortTile<<<gridDim, blockDim, sh_len1>>>(d_1, length, T_max, r_shift, fullMask, n_bins, d_hist+1, d_offsets);
parallelGPUscan(d_hist+1, n_bins*gridDim.x, d_hist+1);
kernelMoveElements<<<gridDim, blockDim, sh_len2>>>(d_1, length, r_shift, fullMask, n_bins, d_hist, d_offsets, d_2);
std::swap(d_1, d_2);
}
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
if (d_1 != d_in)
memcpyGPUtoGPU((void*) d_1, (void*) d_in, length*sizeof(T));
gpuMemFree((void**) &d_hist);
gpuMemFree((void**) &d_offsets);
gpuMemFree((void**) &d_tmp);
}
void parallelGPUradixsort(
unsigned int* const d_in,
const unsigned int length
) {
unsigned int UI_MAX = std::numeric_limits<unsigned int>::max();
pGPUradixsort(d_in, length, UI_MAX);
}
void parallelGPUradixsort(
std::pair<unsigned int, unsigned int>* const d_in,
const unsigned int length
) {
unsigned int UI_MAX = std::numeric_limits<unsigned int>::max();
auto T_max = thrust::pair<unsigned int, unsigned int>(UI_MAX, UI_MAX);
pGPUradixsort( (thrust::pair<unsigned int, unsigned int>*)d_in, length, T_max );
}
|
6a8f0ffc4682ac989180eaa84e7d3df28f0c2f54.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
6a8f0ffc4682ac989180eaa84e7d3df28f0c2f54.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
RectRecvFluxIntegral.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RectRecvFluxIntegral.cuh"
void calcRectRecvEnergySum(int m, int n, int helioNum, IntegralHelioDeviceArgumet& h_args, ReceiverDeviceArgument& r_args, GaussLegendre& gl_handler, float* d_helio_energy){
int nThreads = 512;
dim3 nBlocks;
GeometryFunc::setThreadsBlocks(nBlocks, nThreads, helioNum*m*n*r_args.numberOfReceivers);
calcHelioRectRecvFlux << <nBlocks, nThreads >> > (h_args, r_args, gl_handler, d_helio_energy, m, n);
hipDeviceSynchronize();
hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "%s\n", hipGetErrorString(cudaStatus));
}
}
__global__ void calcRectRecvFluxSum(IntegralHelioDeviceArgumet h_args, ReceiverDeviceArgument r_args, GaussLegendre gl, float* d_total_energy, const int m, const int n) {
float res = calcRectRecvFluxIntegralCore(h_args, r_args, gl, m, n);
if (res < Epsilon) return;
atomicAdd(d_total_energy, res);
}
__global__ void calcHelioRectRecvFlux(IntegralHelioDeviceArgumet h_args, ReceiverDeviceArgument r_args, GaussLegendre gl, float* d_helio_energy, const int m, const int n) {
int myId = GeometryFunc::getThreadId();
if (myId >= m*n*h_args.numberOfHeliostats*r_args.numberOfReceivers) return;
float res = calcRectRecvFluxIntegralCore(h_args, r_args, gl, m, n);
int helioIndex = myId / (m*n*r_args.numberOfReceivers);
atomicAdd(d_helio_energy + helioIndex, res);
}
__device__ float calcRectRecvFluxIntegralCore(IntegralHelioDeviceArgumet& h_args, ReceiverDeviceArgument& r_args, GaussLegendre& gl, const int m, const int n) {
int myId = GeometryFunc::getThreadId();
if (myId >= m*n*h_args.numberOfHeliostats*r_args.numberOfReceivers) return -1;
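// The flat thread id encodes (heliostat, receiver, grid cell):
// myId = helioIndex*(m*n*numberOfReceivers) + recvIndex*(m*n) + i*n + j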
int helioIndex = myId / (m*n*r_args.numberOfReceivers);
int recvIndex = (myId % (m*n*r_args.numberOfReceivers)) / (m*n);
int row_col = (myId % (m*n*r_args.numberOfReceivers)) % (m*n);
int i = row_col / n;
int j = row_col % n;
return calcRecvFluxIntegralCore(h_args, r_args, gl, helioIndex, recvIndex, i, j, m, n);
}
__device__ float calcRecvFluxIntegralCore(IntegralHelioDeviceArgumet& h_args, ReceiverDeviceArgument& r_args, GaussLegendre& gl, int helioIndex, int recvIndex, int i, int j, int m, int n) {
int focus_idx = h_args.d_focus_index[helioIndex];
float3 focus_pos = r_args.d_recv_focus_pos[focus_idx];
float3 recv_normal = r_args.d_recv_normal[recvIndex];
float3 imgplane_normal = normalize(h_args.d_helio_pos[helioIndex] - focus_pos);
float cos_phi = dot(recv_normal, imgplane_normal);
if (cos_phi < Epsilon) return 0.f; // receiver faces away from the heliostat: no flux contribution
float3 reverse_dir = imgplane_normal; // The normal of image plane
float3* recv_v = r_args.d_recv_vertexes + 4 * recvIndex;
float4* imgplane_m = h_args.d_imgplane_world2local + 4 * helioIndex;
float2 proj_v[4];
float3 inter_v;
float3 h_center_bias = make_float3(0, 0, 0);
float3 i_center_bias = make_float3(0, 0, 0);
float rotate_theta = 0;
if (h_args.d_center_bias) {
h_center_bias = h_args.d_center_bias[helioIndex];
GeometryFunc::calcIntersection(reverse_dir, focus_pos, h_center_bias, -reverse_dir, i_center_bias);
i_center_bias = GeometryFunc::multMatrix(i_center_bias, imgplane_m);
rotate_theta = h_args.d_rotate_theta[helioIndex];
}
for (int i = 0; i < 4; ++i) {
GeometryFunc::calcIntersection(reverse_dir, focus_pos, recv_v[i], reverse_dir, inter_v);
inter_v = GeometryFunc::multMatrix(inter_v, imgplane_m);
proj_v[i] = make_float2(inter_v.x - i_center_bias.x, inter_v.z - i_center_bias.z);
float2 trans_v;
trans_v.x = proj_v[i].x*cos(rotate_theta) + proj_v[i].y*sin(rotate_theta);
trans_v.y = proj_v[i].y*cos(rotate_theta) - proj_v[i].x*sin(rotate_theta);
proj_v[i] = trans_v;
}
float2 row_gap = (proj_v[3] - proj_v[0]) / m;
float2 col_gap = (proj_v[1] - proj_v[0]) / n;
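// Subdivide the projected receiver quad into an m x n grid; this thread
// integrates the flux over cell (i, j) only.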
float2 gauss_param = h_args.d_gauss_param[helioIndex];
float l_w_ratio = gauss_param.x;
float sigma = gauss_param.y;
float4 tmp_x = make_float4(
(proj_v[0] + i*row_gap + j*col_gap).x,
(proj_v[0] + (i + 1)*row_gap + j*col_gap).x,
(proj_v[0] + (i + 1)*row_gap + (j + 1)*col_gap).x,
(proj_v[0] + i*row_gap + (j + 1)*col_gap).x
);
float4 tmp_y = make_float4(
(proj_v[0] + i*row_gap + j*col_gap).y,
(proj_v[0] + (i + 1)*row_gap + j*col_gap).y,
(proj_v[0] + (i + 1)*row_gap + (j + 1)*col_gap).y,
(proj_v[0] + i*row_gap + (j + 1)*col_gap).y
);
float sum = gl.calcInte(tmp_x, tmp_y, sigma, l_w_ratio) * h_args.d_factor[helioIndex];
return sum;
}
|
RectRecvFluxIntegral.cu
|
#include "RectRecvFluxIntegral.cuh"
void calcRectRecvEnergySum(int m, int n, int helioNum, IntegralHelioDeviceArgumet& h_args, ReceiverDeviceArgument& r_args, GaussLegendre& gl_handler, float* d_helio_energy){
int nThreads = 512;
dim3 nBlocks;
GeometryFunc::setThreadsBlocks(nBlocks, nThreads, helioNum*m*n*r_args.numberOfReceivers);
calcHelioRectRecvFlux << <nBlocks, nThreads >> > (h_args, r_args, gl_handler, d_helio_energy, m, n);
cudaDeviceSynchronize();
cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "%s\n", cudaGetErrorString(cudaStatus));
}
}
__global__ void calcRectRecvFluxSum(IntegralHelioDeviceArgumet h_args, ReceiverDeviceArgument r_args, GaussLegendre gl, float* d_total_energy, const int m, const int n) {
float res = calcRectRecvFluxIntegralCore(h_args, r_args, gl, m, n);
if (res < Epsilon) return;
atomicAdd(d_total_energy, res);
}
__global__ void calcHelioRectRecvFlux(IntegralHelioDeviceArgumet h_args, ReceiverDeviceArgument r_args, GaussLegendre gl, float* d_helio_energy, const int m, const int n) {
int myId = GeometryFunc::getThreadId();
if (myId >= m*n*h_args.numberOfHeliostats*r_args.numberOfReceivers) return;
float res = calcRectRecvFluxIntegralCore(h_args, r_args, gl, m, n);
int helioIndex = myId / (m*n*r_args.numberOfReceivers);
atomicAdd(d_helio_energy + helioIndex, res);
}
__device__ float calcRectRecvFluxIntegralCore(IntegralHelioDeviceArgumet& h_args, ReceiverDeviceArgument& r_args, GaussLegendre& gl, const int m, const int n) {
int myId = GeometryFunc::getThreadId();
if (myId >= m*n*h_args.numberOfHeliostats*r_args.numberOfReceivers) return -1;
int helioIndex = myId / (m*n*r_args.numberOfReceivers);
int recvIndex = (myId % (m*n*r_args.numberOfReceivers)) / (m*n);
int row_col = (myId % (m*n*r_args.numberOfReceivers)) % (m*n);
int i = row_col / n;
int j = row_col % n;
return calcRecvFluxIntegralCore(h_args, r_args, gl, helioIndex, recvIndex, i, j, m, n);
}
__device__ float calcRecvFluxIntegralCore(IntegralHelioDeviceArgumet& h_args, ReceiverDeviceArgument& r_args, GaussLegendre& gl, int helioIndex, int recvIndex, int i, int j, int m, int n) {
int focus_idx = h_args.d_focus_index[helioIndex];
float3 focus_pos = r_args.d_recv_focus_pos[focus_idx];
float3 recv_normal = r_args.d_recv_normal[recvIndex];
float3 imgplane_normal = normalize(h_args.d_helio_pos[helioIndex] - focus_pos);
float cos_phi = dot(recv_normal, imgplane_normal);
if (cos_phi < Epsilon) return 0.f; // receiver faces away from the heliostat: no flux contribution
float3 reverse_dir = imgplane_normal; // The normal of image plane
float3* recv_v = r_args.d_recv_vertexes + 4 * recvIndex;
float4* imgplane_m = h_args.d_imgplane_world2local + 4 * helioIndex;
float2 proj_v[4];
float3 inter_v;
float3 h_center_bias = make_float3(0, 0, 0);
float3 i_center_bias = make_float3(0, 0, 0);
float rotate_theta = 0;
if (h_args.d_center_bias) {
h_center_bias = h_args.d_center_bias[helioIndex];
GeometryFunc::calcIntersection(reverse_dir, focus_pos, h_center_bias, -reverse_dir, i_center_bias);
i_center_bias = GeometryFunc::multMatrix(i_center_bias, imgplane_m);
rotate_theta = h_args.d_rotate_theta[helioIndex];
}
for (int i = 0; i < 4; ++i) {
GeometryFunc::calcIntersection(reverse_dir, focus_pos, recv_v[i], reverse_dir, inter_v);
inter_v = GeometryFunc::multMatrix(inter_v, imgplane_m);
proj_v[i] = make_float2(inter_v.x - i_center_bias.x, inter_v.z - i_center_bias.z);
float2 trans_v;
trans_v.x = proj_v[i].x*cos(rotate_theta) + proj_v[i].y*sin(rotate_theta);
trans_v.y = proj_v[i].y*cos(rotate_theta) - proj_v[i].x*sin(rotate_theta);
proj_v[i] = trans_v;
}
float2 row_gap = (proj_v[3] - proj_v[0]) / m;
float2 col_gap = (proj_v[1] - proj_v[0]) / n;
float2 gauss_param = h_args.d_gauss_param[helioIndex];
float l_w_ratio = gauss_param.x;
float sigma = gauss_param.y;
float4 tmp_x = make_float4(
(proj_v[0] + i*row_gap + j*col_gap).x,
(proj_v[0] + (i + 1)*row_gap + j*col_gap).x,
(proj_v[0] + (i + 1)*row_gap + (j + 1)*col_gap).x,
(proj_v[0] + i*row_gap + (j + 1)*col_gap).x
);
float4 tmp_y = make_float4(
(proj_v[0] + i*row_gap + j*col_gap).y,
(proj_v[0] + (i + 1)*row_gap + j*col_gap).y,
(proj_v[0] + (i + 1)*row_gap + (j + 1)*col_gap).y,
(proj_v[0] + i*row_gap + (j + 1)*col_gap).y
);
float sum = gl.calcInte(tmp_x, tmp_y, sigma, l_w_ratio) * h_args.d_factor[helioIndex];
return sum;
}
|
12337803182cc56e6a04e83a611abccf90410b4a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
//~ #include "pcl/gpu/utils/device/block.hpp"
//~ #include "pcl/gpu/utils/device/warp.hpp"
//#include "pcl/gpu/utils/device/vector_math.hpp"
#include "thrust/device_ptr.h"
#include "thrust/scan.h"
#include "templated_extract.cuh"
namespace pcl
{
namespace device
{
namespace kinfuLS
{
//texture<int, 1, hipReadModeElementType> edgeTex;
texture<int, 1, hipReadModeElementType> triTex;
texture<int, 1, hipReadModeElementType> numVertsTex;
void
bindTextures (const int */*edgeBuf*/, const int *triBuf, const int *numVertsBuf)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<int>();
//cudaSafeCall(hipBindTexture(0, edgeTex, edgeBuf, desc) );
cudaSafeCall (hipBindTexture (0, triTex, triBuf, desc) );
cudaSafeCall (hipBindTexture (0, numVertsTex, numVertsBuf, desc) );
}
void
unbindTextures ()
{
//cudaSafeCall( hipUnbindTexture(edgeTex) );
cudaSafeCall ( hipUnbindTexture (numVertsTex) );
cudaSafeCall ( hipUnbindTexture (triTex) );
}
}
}
}
namespace pcl
{
namespace device
{
namespace kinfuLS
{
struct TrianglesExtractor
{
enum
{
MAX_LOCAL_POINTS = 15,
MIN_X_MARGIN = 0,
MIN_Y_MARGIN = 0,
MIN_Z_MARGIN = 0,
};
__device__ TrianglesExtractor() {}
float tranc_dist;
// returns the number of points extracted
__device__ __forceinline__ int filter(const FullScan6& parent,
const pcl::gpu::kinfuLS::tsdf_buffer& buffer, int x, int y, int z)
{
if (x >= (buffer.voxels_size.x - 1) || y >= (buffer.voxels_size.y - 1) || z >= (buffer.voxels_size.z - 1))
return 0;
float f[8];
cube_index = computeCubeIndex (parent, x, y, z, f);
// output triangle vertices
const int numVerts = tex1Dfetch (numVertsTex, cube_index);
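// cube_index encodes which of the 8 cell corners lie below the iso-value;
// numVertsTex is the marching-cubes lookup table giving how many triangle
// vertices that corner configuration produces.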
if (numVerts != 0)
{
// calculate cell vertex positions
float3 v[8];
v[0] = getNodeCoo (parent, x, y, z);
v[1] = getNodeCoo (parent, x + 1, y, z);
v[2] = getNodeCoo (parent, x + 1, y + 1, z);
v[3] = getNodeCoo (parent, x, y + 1, z);
v[4] = getNodeCoo (parent, x, y, z + 1);
v[5] = getNodeCoo (parent, x + 1, y, z + 1);
v[6] = getNodeCoo (parent, x + 1, y + 1, z + 1);
v[7] = getNodeCoo (parent, x, y + 1, z + 1);
// find the vertices where the surface intersects the cube
// use shared memory to avoid using local
points[0] = vertex_interp (v[0], v[1], f[0], f[1]);
points[1] = vertex_interp (v[1], v[2], f[1], f[2]);
points[2] = vertex_interp (v[2], v[3], f[2], f[3]);
points[3] = vertex_interp (v[3], v[0], f[3], f[0]);
points[4] = vertex_interp (v[4], v[5], f[4], f[5]);
points[5] = vertex_interp (v[5], v[6], f[5], f[6]);
points[6] = vertex_interp (v[6], v[7], f[6], f[7]);
points[7] = vertex_interp (v[7], v[4], f[7], f[4]);
points[8] = vertex_interp (v[0], v[4], f[0], f[4]);
points[9] = vertex_interp (v[1], v[5], f[1], f[5]);
points[10] = vertex_interp (v[2], v[6], f[2], f[6]);
points[11] = vertex_interp (v[3], v[7], f[3], f[7]);
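// Estimate the surface normal from the TSDF gradient, obtained by differencing
// the eight corner samples along each axis, then normalize it.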
normal.z = -(f[0] + f[1] + f[2] + f[3] - (f[4] + f[5] + f[6] + f[7]));
normal.y = -(f[0] + f[1] - (f[2] + f[3]) + f[4] + f[5] - (f[6] + f[7]));
normal.x = f[1] - f[0] + f[2] - f[3] + f[5] - f[4] + f[6] - f[7];
normal = normalized(normal);
curvature = 0.0;
/*
// curvature computation code
if (x > 0 && y > 0 && z > 0)
{
int weight;
float lz,ly;
const float lx = parent.fetch (x - 1, y , z , weight);
if (weight > 0)
ly = parent.fetch (x , y - 1, z , weight);
if (weight > 0)
lz = parent.fetch (x , y , z - 1, weight);
if (weight > 0)
{
// in a common SDF volume, |∇F| = 1
// but here, |∇F| = cell_size / tranc_dist
// so normalize as |∇F * tranc_dist / cell_size| = 1
// divide by cell_size once more, because double derivative
float3 ddf;
ddf.x = ((f[1] - f[0]) - (f[0] - lx)) / square_float(parent.cell_size.x);
ddf.y = ((f[3] - f[0]) - (f[0] - ly)) / square_float(parent.cell_size.y);
ddf.z = ((f[4] - f[0]) - (f[0] - lz)) / square_float(parent.cell_size.z);
// compute mean curvature
curvature = (fabs(ddf.x + ddf.y + ddf.z) / 3.0) * tranc_dist;
}
}
*/
}
return numVerts;
}
__device__ __forceinline__ float square_float(const float f)
{
return f * f;
}
__device__ __forceinline__ bool isFull(const FullScan6& parent, unsigned int i)
{
return (i >= parent.output_xyz.size);
}
__device__ void store(const FullScan6& parent, int offset_storage, int l)
{
int v = tex1Dfetch (triTex, (cube_index * 16) + l);
float x = points[v].x;
float y = points[v].y;
float z = points[v].z;
float nx = normal.x;
float ny = normal.y;
float nz = normal.z;
float c = curvature;
parent.store_point_normals_curvature (x, y, z, nx, ny, nz, c,
parent.output_xyz.data, parent.output_normals.data, offset_storage);
}
__device__ __forceinline__ int
computeCubeIndex (const FullScan6& parent,int x, int y, int z, float f[8]) const
{
int weight;
f[0] = parent.fetch (x, y, z, weight); if (weight == 0) return 0;
f[1] = parent.fetch (x + 1, y, z, weight); if (weight == 0) return 0;
f[2] = parent.fetch (x + 1, y + 1, z, weight); if (weight == 0) return 0;
f[3] = parent.fetch (x, y + 1, z, weight); if (weight == 0) return 0;
f[4] = parent.fetch (x, y, z + 1, weight); if (weight == 0) return 0;
f[5] = parent.fetch (x + 1, y, z + 1, weight); if (weight == 0) return 0;
f[6] = parent.fetch (x + 1, y + 1, z + 1, weight); if (weight == 0) return 0;
f[7] = parent.fetch (x, y + 1, z + 1, weight); if (weight == 0) return 0;
// calculate flag indicating if each vertex is inside or outside isosurface
int cubeindex;
cubeindex = int(f[0] < isoValue());
cubeindex += int(f[1] < isoValue()) * 2;
cubeindex += int(f[2] < isoValue()) * 4;
cubeindex += int(f[3] < isoValue()) * 8;
cubeindex += int(f[4] < isoValue()) * 16;
cubeindex += int(f[5] < isoValue()) * 32;
cubeindex += int(f[6] < isoValue()) * 64;
cubeindex += int(f[7] < isoValue()) * 128;
return cubeindex;
}
__device__ __forceinline__ float3
getNodeCoo (const FullScan6& parent, int x, int y, int z) const
{
float3 coo = make_float3 (x, y, z);
coo += 0.5f; //shift to volume cell center;
coo.x *= parent.cell_size.x;
coo.y *= parent.cell_size.y;
coo.z *= parent.cell_size.z;
return coo;
}
__device__ __forceinline__ float3
vertex_interp (float3 p0, float3 p1, float f0, float f1) const
{
float t = (isoValue() - f0) / (f1 - f0 + 1e-15f);
float x = p0.x + t * (p1.x - p0.x);
float y = p0.y + t * (p1.y - p0.y);
float z = p0.z + t * (p1.z - p0.z);
return make_float3 (x, y, z);
}
static __device__ __forceinline__ float isoValue() { return 0.f; }
float3 points[12];
float3 normal;
float curvature;
int cube_index;
private:
};
__global__ void
trianglesGeneratorWithNormalsKernel (const FullScan6 tg,float tranc_dist)
{
TrianglesExtractor extractor;
extractor.tranc_dist = tranc_dist;
tg.templatedExtract(extractor);
}
int
generateTrianglesWithNormals (const PtrStep<short2>& volume,
const pcl::gpu::kinfuLS::tsdf_buffer & buffer, float tranc_dist,
DeviceArray<PointType>& output, DeviceArray<PointType>& normals,
PtrStep<int> last_data_transfer_matrix, int & data_transfer_finished)
{
FullScan6 tg;
tg.volume = volume;
tg.cell_size.x = buffer.volume_size.x / buffer.voxels_size.x;
tg.cell_size.y = buffer.volume_size.y / buffer.voxels_size.y;
tg.cell_size.z = buffer.volume_size.z / buffer.voxels_size.z;
tg.output_xyz = output;
tg.output_normals = normals;
tg.data_transfer_completion_matrix = last_data_transfer_matrix;
tg.rolling_buffer = buffer;
dim3 block (FullScan6::CTA_SIZE_X,FullScan6::CTA_SIZE_Y);
dim3 grid (divUp (buffer.voxels_size.x, block.x), divUp (buffer.voxels_size.y, block.y));
tg.init_globals();
hipLaunchKernelGGL(( trianglesGeneratorWithNormalsKernel), dim3(grid), dim3(block), 0, 0, tg,tranc_dist);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
int size = tg.get_result_size(data_transfer_finished);
return min ((int)size, int(output.size()));
}
}
}
}
|
12337803182cc56e6a04e83a611abccf90410b4a.cu
|
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
//~ #include "pcl/gpu/utils/device/block.hpp"
//~ #include "pcl/gpu/utils/device/warp.hpp"
//#include "pcl/gpu/utils/device/vector_math.hpp"
#include "thrust/device_ptr.h"
#include "thrust/scan.h"
#include "templated_extract.cuh"
namespace pcl
{
namespace device
{
namespace kinfuLS
{
//texture<int, 1, cudaReadModeElementType> edgeTex;
texture<int, 1, cudaReadModeElementType> triTex;
texture<int, 1, cudaReadModeElementType> numVertsTex;
void
bindTextures (const int */*edgeBuf*/, const int *triBuf, const int *numVertsBuf)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<int>();
//cudaSafeCall(cudaBindTexture(0, edgeTex, edgeBuf, desc) );
cudaSafeCall (cudaBindTexture (0, triTex, triBuf, desc) );
cudaSafeCall (cudaBindTexture (0, numVertsTex, numVertsBuf, desc) );
}
void
unbindTextures ()
{
//cudaSafeCall( cudaUnbindTexture(edgeTex) );
cudaSafeCall ( cudaUnbindTexture (numVertsTex) );
cudaSafeCall ( cudaUnbindTexture (triTex) );
}
}
}
}
namespace pcl
{
namespace device
{
namespace kinfuLS
{
struct TrianglesExtractor
{
enum
{
MAX_LOCAL_POINTS = 15,
MIN_X_MARGIN = 0,
MIN_Y_MARGIN = 0,
MIN_Z_MARGIN = 0,
};
__device__ TrianglesExtractor() {}
float tranc_dist;
// returns the number of points extracted
__device__ __forceinline__ int filter(const FullScan6& parent,
const pcl::gpu::kinfuLS::tsdf_buffer& buffer, int x, int y, int z)
{
if (x >= (buffer.voxels_size.x - 1) || y >= (buffer.voxels_size.y - 1) || z >= (buffer.voxels_size.z - 1))
return 0;
float f[8];
cube_index = computeCubeIndex (parent, x, y, z, f);
// output triangle vertices
const int numVerts = tex1Dfetch (numVertsTex, cube_index);
if (numVerts != 0)
{
// calculate cell vertex positions
float3 v[8];
v[0] = getNodeCoo (parent, x, y, z);
v[1] = getNodeCoo (parent, x + 1, y, z);
v[2] = getNodeCoo (parent, x + 1, y + 1, z);
v[3] = getNodeCoo (parent, x, y + 1, z);
v[4] = getNodeCoo (parent, x, y, z + 1);
v[5] = getNodeCoo (parent, x + 1, y, z + 1);
v[6] = getNodeCoo (parent, x + 1, y + 1, z + 1);
v[7] = getNodeCoo (parent, x, y + 1, z + 1);
// find the vertices where the surface intersects the cube
// use shared memory to avoid using local
points[0] = vertex_interp (v[0], v[1], f[0], f[1]);
points[1] = vertex_interp (v[1], v[2], f[1], f[2]);
points[2] = vertex_interp (v[2], v[3], f[2], f[3]);
points[3] = vertex_interp (v[3], v[0], f[3], f[0]);
points[4] = vertex_interp (v[4], v[5], f[4], f[5]);
points[5] = vertex_interp (v[5], v[6], f[5], f[6]);
points[6] = vertex_interp (v[6], v[7], f[6], f[7]);
points[7] = vertex_interp (v[7], v[4], f[7], f[4]);
points[8] = vertex_interp (v[0], v[4], f[0], f[4]);
points[9] = vertex_interp (v[1], v[5], f[1], f[5]);
points[10] = vertex_interp (v[2], v[6], f[2], f[6]);
points[11] = vertex_interp (v[3], v[7], f[3], f[7]);
normal.z = -(f[0] + f[1] + f[2] + f[3] - (f[4] + f[5] + f[6] + f[7]));
normal.y = -(f[0] + f[1] - (f[2] + f[3]) + f[4] + f[5] - (f[6] + f[7]));
normal.x = f[1] - f[0] + f[2] - f[3] + f[5] - f[4] + f[6] - f[7];
normal = normalized(normal);
curvature = 0.0;
/*
// curvature computation code
if (x > 0 && y > 0 && z > 0)
{
int weight;
float lz,ly;
const float lx = parent.fetch (x - 1, y , z , weight);
if (weight > 0)
ly = parent.fetch (x , y - 1, z , weight);
if (weight > 0)
lz = parent.fetch (x , y , z - 1, weight);
if (weight > 0)
{
// in a common SDF volume, |∇F| = 1
// but here, |∇F| = cell_size / tranc_dist
// so normalize as |∇F * tranc_dist / cell_size| = 1
// divide by cell_size once more, because double derivative
float3 ddf;
ddf.x = ((f[1] - f[0]) - (f[0] - lx)) / square_float(parent.cell_size.x);
ddf.y = ((f[3] - f[0]) - (f[0] - ly)) / square_float(parent.cell_size.y);
ddf.z = ((f[4] - f[0]) - (f[0] - lz)) / square_float(parent.cell_size.z);
// compute mean curvature
curvature = (fabs(ddf.x + ddf.y + ddf.z) / 3.0) * tranc_dist;
}
}
*/
}
return numVerts;
}
__device__ __forceinline__ float square_float(const float f)
{
return f * f;
}
__device__ __forceinline__ bool isFull(const FullScan6& parent, unsigned int i)
{
return (i >= parent.output_xyz.size);
}
__device__ void store(const FullScan6& parent, int offset_storage, int l)
{
int v = tex1Dfetch (triTex, (cube_index * 16) + l);
float x = points[v].x;
float y = points[v].y;
float z = points[v].z;
float nx = normal.x;
float ny = normal.y;
float nz = normal.z;
float c = curvature;
parent.store_point_normals_curvature (x, y, z, nx, ny, nz, c,
parent.output_xyz.data, parent.output_normals.data, offset_storage);
}
__device__ __forceinline__ int
computeCubeIndex (const FullScan6& parent,int x, int y, int z, float f[8]) const
{
int weight;
f[0] = parent.fetch (x, y, z, weight); if (weight == 0) return 0;
f[1] = parent.fetch (x + 1, y, z, weight); if (weight == 0) return 0;
f[2] = parent.fetch (x + 1, y + 1, z, weight); if (weight == 0) return 0;
f[3] = parent.fetch (x, y + 1, z, weight); if (weight == 0) return 0;
f[4] = parent.fetch (x, y, z + 1, weight); if (weight == 0) return 0;
f[5] = parent.fetch (x + 1, y, z + 1, weight); if (weight == 0) return 0;
f[6] = parent.fetch (x + 1, y + 1, z + 1, weight); if (weight == 0) return 0;
f[7] = parent.fetch (x, y + 1, z + 1, weight); if (weight == 0) return 0;
// calculate flag indicating if each vertex is inside or outside isosurface
int cubeindex;
cubeindex = int(f[0] < isoValue());
cubeindex += int(f[1] < isoValue()) * 2;
cubeindex += int(f[2] < isoValue()) * 4;
cubeindex += int(f[3] < isoValue()) * 8;
cubeindex += int(f[4] < isoValue()) * 16;
cubeindex += int(f[5] < isoValue()) * 32;
cubeindex += int(f[6] < isoValue()) * 64;
cubeindex += int(f[7] < isoValue()) * 128;
return cubeindex;
}
__device__ __forceinline__ float3
getNodeCoo (const FullScan6& parent, int x, int y, int z) const
{
float3 coo = make_float3 (x, y, z);
coo += 0.5f; //shift to volume cell center;
coo.x *= parent.cell_size.x;
coo.y *= parent.cell_size.y;
coo.z *= parent.cell_size.z;
return coo;
}
__device__ __forceinline__ float3
vertex_interp (float3 p0, float3 p1, float f0, float f1) const
{
float t = (isoValue() - f0) / (f1 - f0 + 1e-15f);
float x = p0.x + t * (p1.x - p0.x);
float y = p0.y + t * (p1.y - p0.y);
float z = p0.z + t * (p1.z - p0.z);
return make_float3 (x, y, z);
}
static __device__ __forceinline__ float isoValue() { return 0.f; }
float3 points[12];
float3 normal;
float curvature;
int cube_index;
private:
};
__global__ void
trianglesGeneratorWithNormalsKernel (const FullScan6 tg,float tranc_dist)
{
TrianglesExtractor extractor;
extractor.tranc_dist = tranc_dist;
tg.templatedExtract(extractor);
}
int
generateTrianglesWithNormals (const PtrStep<short2>& volume,
const pcl::gpu::kinfuLS::tsdf_buffer & buffer, float tranc_dist,
DeviceArray<PointType>& output, DeviceArray<PointType>& normals,
PtrStep<int> last_data_transfer_matrix, int & data_transfer_finished)
{
FullScan6 tg;
tg.volume = volume;
tg.cell_size.x = buffer.volume_size.x / buffer.voxels_size.x;
tg.cell_size.y = buffer.volume_size.y / buffer.voxels_size.y;
tg.cell_size.z = buffer.volume_size.z / buffer.voxels_size.z;
tg.output_xyz = output;
tg.output_normals = normals;
tg.data_transfer_completion_matrix = last_data_transfer_matrix;
tg.rolling_buffer = buffer;
dim3 block (FullScan6::CTA_SIZE_X,FullScan6::CTA_SIZE_Y);
dim3 grid (divUp (buffer.voxels_size.x, block.x), divUp (buffer.voxels_size.y, block.y));
tg.init_globals();
trianglesGeneratorWithNormalsKernel<<<grid, block>>>(tg,tranc_dist);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
int size = tg.get_result_size(data_transfer_finished);
return min ((int)size, int(output.size()));
}
}
}
}
|
584e32c262471324876d3100905f745c9835f190.hip
|
// !!! This is a file automatically generated by hipify!!!
#define MAXSIZE 200000
#include <iostream>
#include <string>
#include <fstream> //Writing to files
#include <chrono> //Keep track of time
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
// to remove intellisense highlighting
#include <device_launch_parameters.h>
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include <algorithm>
#include "device_launch_parameters.h"
using namespace std::chrono;
int data[MAXSIZE];
//Main CUDA kernel implementing Sieve of Eratosthenes
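//The input array stores only odd candidates: num[i] == 2*i + 1 (num[0] == 2).
//Thread tmp owns the candidate p = 2*tmp + 3 at index tmp + 1; the odd multiple
//p*(2*i + 1) sits at index (2*tmp + 3)*i + tmp + 1, which the inner loop zeroes.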
__global__ static void CUDASieve(int *num, int range, int bNum, int tNum){
const int threadId = threadIdx.x;
const int blockId = blockIdx.x;
int tmp = blockId*tNum + threadId;
while (tmp < range){
int i = 1;
while (((2 * tmp + 3)*i + tmp + 1) < MAXSIZE){
num[(2 * tmp + 3)*i + tmp + 1] = 0;
i++;
}
tmp += bNum * tNum;
}
}
void CUDAFilter(int *number, int size){
for (int i = 0; i<size; i++)
number[i] = 2 * i + 1;
number[0] = 2;
}
void reportTime(const char* msg, steady_clock::duration span) {
auto ms = duration_cast<milliseconds>(span);
std::cout << msg << ms.count() << " millisecs" << std::endl;
}
void CPUgenPrime(uint64_t range, bool mode, std::ofstream &fileOut) {
//Start the clock
steady_clock::time_point ts, te;
ts = steady_clock::now();
fileOut << "\nCPU version\n" << "\nCPU version generating from range (0" << "~" << range << ")\n\n";
//Keep track of results
uint64_t count = 0;
//Outer loop
for (uint64_t i = 0; i < range; i++)
//Inner loop
for (uint64_t j = 2; j*j <= i; j++) {
if (i % j == 0)
break;
else if (j + 1 > sqrt(i)) {
//User wants to see output on screen
if (mode) {
//std::cout << std::fixed << i << "\t";
//fileOut << std::fixed << i << "\t";
count++;
}
//Just write to file if mode is 0
else
{
//fileOut << std::fixed << i << "\t";
count++;
}
}
}
//Stop the clock
te = steady_clock::now();
std::cout << "\n\nTotal number of primes: " << count << std::endl;
reportTime("\nCPU Program Completed in ", te - ts);
fileOut << "\n\nTotal number of primes: " << count << std::endl;
std::cout << "A log file with the current date/time has been placed in the program directory.\n";
std::cout << "--------------------------------------------------------------------------------\n";
}
std::ofstream fileInit(){
//Get current date and time
time_t rawtime;
struct tm * timeinfo;
char buffer[80];
time(&rawtime);
timeinfo = localtime(&rawtime);
//Format in Year-Month-Day_Hour_Minute_Seconds
strftime(buffer, 80, "%y-%m-%d_%H-%M-%S", timeinfo);
std::string dateTime(buffer);
//File handles
std::ofstream fileOut;
fileOut.open("GenPrime_out_" + dateTime + ".txt");
return fileOut;
}
int setupRange(int range) {
if (range == 0) {
std::cout << "[2/3] Please choose the range(3 ~ 500,000): \n";
std::cin >> range;
//Error checking
if (range > 2 && range <= 500000) {
return range;
}
else {
std::cout << "Invalid input for range, value set to default 500,000\n";
return 500000;
}
}
else return range;
}
//Array of MAXSIZE holds the odd candidate 2*i+1 at index i (index 0 holds 2);
//the sieve zeroes out composite entries, so only primes remain non-zero
//Example: cpudata[] = {2,3,5,7,0,11,13,0,17,19,0,23,0,...}
void justDoIt(int range, bool mode, std::ofstream& fileOut) {
//Output to file
fileOut << "CUDA Multithreading Sieve of Eratosthenes\n" << "CUDA Multithreading generating from range (0" << "~" << range << ")\n\n";
//Filter out even numbers to simplify calculation
CUDAFilter(data, (range / 2) + 1);
//Initialize arrays
int *gpudata;
int cpudata[MAXSIZE];
//Allocate memory
hipMalloc((void**)&gpudata, sizeof(int)*MAXSIZE);
//Copy to GPU
hipMemcpy(gpudata, data, sizeof(int)*MAXSIZE, hipMemcpyHostToDevice);
//Maximum threads per block for compute capability 5.2 is 1024
int bNum = 96, tNum = 1024;
//Start the clock
steady_clock::time_point ts, te;
ts = steady_clock::now();
//Kernel call on the GPU
CUDASieve << <bNum, tNum, 0 >> >(gpudata, range, bNum, tNum);
//Synchronize the device and the host
hipDeviceSynchronize();
//Copy from GPU back onto host
hipMemcpy(&cpudata, gpudata, sizeof(int)*MAXSIZE, hipMemcpyDeviceToHost);
//Free the memory on the GPU
hipFree(gpudata);
//Reset the device for easy profiling
hipDeviceReset();
//Stop the clock
te = steady_clock::now();
//Display on screen
/*if (mode == 1) {
for (int i = 0; i < MAXSIZE; i++) {
if (cpudata[i] != 0)
printf("%d\t", cpudata[i]);
}
}*/
//Count number of primes
int count = std::count_if(cpudata, cpudata + MAXSIZE, [](int i){ return i; });
std::cout << "\n\nTotal number of primes: " << count-2 << std::endl;
//Write to file
for (int i = 0; i < MAXSIZE; i++) {
if (cpudata[i] != 0) {
fileOut << cpudata[i] << "\t";
}
}
//Show the amount of time
reportTime("GPU Program Completed in ", te - ts);
fileOut << "\n\nTotal number of primes: " << count - 2 << std::endl;
std::cout << "A log file with the current date/time has been placed in the program directory.\n";
std::cout << "--------------------------------------------------------------------------------\n";
}
void menu(int range, bool mode, std::ofstream& fileOut){
std::cout << "[3/3] Please select the version of the program you want to run\n"
<< "1. [*****] CUDA Multithreading Sieve of Eratosthenes version\n"
<< "2. [***] Simple CPU version\n"
<< "3. [**] Run both versions\n"
<< "0. Quit\n"
<< "Option: ";
int mainMenuOption;
std::cin >> mainMenuOption; //Accept user input
switch (mainMenuOption) {
case 0: // User wants to exit
std::cout << "Thank you for testing our program :)\n"
<< "Fork us @ https://github.com/bbershadsky/" << std::endl;
break;
case 1:
std::cout << "CUDA Multithreading generating from range (0" << "~" << range << ")\n";
std::cout << "--------------------------------------------------------------------------------\n";
justDoIt(range, mode, fileOut);
//Close the file handle
fileOut.close();
break;
case 2:
std::cout << "CPU version generating from range (0" << "~" << range << ")\n";
std::cout << "--------------------------------------------------------------------------------\n";
CPUgenPrime(range, mode, fileOut);
//Close the file handle
fileOut.close();
break;
case 3:
std::cout << "Running all available options\n";
justDoIt(range, mode, fileOut);
CPUgenPrime(range, mode, fileOut);
//Close the file handle
fileOut.close();
break;
default:
std::cout << "[Invalid option. Only integers 0-3 are allowed]\n";
menu(range, mode, fileOut);
break;
}
}
void setupScreenMode(int range) {
std::cout << "***Team /dev/null GPU610 PRIME NUMBER GENERATOR v3.5***\n"
<< "[1/3] Would you like to see the output on screen?\n"
<< "0 = NO, write to file only\n"
<< "1 = YES, display on screen\n"
<< "Show on screen?: ";
int mode = 1;
std::cin >> mode;
//Initialize file handle
std::ofstream fileOut = fileInit();
if (mode == 0) {
std::cout << "***Writing output to file only***\n\n";
range = setupRange(range);
menu(range, mode, fileOut);
}
else if (mode == 1) {
std::cout << "***Outputting results on screen***\n\n";
range = setupRange(range);
menu(range, mode, fileOut);
}
else {
std::cout << "[Invalid option selected, default option 0 (output to screen) selected]\n\n";
range = setupRange(range);
menu(range, 1, fileOut);
}
}
//Initialize value to be used in the program using command line arguments
int initRuntimeValue(int argc, char* argv[]){
//Save runtime parameter into local variable, if provided
int range = 500000;
if (argc == 1) {
std::cout << "[No command line parameters provided]\n\n";
return 0;
}
if (argc == 2)
range = std::atoi(argv[1]);
if (range > 2 && range <= 500000)
return range;
else {
std::cout << "[Bad input for range parameter (must be <= 500,000)]\n"
<< "Range has been set to 500,000\n";
return range = 500000;
}
}
int main(int argc, char* argv[]) {
//Grab the command line arguments
int range = initRuntimeValue(argc, argv);
//Prompt user for mode (verbose or silent)
setupScreenMode(range);
std::cout << "Thank you for testing our program :)\n"
<< "Fork us @ https://github.com/bbershadsky/" << std::endl;
return 0;
}
/*
CHANGELOG
v1.0 - Generating from simple double loop
v1.0.1 - Command line parameter input
v1.1 - Nicer output format and error feedback
v1.2 - Full 64 bit integer compatibility
v1.3 - Multithreading and CUDA implemented
v2.0 - Completely rewrote program to include menu and multiple run parameters
v3.0 - Full rewrite of CUDAGenPrime to use CUDASieve of Eratosthenes, and initRuntimeValues
v3.1 - Moved new CUDAGenPrime to separate function justDoIt(range);
v3.2 - Reorganized main() into simpler blocks for easier readability and efficiency
v3.3 - Moved most control blocks over to the menu() for easier modification
v3.3.1 - Removed a bunch of unused includes
v3.4 - Successfully fixed file output and implemented count
v3.5 - Final version with usability and performance upgrades
*/
|
584e32c262471324876d3100905f745c9835f190.cu
|
#define MAXSIZE 200000
#include <iostream>
#include <string>
#include <fstream> //Writing to files
#include <chrono> //Keep track of time
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
// to remove intellisense highlighting
#include <device_launch_parameters.h>
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include <algorithm>
#include "device_launch_parameters.h"
using namespace std::chrono;
int data[MAXSIZE];
//Main CUDA kernel implementing Sieve of Eratosthenes
__global__ static void CUDASieve(int *num, int range, int bNum, int tNum){
const int threadId = threadIdx.x;
const int blockId = blockIdx.x;
int tmp = blockId*tNum + threadId;
while (tmp < range){
int i = 1;
while (((2 * tmp + 3)*i + tmp + 1) < MAXSIZE){
num[(2 * tmp + 3)*i + tmp + 1] = 0;
i++;
}
tmp += bNum * tNum;
}
}
void CUDAFilter(int *number, int size){
for (int i = 0; i<size; i++)
number[i] = 2 * i + 1;
number[0] = 2;
}
void reportTime(const char* msg, steady_clock::duration span) {
auto ms = duration_cast<milliseconds>(span);
std::cout << msg << ms.count() << " millisecs" << std::endl;
}
void CPUgenPrime(uint64_t range, bool mode, std::ofstream &fileOut) {
//Start the clock
steady_clock::time_point ts, te;
ts = steady_clock::now();
fileOut << "\nCPU version\n" << "\nCPU version generating from range (0" << "~" << range << ")\n\n";
//Keep track of results
uint64_t count = 0;
//Outer loop
for (uint64_t i = 0; i < range; i++)
//Inner loop
for (uint64_t j = 2; j*j <= i; j++) {
if (i % j == 0)
break;
else if (j + 1 > sqrt(i)) {
//User wants to see output on screen
if (mode) {
//std::cout << std::fixed << i << "\t";
//fileOut << std::fixed << i << "\t";
count++;
}
//Just write to file if mode is 0
else
{
//fileOut << std::fixed << i << "\t";
count++;
}
}
}
//Stop the clock
te = steady_clock::now();
std::cout << "\n\nTotal number of primes: " << count << std::endl;
reportTime("\nCPU Program Completed in ", te - ts);
fileOut << "\n\nTotal number of primes: " << count << std::endl;
std::cout << "A log file with the current date/time has been placed in the program directory.\n";
std::cout << "--------------------------------------------------------------------------------\n";
}
std::ofstream fileInit(){
//Get current date and time
time_t rawtime;
struct tm * timeinfo;
char buffer[80];
time(&rawtime);
timeinfo = localtime(&rawtime);
//Format in Year-Month-Day_Hour_Minute_Seconds
strftime(buffer, 80, "%y-%m-%d_%H-%M-%S", timeinfo);
std::string dateTime(buffer);
//File handles
std::ofstream fileOut;
fileOut.open("GenPrime_out_" + dateTime + ".txt");
return fileOut;
}
int setupRange(int range) {
if (range == 0) {
std::cout << "[2/3] Please choose the range(3 ~ 500,000): \n";
std::cin >> range;
//Error checking
if (range > 2 && range <= 500000) {
return range;
}
else {
std::cout << "Invalid input for range, value set to default 500,000\n";
return 500000;
}
}
else return range;
}
//Array of MAXSIZE holds the odd candidate 2*i+1 at index i (index 0 holds 2);
//the sieve zeroes out composite entries, so only primes remain non-zero
//Example: cpudata[] = {2,3,5,7,0,11,13,0,17,19,0,23,0,...}
void justDoIt(int range, bool mode, std::ofstream& fileOut) {
//Output to file
fileOut << "CUDA Multithreading Sieve of Eratosthenes\n" << "CUDA Multithreading generating from range (0" << "~" << range << ")\n\n";
//Filter out even numbers to simplify calculation
CUDAFilter(data, (range / 2) + 1);
//Initialize arrays
int *gpudata;
int cpudata[MAXSIZE];
//Allocate memory
cudaMalloc((void**)&gpudata, sizeof(int)*MAXSIZE);
//Copy to GPU
cudaMemcpy(gpudata, data, sizeof(int)*MAXSIZE, cudaMemcpyHostToDevice);
//Maximum threads per block for compute capability 5.2 is 1024
int bNum = 96, tNum = 1024;
//Start the clock
steady_clock::time_point ts, te;
ts = steady_clock::now();
//Kernel call on the GPU
CUDASieve << <bNum, tNum, 0 >> >(gpudata, range, bNum, tNum);
//Synchronize the device and the host
cudaDeviceSynchronize();
//Copy from GPU back onto host
cudaMemcpy(&cpudata, gpudata, sizeof(int)*MAXSIZE, cudaMemcpyDeviceToHost);
//Free the memory on the GPU
cudaFree(gpudata);
//Reset the device for easy profiling
cudaDeviceReset();
//Stop the clock
te = steady_clock::now();
//Display on screen
/*if (mode == 1) {
for (int i = 0; i < MAXSIZE; i++) {
if (cpudata[i] != 0)
printf("%d\t", cpudata[i]);
}
}*/
//Count number of primes
int count = std::count_if(cpudata, cpudata + MAXSIZE, [](int i){ return i; });
std::cout << "\n\nTotal number of primes: " << count-2 << std::endl;
//Write to file
for (int i = 0; i < MAXSIZE; i++) {
if (cpudata[i] != 0) {
fileOut << cpudata[i] << "\t";
}
}
//Show the amount of time
reportTime("GPU Program Completed in ", te - ts);
fileOut << "\n\nTotal number of primes: " << count - 2 << std::endl;
std::cout << "A log file with the current date/time has been placed in the program directory.\n";
std::cout << "--------------------------------------------------------------------------------\n";
}
void menu(int range, bool mode, std::ofstream& fileOut){
std::cout << "[3/3] Please select the version of the program you want to run\n"
<< "1. [*****] CUDA Multithreading Sieve of Eratosthenes version\n"
<< "2. [***] Simple CPU version\n"
<< "3. [**] Run both versions\n"
<< "0. Quit\n"
<< "Option: ";
int mainMenuOption;
std::cin >> mainMenuOption; //Accept user input
switch (mainMenuOption) {
case 0: // User wants to exit
std::cout << "Thank you for testing our program :)\n"
<< "Fork us @ https://github.com/bbershadsky/" << std::endl;
break;
case 1:
std::cout << "CUDA Multithreading generating from range (0" << "~" << range << ")\n";
std::cout << "--------------------------------------------------------------------------------\n";
justDoIt(range, mode, fileOut);
//Close the file handle
fileOut.close();
break;
case 2:
std::cout << "CPU version generating from range (0" << "~" << range << ")\n";
std::cout << "--------------------------------------------------------------------------------\n";
CPUgenPrime(range, mode, fileOut);
//Close the file handle
fileOut.close();
break;
case 3:
std::cout << "Running all available options\n";
justDoIt(range, mode, fileOut);
CPUgenPrime(range, mode, fileOut);
//Close the file handle
fileOut.close();
break;
default:
std::cout << "[Invalid option. Only integers 0-3 are allowed]\n";
menu(range, mode, fileOut);
break;
}
}
void setupScreenMode(int range) {
std::cout << "***Team /dev/null GPU610 PRIME NUMBER GENERATOR v3.5***\n"
<< "[1/3] Would you like to see the output on screen?\n"
<< "0 = NO, write to file only\n"
<< "1 = YES, display on screen\n"
<< "Show on screen?: ";
int mode = 1;
std::cin >> mode;
//Initialize file handle
std::ofstream fileOut = fileInit();
if (mode == 0) {
std::cout << "***Writing output to file only***\n\n";
range = setupRange(range);
menu(range, mode, fileOut);
}
else if (mode == 1) {
std::cout << "***Outputting results on screen***\n\n";
range = setupRange(range);
menu(range, mode, fileOut);
}
else {
std::cout << "[Invalid option selected, default option 0 (output to screen) selected]\n\n";
range = setupRange(range);
menu(range, 1, fileOut);
}
}
//Initialize value to be used in the program using command line arguments
int initRuntimeValue(int argc, char* argv[]){
//Save runtime parameter into local variable, if provided
int range = 500000;
if (argc == 1) {
std::cout << "[No command line parameters provided]\n\n";
return 0;
}
if (argc == 2)
range = std::atoi(argv[1]);
if (range > 2 && range <= 500000)
return range;
else {
std::cout << "[Bad input for range parameter (must be <= 500,000)]\n"
<< "Range has been set to 500,000\n";
return range = 500000;
}
}
int main(int argc, char* argv[]) {
//Grab the command line arguments
int range = initRuntimeValue(argc, argv);
//Prompt user for mode (verbose or silent)
setupScreenMode(range);
std::cout << "Thank you for testing our program :)\n"
<< "Fork us @ https://github.com/bbershadsky/" << std::endl;
return 0;
}
/*
CHANGELOG
v1.0 - Generating from simple double loop
v1.0.1 - Command line parameter input
v1.1 - Nicer output format and error feedback
v1.2 - Full 64 bit integer compatibility
v1.3 - Multithreading and CUDA implemented
v2.0 - Completely rewrote program to include menu and multiple run parameters
v3.0 - Full rewrite of CUDAGenPrime to use CUDASieve of Eratosthenes, and initRuntimeValues
v3.1 - Moved new CUDAGenPrime to separate function justDoIt(range);
v3.2 - Reorganized main() into simpler blocks for easier readability and efficiency
v3.3 - Moved most control blocks over to the menu() for easier modification
v3.3.1 - Removed a bunch of unused includes
v3.4 - Successfully fixed file output and implemented count
v3.5 - Final version with usability and performance upgrades
*/
|
4d4c8551025848a4518e1da1381e7422a99686e6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
#include <ATen/native/hip/KernelUtils.cuh>
namespace at {
namespace native {
namespace {
__device__ __forceinline__ size_t
idx(const size_t nc,
const size_t height,
const size_t width,
const size_t y,
const size_t x) {
return (nc * height + y) * width + x;
}
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_out_frame(
const int n,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor<scalar_t, 4> idata,
PackedTensorAccessor<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int height1 = idata.size(2);
const int width1 = idata.size(3);
const int height2 = odata.size(2);
const int width2 = odata.size(3);
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][h1][w1];
odata[n][c][h2][w2] = val;
}
}
return;
}
//
const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
rheight, h2, align_corners, /*cubic=*/false);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const accscalar_t h1lambda = h1r - h1;
const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
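// Blend the four neighbouring input pixels with bilinear weights
// (h0lambda/h1lambda along height, w0lambda/w1lambda along width).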
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const accscalar_t val = h0lambda *
(w0lambda * idata[n][c][h1][w1] +
w1lambda * idata[n][c][h1][w1 + w1p]) +
h1lambda *
(w0lambda * idata[n][c][h1 + h1p][w1] +
w1lambda * idata[n][c][h1 + h1p][w1 + w1p]);
odata[n][c][h2][w2] = static_cast<scalar_t>(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_backward_out_frame(
const size_t nc,
const int height1,
const int width1,
const int height2,
const int width2,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
scalar_t* __restrict__ idata,
const scalar_t* __restrict__ odata) {
const size_t o_numel = nc * width2 * height2;
const size_t i_numel = nc * width1 * height1;
for (size_t index = blockDim.x * blockIdx.x + threadIdx.x; index < o_numel;
index += blockDim.x * gridDim.x) {
size_t index_temp = index;
const int w2 = index_temp % width2; // 0:width2-1
index_temp /= width2;
const int h2 = index_temp % height2; // 0:height2-1
const size_t nc = index_temp / height2;
//
const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
rheight, h2, align_corners, /*cubic=*/false);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const accscalar_t h1lambda = h1r - h1;
const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
const scalar_t d2val = odata[index];
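// Scatter the output gradient to the four neighbouring input cells using the same
// bilinear weights as the forward pass; fastAtomicAdd is required because several
// output elements may map to the same input cell.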
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1, w1),
i_numel,
static_cast<scalar_t>(h0lambda * w0lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1, w1 + w1p),
i_numel,
static_cast<scalar_t>(h0lambda * w1lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1 + h1p, w1),
i_numel,
static_cast<scalar_t>(h1lambda * w0lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1 + h1p, w1 + w1p),
i_numel,
static_cast<scalar_t>(h1lambda * w1lambda * d2val),
true);
}
}
static void upsample_bilinear2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_bilinear2d_out_cuda", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
upsample_2d_shape_check(
input,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
output.resize_({input.size(0), input.size(1), output_height, output_width});
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
const int num_kernels = output_height * output_width;
const int num_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bilinear2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
hipLaunchKernelGGL(( upsample_bilinear2d_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream,
num_kernels, rheight, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
static void upsample_bilinear2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_bilinear2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
if (grad_input.numel() == 0) {
return;
}
// A contiguous tensor is required for the kernel launch config
grad_input.contiguous();
// initialization to zero is required here. As we launch one thread per output
// element, and atomicAdd to input gradient. Given a sparse sampling case, our
// threads are not covering the whole input tensor.
grad_input.zero_();
const size_t num_kernels = nbatch * channels * output_height * output_width;
const int num_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_bilinear2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
hipLaunchKernelGGL(( upsample_bilinear2d_backward_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, static_cast<size_t>(num_threads))),
dim3(num_threads),
0,
stream,
nbatch * channels,
input_height,
input_width,
output_height,
output_width,
rheight,
rwidth,
align_corners,
idata,
odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
} // namespace
Tensor& upsample_bilinear2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bilinear2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor upsample_bilinear2d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bilinear2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor& upsample_bilinear2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bilinear2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
Tensor upsample_bilinear2d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bilinear2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
using at::native::upsample::compute_output_size;
using at::native::upsample_cuda::get_scale_value;
Tensor upsample_bilinear2d_cuda(
const Tensor& input,
c10::optional<IntArrayRef> output_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto osize = compute_output_size(input.sizes(), output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
upsample_bilinear2d_out_cuda_template(output, input, osize, align_corners, scale_h, scale_w);
return output;
}
Tensor upsample_bilinear2d_backward_cuda(
const Tensor& grad_output,
c10::optional<IntArrayRef> output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
auto osize = compute_output_size(input_size, output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bilinear2d_backward_out_cuda_template(
grad_input, grad_output, osize, input_size, align_corners, scale_h, scale_w);
return grad_input;
}
} // namespace native
} // namespace at
|
4d4c8551025848a4518e1da1381e7422a99686e6.cu
|
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
#include <ATen/native/cuda/KernelUtils.cuh>
namespace at {
namespace native {
namespace {
__device__ __forceinline__ size_t
idx(const size_t nc,
const size_t height,
const size_t width,
const size_t y,
const size_t x) {
return (nc * height + y) * width + x;
}
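// Example: for a tensor viewed as (N*C, H, W), idx(nc=2, height=4, width=5, y=1, x=3)
// returns (2*4 + 1)*5 + 3 = 48, the linear offset used by the backward kernel below.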
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_out_frame(
const int n,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor<scalar_t, 4> idata,
PackedTensorAccessor<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int height1 = idata.size(2);
const int width1 = idata.size(3);
const int height2 = odata.size(2);
const int width2 = odata.size(3);
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][h1][w1];
odata[n][c][h2][w2] = val;
}
}
return;
}
//
const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
rheight, h2, align_corners, /*cubic=*/false);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const accscalar_t h1lambda = h1r - h1;
const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
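    // Bilinear blend of the four neighbouring input pixels:
    // out[h2][w2] = h0lambda*(w0lambda*in[h1][w1] + w1lambda*in[h1][w1+w1p])
    //            + h1lambda*(w0lambda*in[h1+h1p][w1] + w1lambda*in[h1+h1p][w1+w1p])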
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const accscalar_t val = h0lambda *
(w0lambda * idata[n][c][h1][w1] +
w1lambda * idata[n][c][h1][w1 + w1p]) +
h1lambda *
(w0lambda * idata[n][c][h1 + h1p][w1] +
w1lambda * idata[n][c][h1 + h1p][w1 + w1p]);
odata[n][c][h2][w2] = static_cast<scalar_t>(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_backward_out_frame(
const size_t nc,
const int height1,
const int width1,
const int height2,
const int width2,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
scalar_t* __restrict__ idata,
const scalar_t* __restrict__ odata) {
const size_t o_numel = nc * width2 * height2;
const size_t i_numel = nc * width1 * height1;
for (size_t index = blockDim.x * blockIdx.x + threadIdx.x; index < o_numel;
index += blockDim.x * gridDim.x) {
size_t index_temp = index;
const int w2 = index_temp % width2; // 0:width2-1
index_temp /= width2;
const int h2 = index_temp % height2; // 0:height2-1
const size_t nc = index_temp / height2;
//
const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
rheight, h2, align_corners, /*cubic=*/false);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const accscalar_t h1lambda = h1r - h1;
const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
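    // Adjoint of the bilinear blend: the output gradient d2val is scattered back to the
    // same four input locations, each scaled by its interpolation weight. fastAtomicAdd is
    // used because threads handling different output pixels may hit the same input element.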
const scalar_t d2val = odata[index];
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1, w1),
i_numel,
static_cast<scalar_t>(h0lambda * w0lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1, w1 + w1p),
i_numel,
static_cast<scalar_t>(h0lambda * w1lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1 + h1p, w1),
i_numel,
static_cast<scalar_t>(h1lambda * w0lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1 + h1p, w1 + w1p),
i_numel,
static_cast<scalar_t>(h1lambda * w1lambda * d2val),
true);
}
}
static void upsample_bilinear2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_bilinear2d_out_cuda", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
upsample_2d_shape_check(
input,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
output.resize_({input.size(0), input.size(1), output_height, output_width});
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
const int num_kernels = output_height * output_width;
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bilinear2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bilinear2d_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(
num_kernels, rheight, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
static void upsample_bilinear2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_bilinear2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
if (grad_input.numel() == 0) {
return;
}
// A contiguous tensor is required for the kernel launch config
grad_input.contiguous();
  // Initialization to zero is required here: we launch one thread per output
  // element and atomicAdd into the input gradient, so with sparse sampling the
  // threads do not cover the whole input tensor.
grad_input.zero_();
const size_t num_kernels = nbatch * channels * output_height * output_width;
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_bilinear2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bilinear2d_backward_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, static_cast<size_t>(num_threads)),
num_threads,
0,
stream>>>(
nbatch * channels,
input_height,
input_width,
output_height,
output_width,
rheight,
rwidth,
align_corners,
idata,
odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
} // namespace
Tensor& upsample_bilinear2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bilinear2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor upsample_bilinear2d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bilinear2d_out_cuda_template(
output, input, output_size, align_corners, scales_h, scales_w);
return output;
}
Tensor& upsample_bilinear2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
upsample_bilinear2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
Tensor upsample_bilinear2d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bilinear2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
return grad_input;
}
using at::native::upsample::compute_output_size;
using at::native::upsample_cuda::get_scale_value;
Tensor upsample_bilinear2d_cuda(
const Tensor& input,
c10::optional<IntArrayRef> output_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto osize = compute_output_size(input.sizes(), output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
upsample_bilinear2d_out_cuda_template(output, input, osize, align_corners, scale_h, scale_w);
return output;
}
Tensor upsample_bilinear2d_backward_cuda(
const Tensor& grad_output,
c10::optional<IntArrayRef> output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<ArrayRef<double>> scale_factors) {
auto osize = compute_output_size(input_size, output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_bilinear2d_backward_out_cuda_template(
grad_input, grad_output, osize, input_size, align_corners, scale_h, scale_w);
return grad_input;
}
} // namespace native
} // namespace at
|
c90ef42f07d5084d5f8e0eac6b4706ae0876902e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "image.h"
#define PIXELS_PER_BLOCK 512
#define THREAD_PER_BLOCK 4
#define min(a, b) ((a) < (b) ? (a) : (b))
__global__ void filter(BYTE* img, int filter_size, double* filter, int height, int width, int channels, BYTE* res)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
double s = 0.0;
//if (x ==0 && y == 0)
{
for (int i = 0; i < filter_size; i++)
{
for (int j = 0; j < filter_size; j++)
{
s += filter[i * filter_size + j];
}
}
for (int i = 0; i < filter_size; i++)
{
for (int j = 0; j < filter_size; j++)
{
filter[i * filter_size + j] /= s;
}
}
filter_size /= 2;
}
//__syncthreads();
//printf("%d , %lf \n", filter_size, s);
for (int i = x * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK); i < min(height, x * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK) + PIXELS_PER_BLOCK / THREAD_PER_BLOCK); i++)
{
for (int j = y * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK); j < min(width, y * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK) + PIXELS_PER_BLOCK / THREAD_PER_BLOCK); j++)
{
for (int c = 0; c < channels; c++)
{
res[i * width * channels + j * channels + c] = 0;
for (int v = -filter_size; v <= filter_size; v++)
{
for (int h = -filter_size; h <= filter_size; h++)
{
if (i + v >= 0 && j + h >= 0 && i + v <= height - 1 && j + h <= width - 1)
{
res[i * width * channels + j * channels + c] += filter[(v + filter_size) * (filter_size * 2 + 1) + h + filter_size] * img[(i + v) * width * channels + (j + h) * channels + c];
}
}
}
}
}
}
}
int main(int argc, char** argv)
{
if (argc < 3)
{
printf("usage : %s <input_image_path> <output_image_path>", argv[0]);
exit(1);
}
int width, height, channels;
BYTE* h_img = stbi_load(argv[1], &width, &height, &channels, 0);
BYTE* d_img;
BYTE* d_res_img;
if(h_img == NULL)
{
printf("Error in loading the image\n");
exit(1);
}
hipMalloc(&d_img, sizeof(BYTE) * width * height * channels);
hipMalloc(&d_res_img, sizeof(BYTE) * width * height * channels);
hipMemcpy(d_img, h_img, sizeof(BYTE) * width * height *channels, hipMemcpyHostToDevice);
int filter_size = 3;
printf("Size of filter : \n");
scanf("%d", &filter_size);
double* h_filter = (double*) malloc(sizeof(double) * filter_size * filter_size);
if ((filter_size & 1) == 0)
{
printf("filter size must be odd\n");
exit(1);
}
printf("Filter : \n");
for (int i = 0; i < filter_size; ++i)
{
for (int j = 0; j < filter_size; ++j)
{
printf("filter[%d][%d] : \n", i, j);
scanf("%lf", &h_filter[i * filter_size + j]);
}
}
double* d_filter;
// h_filter[0] = 1; h_filter[1] = 1; h_filter[2] = 1;
// h_filter[3] = 1; h_filter[4] = 1; h_filter[5] = 1;
// h_filter[6] = 1; h_filter[7] = 1; h_filter[8] = 1;
hipMalloc(&d_filter, sizeof(double) * filter_size * filter_size);
hipMemcpy(d_filter, h_filter, sizeof(double) * filter_size * filter_size, hipMemcpyHostToDevice);
int blck_x = (height + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK;
int blck_y = (width + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK;
hipLaunchKernelGGL(( filter), dim3(dim3(blck_x, blck_y)), dim3(dim3(THREAD_PER_BLOCK, THREAD_PER_BLOCK)), 0, 0, d_img, filter_size, d_filter, height, width, channels, d_res_img);
hipMemcpy(h_img, d_res_img, sizeof(BYTE) * height * width * channels, hipMemcpyDeviceToHost);
if(!stbi_write_jpg(argv[2], width, height, channels, h_img, 100))
{
printf("Error in writing the image\n");
exit(1);
}
free(h_filter);
hipFree(d_filter);
free(h_img);
hipFree(d_img);
hipFree(d_res_img);
return 0;
}
|
c90ef42f07d5084d5f8e0eac6b4706ae0876902e.cu
|
#include "image.h"
#define PIXELS_PER_BLOCK 512
#define THREAD_PER_BLOCK 4
#define min(a, b) ((a) < (b) ? (a) : (b))
__global__ void filter(BYTE* img, int filter_size, double* filter, int height, int width, int channels, BYTE* res)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
double s = 0.0;
//if (x ==0 && y == 0)
{
for (int i = 0; i < filter_size; i++)
{
for (int j = 0; j < filter_size; j++)
{
s += filter[i * filter_size + j];
}
}
for (int i = 0; i < filter_size; i++)
{
for (int j = 0; j < filter_size; j++)
{
filter[i * filter_size + j] /= s;
}
}
filter_size /= 2;
}
//__syncthreads();
//printf("%d , %lf \n", filter_size, s);
for (int i = x * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK); i < min(height, x * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK) + PIXELS_PER_BLOCK / THREAD_PER_BLOCK); i++)
{
for (int j = y * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK); j < min(width, y * (PIXELS_PER_BLOCK / THREAD_PER_BLOCK) + PIXELS_PER_BLOCK / THREAD_PER_BLOCK); j++)
{
for (int c = 0; c < channels; c++)
{
res[i * width * channels + j * channels + c] = 0;
for (int v = -filter_size; v <= filter_size; v++)
{
for (int h = -filter_size; h <= filter_size; h++)
{
if (i + v >= 0 && j + h >= 0 && i + v <= height - 1 && j + h <= width - 1)
{
res[i * width * channels + j * channels + c] += filter[(v + filter_size) * (filter_size * 2 + 1) + h + filter_size] * img[(i + v) * width * channels + (j + h) * channels + c];
}
}
}
}
}
}
}
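// Work decomposition (as launched from main below): each block covers a
// PIXELS_PER_BLOCK x PIXELS_PER_BLOCK (512x512) pixel region, and each of its
// THREAD_PER_BLOCK x THREAD_PER_BLOCK (4x4) threads filters a 128x128 sub-tile.
// Note that every thread runs the normalization pass over the shared filter in
// global memory before convolving.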
int main(int argc, char** argv)
{
if (argc < 3)
{
printf("usage : %s <input_image_path> <output_image_path>", argv[0]);
exit(1);
}
int width, height, channels;
BYTE* h_img = stbi_load(argv[1], &width, &height, &channels, 0);
BYTE* d_img;
BYTE* d_res_img;
if(h_img == NULL)
{
printf("Error in loading the image\n");
exit(1);
}
cudaMalloc(&d_img, sizeof(BYTE) * width * height * channels);
cudaMalloc(&d_res_img, sizeof(BYTE) * width * height * channels);
cudaMemcpy(d_img, h_img, sizeof(BYTE) * width * height *channels, cudaMemcpyHostToDevice);
int filter_size = 3;
printf("Size of filter : \n");
scanf("%d", &filter_size);
double* h_filter = (double*) malloc(sizeof(double) * filter_size * filter_size);
if ((filter_size & 1) == 0)
{
printf("filter size must be odd\n");
exit(1);
}
printf("Filter : \n");
for (int i = 0; i < filter_size; ++i)
{
for (int j = 0; j < filter_size; ++j)
{
printf("filter[%d][%d] : \n", i, j);
scanf("%lf", &h_filter[i * filter_size + j]);
}
}
double* d_filter;
// h_filter[0] = 1; h_filter[1] = 1; h_filter[2] = 1;
// h_filter[3] = 1; h_filter[4] = 1; h_filter[5] = 1;
// h_filter[6] = 1; h_filter[7] = 1; h_filter[8] = 1;
cudaMalloc(&d_filter, sizeof(double) * filter_size * filter_size);
cudaMemcpy(d_filter, h_filter, sizeof(double) * filter_size * filter_size, cudaMemcpyHostToDevice);
int blck_x = (height + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK;
int blck_y = (width + PIXELS_PER_BLOCK - 1) / PIXELS_PER_BLOCK;
filter<<<dim3(blck_x, blck_y), dim3(THREAD_PER_BLOCK, THREAD_PER_BLOCK)>>>(d_img, filter_size, d_filter, height, width, channels, d_res_img);
cudaMemcpy(h_img, d_res_img, sizeof(BYTE) * height * width * channels, cudaMemcpyDeviceToHost);
if(!stbi_write_jpg(argv[2], width, height, channels, h_img, 100))
{
printf("Error in writing the image\n");
exit(1);
}
free(h_filter);
cudaFree(d_filter);
free(h_img);
cudaFree(d_img);
cudaFree(d_res_img);
return 0;
}
|
5a662b71d9b70dc875c55418e53095b8e35c2330.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sum(int *a, int *b, int *c)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
|
5a662b71d9b70dc875c55418e53095b8e35c2330.cu
|
#include "includes.h"
__global__ void sum(int *a, int *b, int *c)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
|
d2b111c2300f2d4dc59632d0e1203a7cbed15d94.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i+= stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 64;
int numBlocks = (N + blockSize - 1) / blockSize / 24;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
|
d2b111c2300f2d4dc59632d0e1203a7cbed15d94.cu
|
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i+= stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 64;
int numBlocks = (N + blockSize - 1) / blockSize / 24;
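  // With N = 1<<20 and blockSize = 64: (N + 63)/64 = 16384, so numBlocks = 16384/24 = 682
  // and the launch has 682*64 = 43648 threads; the grid-stride loop in add() therefore
  // makes each thread process roughly 24 elements.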
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
6a80343526d7746496a1345f91ac127f469b4508.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "SumaColMatrizKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int M = 2;
int N = XSIZE*YSIZE;
float *Md = NULL;
hipMalloc(&Md, XSIZE*YSIZE*sizeof(float));
float *Nd = NULL;
hipMalloc(&Nd, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(SumaColMatrizKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, M, N, Md, Nd);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(SumaColMatrizKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, M, N, Md, Nd);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(SumaColMatrizKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, M, N, Md, Nd);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
6a80343526d7746496a1345f91ac127f469b4508.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "SumaColMatrizKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int M = 2;
int N = XSIZE*YSIZE;
float *Md = NULL;
cudaMalloc(&Md, XSIZE*YSIZE*sizeof(float));
float *Nd = NULL;
cudaMalloc(&Nd, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
SumaColMatrizKernel<<<gridBlock,threadBlock>>>(M,N,Md,Nd);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
SumaColMatrizKernel<<<gridBlock,threadBlock>>>(M,N,Md,Nd);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
SumaColMatrizKernel<<<gridBlock,threadBlock>>>(M,N,Md,Nd);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
db67d0b1efc29b0f9200a03c899c9849ec0367f4.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2018 Wei Dai <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
// Include these two files for GPU computing.
#include <include/cufhe_gpu.cuh>
using namespace cufhe;
#include <iostream>
using namespace std;
Ctxt cufhe::ct_zero;
Ctxt cufhe::ct_one;
void NandCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = 1 - in0.message_ * in1.message_;
}
void OrCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = (in0.message_ + in1.message_) > 0;
}
void AndCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = in0.message_ * in1.message_;
}
void XorCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = (in0.message_ + in1.message_) & 0x1;
}
int main() {
hipSetDevice(0);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
uint32_t kNumSMs = prop.multiProcessorCount;
uint32_t kNumTests = kNumSMs * 32;// * 8;
uint32_t kNumLevels = 4;
SetSeed(); // set random seed
PriKey pri_key; // private key
PubKey pub_key; // public key
Ptxt* pt = new Ptxt[2 * kNumTests];
Ctxt* ct = new Ctxt[2 * kNumTests];
Synchronize();
bool correct;
cout<< "------ Key Generation ------" <<endl;
KeyGen(pub_key, pri_key);
// Alternatively ...
// PriKeyGen(pri_key);
// PubKeyGen(pub_key, pri_key);
cout<< "------ Test Encryption/Decryption ------" <<endl;
cout<< "Number of tests:\t" << kNumTests <<endl;
correct = true;
for (int i = 0; i < kNumTests; i ++) {
pt[i].message_ = rand() % Ptxt::kPtxtSpace;
Encrypt(ct[i], pt[i], pri_key);
Decrypt(pt[kNumTests + i], ct[i], pri_key);
if (pt[kNumTests + i].message_ != pt[i].message_) {
correct = false;
break;
}
}
if (correct)
cout<< "PASS" <<endl;
else
cout<< "FAIL" <<endl;
cout<< "------ Initializing Data on GPU(s) ------" <<endl;
Initialize(pub_key); // essential for GPU computing
cout<< "------ Test NAND Gate ------" <<endl;
cout<< "Number of tests:\t" << kNumTests <<endl;
// Create CUDA streams for parallel gates.
Stream* st = new Stream[kNumSMs];
for (int i = 0; i < kNumSMs; i ++)
st[i].Create();
correct = true;
for (int i = 0; i < 2 * kNumTests; i ++) {
pt[i] = rand() % Ptxt::kPtxtSpace;
Encrypt(ct[i], pt[i], pri_key);
}
Synchronize();
float et;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Here, pass streams to gates for parallel gates.
for (int i = 0; i < kNumTests; i ++)
Nand(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
Or(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
And(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
Xor(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
Synchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&et, start, stop);
cout<< et / kNumTests / kNumLevels << " ms / gate" <<endl;
hipEventDestroy(start);
hipEventDestroy(stop);
int cnt_failures = 0;
for (int i = 0; i < kNumTests; i ++) {
NandCheck(pt[i], pt[i], pt[i + kNumTests]);
OrCheck(pt[i], pt[i], pt[i + kNumTests]);
AndCheck(pt[i], pt[i], pt[i + kNumTests]);
XorCheck(pt[i], pt[i], pt[i + kNumTests]);
Decrypt(pt[i + kNumTests], ct[i], pri_key);
if (pt[i + kNumTests].message_ != pt[i].message_) {
correct = false;
cnt_failures += 1;
//std::cout<< "Fail at iteration: " << i <<std::endl;
}
}
if (correct)
cout<< "PASS" <<endl;
else
cout<< "FAIL:\t" << cnt_failures << "/" << kNumTests <<endl;
for (int i = 0; i < kNumSMs; i ++)
st[i].Destroy();
delete [] st;
cout<< "------ Cleaning Data on GPU(s) ------" <<endl;
CleanUp(); // essential to clean and deallocate data
delete [] ct;
delete [] pt;
return 0;
}
|
db67d0b1efc29b0f9200a03c899c9849ec0367f4.cu
|
/**
* Copyright 2018 Wei Dai <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
// Include these two files for GPU computing.
#include <include/cufhe_gpu.cuh>
using namespace cufhe;
#include <iostream>
using namespace std;
Ctxt cufhe::ct_zero;
Ctxt cufhe::ct_one;
void NandCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = 1 - in0.message_ * in1.message_;
}
void OrCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = (in0.message_ + in1.message_) > 0;
}
void AndCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = in0.message_ * in1.message_;
}
void XorCheck(Ptxt& out, const Ptxt& in0, const Ptxt& in1) {
out.message_ = (in0.message_ + in1.message_) & 0x1;
}
int main() {
cudaSetDevice(0);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
uint32_t kNumSMs = prop.multiProcessorCount;
uint32_t kNumTests = kNumSMs * 32;// * 8;
uint32_t kNumLevels = 4;
SetSeed(); // set random seed
PriKey pri_key; // private key
PubKey pub_key; // public key
Ptxt* pt = new Ptxt[2 * kNumTests];
Ctxt* ct = new Ctxt[2 * kNumTests];
Synchronize();
bool correct;
cout<< "------ Key Generation ------" <<endl;
KeyGen(pub_key, pri_key);
// Alternatively ...
// PriKeyGen(pri_key);
// PubKeyGen(pub_key, pri_key);
cout<< "------ Test Encryption/Decryption ------" <<endl;
cout<< "Number of tests:\t" << kNumTests <<endl;
correct = true;
for (int i = 0; i < kNumTests; i ++) {
pt[i].message_ = rand() % Ptxt::kPtxtSpace;
Encrypt(ct[i], pt[i], pri_key);
Decrypt(pt[kNumTests + i], ct[i], pri_key);
if (pt[kNumTests + i].message_ != pt[i].message_) {
correct = false;
break;
}
}
if (correct)
cout<< "PASS" <<endl;
else
cout<< "FAIL" <<endl;
cout<< "------ Initializing Data on GPU(s) ------" <<endl;
Initialize(pub_key); // essential for GPU computing
cout<< "------ Test NAND Gate ------" <<endl;
cout<< "Number of tests:\t" << kNumTests <<endl;
// Create CUDA streams for parallel gates.
Stream* st = new Stream[kNumSMs];
for (int i = 0; i < kNumSMs; i ++)
st[i].Create();
correct = true;
for (int i = 0; i < 2 * kNumTests; i ++) {
pt[i] = rand() % Ptxt::kPtxtSpace;
Encrypt(ct[i], pt[i], pri_key);
}
Synchronize();
float et;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Here, pass streams to gates for parallel gates.
for (int i = 0; i < kNumTests; i ++)
Nand(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
Or(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
And(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
for (int i = 0; i < kNumTests; i ++)
Xor(ct[i], ct[i], ct[i + kNumTests], st[i % kNumSMs]);
Synchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&et, start, stop);
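// et covers the four gate loops above (Nand, Or, And, Xor), i.e. kNumLevels * kNumTests
// gate evaluations, so dividing by both yields the average time per gate.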
cout<< et / kNumTests / kNumLevels << " ms / gate" <<endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
int cnt_failures = 0;
for (int i = 0; i < kNumTests; i ++) {
NandCheck(pt[i], pt[i], pt[i + kNumTests]);
OrCheck(pt[i], pt[i], pt[i + kNumTests]);
AndCheck(pt[i], pt[i], pt[i + kNumTests]);
XorCheck(pt[i], pt[i], pt[i + kNumTests]);
Decrypt(pt[i + kNumTests], ct[i], pri_key);
if (pt[i + kNumTests].message_ != pt[i].message_) {
correct = false;
cnt_failures += 1;
//std::cout<< "Fail at iteration: " << i <<std::endl;
}
}
if (correct)
cout<< "PASS" <<endl;
else
cout<< "FAIL:\t" << cnt_failures << "/" << kNumTests <<endl;
for (int i = 0; i < kNumSMs; i ++)
st[i].Destroy();
delete [] st;
cout<< "------ Cleaning Data on GPU(s) ------" <<endl;
CleanUp(); // essential to clean and deallocate data
delete [] ct;
delete [] pt;
return 0;
}
|
9833a939d15d3f18c5e70be789f029143bc1ba4b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
using namespace cv;
/*
*******************************************
* Function: 2D image convolution
* Inputs:  float* src        input image
*          float* dst        convolved output image
*          int rows          number of image rows
*          int cols          number of image columns
*          float* kernel     convolution kernel
*          int kernel_size   kernel size
*******************************************
*/
__global__ void conv2d(float* src, float* dst, int rows, int cols, float* kernel, int kernel_size)
{
int row = blockIdx.x;
if (row < 1 || row > rows - 1)
return;
int col = blockIdx.y;
if (col < 1 || col > cols - 1)
return;
int dstIndex = col * rows + row;
dst[dstIndex] = 0;
int kerIndex = kernel_size * kernel_size - 1;
for (int kc = -kernel_size / 2; kc < kernel_size / 2 + 1; kc++) {
int srcIndex = (col + kc) * rows + row;
for (int kr = -kernel_size / 2; kr < kernel_size / 2 + 1; kr++)
dst[dstIndex] += kernel[kerIndex--] * src[srcIndex + kr];
}
}
int main()
{
Mat img = imread("lena.jpg", IMREAD_GRAYSCALE);
int width, height, channels;
width = img.size().width;
height = img.size().height;
channels = img.channels();
printf("width = %d, height = %d, channels = %d\n", width, height, channels);
int size = width * height * channels;
float* hostSrc = new float[size];
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
hostSrc[i*width + j] = float(img.data[i*width + j]);
float* deviceSrc, *deviceDst, *deviceKer;
float* hostDst = new float[size];
float kernel[9] = { 0.1, 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, 0.1 };
int kernel_size = 3;
hipMalloc((void**)&deviceSrc, size * sizeof(float));
hipMalloc((void**)&deviceDst, size * sizeof(float));
hipMalloc((void**)&deviceKer, kernel_size * kernel_size * sizeof(float));
hipMemcpy(deviceSrc, hostSrc, size * sizeof(float), hipMemcpyHostToDevice);
hipMemset(deviceDst, 0, size * sizeof(float));
hipMemcpy(deviceKer, kernel, kernel_size * kernel_size * sizeof(float), hipMemcpyHostToDevice);
dim3 dimGrid(height, width);
conv2d << <dimGrid, 1 >> > (deviceSrc, deviceDst, height, width, deviceKer, kernel_size);
hipMemcpy(hostDst, deviceDst, size * sizeof(float), hipMemcpyDeviceToHost);
hipFree(deviceSrc);
hipFree(deviceDst);
hipFree(deviceKer);
Mat img1(height, width, img.type());
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
img1.data[i*width + j] = uchar(hostDst[i * width + j]);
imshow("lena.jpg", img);
imshow("lena_conv2d.jpg", img1);
waitKey(0);
return 0;
}
|
9833a939d15d3f18c5e70be789f029143bc1ba4b.cu
|
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
using namespace cv;
/*
*******************************************
* Function: 2D image convolution
* Inputs:  float* src        input image
*          float* dst        convolved output image
*          int rows          number of image rows
*          int cols          number of image columns
*          float* kernel     convolution kernel
*          int kernel_size   kernel size
*******************************************
*/
__global__ void conv2d(float* src, float* dst, int rows, int cols, float* kernel, int kernel_size)
{
int row = blockIdx.x;
if (row < 1 || row > rows - 1)
return;
int col = blockIdx.y;
if (col < 1 || col > cols - 1)
return;
int dstIndex = col * rows + row;
dst[dstIndex] = 0;
int kerIndex = kernel_size * kernel_size - 1;
for (int kc = -kernel_size / 2; kc < kernel_size / 2 + 1; kc++) {
int srcIndex = (col + kc) * rows + row;
for (int kr = -kernel_size / 2; kr < kernel_size / 2 + 1; kr++)
dst[dstIndex] += kernel[kerIndex--] * src[srcIndex + kr];
}
}
int main()
{
Mat img = imread("lena.jpg", IMREAD_GRAYSCALE);
int width, height, channels;
width = img.size().width;
height = img.size().height;
channels = img.channels();
printf("width = %d, height = %d, channels = %d\n", width, height, channels);
int size = width * height * channels;
float* hostSrc = new float[size];
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
hostSrc[i*width + j] = float(img.data[i*width + j]);
float* deviceSrc, *deviceDst, *deviceKer;
float* hostDst = new float[size];
float kernel[9] = { 0.1, 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, 0.1 };
int kernel_size = 3;
cudaMalloc((void**)&deviceSrc, size * sizeof(float));
cudaMalloc((void**)&deviceDst, size * sizeof(float));
cudaMalloc((void**)&deviceKer, kernel_size * kernel_size * sizeof(float));
cudaMemcpy(deviceSrc, hostSrc, size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemset(deviceDst, 0, size * sizeof(float));
cudaMemcpy(deviceKer, kernel, kernel_size * kernel_size * sizeof(float), cudaMemcpyHostToDevice);
dim3 dimGrid(height, width);
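// One single-thread block per pixel: blockIdx.x indexes the row and blockIdx.y the column
// inside conv2d above.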
conv2d << <dimGrid, 1 >> > (deviceSrc, deviceDst, height, width, deviceKer, kernel_size);
cudaMemcpy(hostDst, deviceDst, size * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(deviceSrc);
cudaFree(deviceDst);
cudaFree(deviceKer);
Mat img1(height, width, img.type());
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
img1.data[i*width + j] = uchar(hostDst[i * width + j]);
imshow("lena.jpg", img);
imshow("lena_conv2d.jpg", img1);
waitKey(0);
return 0;
}
|
d752d67d5545f8f1ae864750015d5071f18f89dd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
#include "Utilities.cuh"
#include "TimingGPU.cuh"
//#define FULLSVD
//#define PRINTRESULTS
/********/
/* MAIN */
/********/
int main() {
const int M = 3;
const int N = 3;
const int lda = M;
//const int numMatrices = 3;
const int numMatrices = 16384;
TimingGPU timerGPU;
// --- Setting the host matrix
double *h_A = (double *)malloc(lda * N * numMatrices * sizeof(double));
for (unsigned int k = 0; k < numMatrices; k++)
for (unsigned int i = 0; i < M; i++){
for (unsigned int j = 0; j < N; j++){
h_A[k * M * N + j * M + i] = (1. / (k + 1)) * (i + j * j) * (i + j);
//printf("%d %d %f\n", i, j, h_A[j*M + i]);
}
}
// --- Setting the device matrix and moving the host matrix to the device
double *d_A; gpuErrchk(hipMalloc(&d_A, M * N * numMatrices * sizeof(double)));
gpuErrchk(hipMemcpy(d_A, h_A, M * N * numMatrices * sizeof(double), hipMemcpyHostToDevice));
// --- host side SVD results space
double *h_S = (double *)malloc(N * numMatrices * sizeof(double));
double *h_U = NULL;
double *h_V = NULL;
#ifdef FULLSVD
h_U = (double *)malloc(M * M * numMatrices * sizeof(double));
h_V = (double *)malloc(N * N * numMatrices * sizeof(double));
#endif
// --- device side SVD workspace and matrices
int work_size = 0;
int *devInfo; gpuErrchk(hipMalloc(&devInfo, sizeof(int)));
double *d_S; gpuErrchk(hipMalloc(&d_S, N * numMatrices * sizeof(double)));
double *d_U = NULL;
double *d_V = NULL;
#ifdef FULLSVD
gpuErrchk(hipMalloc(&d_U, M * M * numMatrices * sizeof(double)));
gpuErrchk(hipMalloc(&d_V, N * N * numMatrices * sizeof(double)));
#endif
double *d_work = NULL; /* device workspace for gesvdj */
int devInfo_h = 0; /* host copy of error devInfo_h */
// --- Parameters configuration of Jacobi-based SVD
const double tol = 1.e-7;
const int maxSweeps = 15;
hipsolverEigMode_t jobz; // --- HIPSOLVER_EIG_MODE_VECTOR - Compute eigenvectors; HIPSOLVER_EIG_MODE_NOVECTOR - Compute singular values only
#ifdef FULLSVD
jobz = HIPSOLVER_EIG_MODE_VECTOR;
#else
jobz = HIPSOLVER_EIG_MODE_NOVECTOR;
#endif
const int econ = 0; // --- econ = 1 for economy size
// --- Numerical result parameters of gesvdj
double residual = 0;
int executedSweeps = 0;
// --- CUDA solver initialization
hipsolverDnHandle_t solver_handle = NULL;
cusolveSafeCall(hipsolverDnCreate(&solver_handle));
// --- Configuration of gesvdj
hipsolverGesvdjInfo_t gesvdj_params = NULL;
cusolveSafeCall(hipsolverDnCreateGesvdjInfo(&gesvdj_params));
// --- Set the computation tolerance, since the default tolerance is machine precision
cusolveSafeCall(hipsolverDnXgesvdjSetTolerance(gesvdj_params, tol));
// --- Set the maximum number of sweeps, since the default value of max. sweeps is 100
cusolveSafeCall(hipsolverDnXgesvdjSetMaxSweeps(gesvdj_params, maxSweeps));
// --- Query the SVD workspace
cusolveSafeCall(hipsolverDnDgesvdjBatched_bufferSize(
solver_handle,
jobz, // --- Compute the singular vectors or not
M, // --- Number of rows of A, 0 <= M
N, // --- Number of columns of A, 0 <= N
d_A, // --- M x N
lda, // --- Leading dimension of A
d_S, // --- Square matrix of size min(M, N) x min(M, N)
d_U, // --- M x M if econ = 0, M x min(M, N) if econ = 1
lda, // --- Leading dimension of U, ldu >= max(1, M)
d_V, // --- N x N if econ = 0, N x min(M,N) if econ = 1
lda, // --- Leading dimension of V, ldv >= max(1, N)
&work_size,
gesvdj_params,
numMatrices));
gpuErrchk(hipMalloc(&d_work, sizeof(double) * work_size));
// --- Compute SVD
timerGPU.StartCounter();
cusolveSafeCall(hipsolverDnDgesvdjBatched(
solver_handle,
jobz, // --- Compute the singular vectors or not
M, // --- Number of rows of A, 0 <= M
N, // --- Number of columns of A, 0 <= N
d_A, // --- M x N
lda, // --- Leading dimension of A
d_S, // --- Square matrix of size min(M, N) x min(M, N)
d_U, // --- M x M if econ = 0, M x min(M, N) if econ = 1
lda, // --- Leading dimension of U, ldu >= max(1, M)
d_V, // --- N x N if econ = 0, N x min(M, N) if econ = 1
lda, // --- Leading dimension of V, ldv >= max(1, N)
d_work,
work_size,
devInfo,
gesvdj_params,
numMatrices));
printf("Calculation of the singular values only: %f ms\n\n", timerGPU.GetCounter());
gpuErrchk(hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(h_S, d_S, sizeof(double) * N * numMatrices, hipMemcpyDeviceToHost));
#ifdef FULLSVD
gpuErrchk(hipMemcpy(h_U, d_U, sizeof(double) * lda * M * numMatrices, hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(h_V, d_V, sizeof(double) * lda * N * numMatrices, hipMemcpyDeviceToHost));
#endif
#ifdef PRINTRESULTS
printf("SINGULAR VALUES \n");
printf("_______________ \n");
for (int k = 0; k < numMatrices; k++) {
for (int p = 0; p < N; p++)
printf("Matrix nr. %d; SV nr. %d; Value = %f\n", k, p, h_S[k * N + p]);
printf("\n");
}
#ifdef FULLSVD
printf("SINGULAR VECTORS U \n");
printf("__________________ \n");
for (int k = 0; k < numMatrices; k++) {
for (int q = 0; q < (1 - econ) * M + econ * min(M, N); q++)
for (int p = 0; p < M; p++)
printf("Matrix nr. %d; U nr. %d; Value = %f\n", k, p, h_U[((1 - econ) * M + econ * min(M, N)) * M * k + q * M + p]);
printf("\n");
}
printf("SINGULAR VECTORS V \n");
printf("__________________ \n");
for (int k = 0; k < numMatrices; k++) {
for (int q = 0; q < (1 - econ) * N + econ * min(M, N); q++)
for (int p = 0; p < N; p++)
printf("Matrix nr. %d; V nr. %d; Value = %f\n", k, p, h_V[((1 - econ) * N + econ * min(M, N)) * N * k + q * N + p]);
printf("\n");
}
#endif
#endif
if (0 == devInfo_h){
printf("gesvdj converges \n");
}
else if (0 > devInfo_h){
printf("%d-th parameter is wrong \n", -devInfo_h);
exit(1);
}
else{
printf("WARNING: devInfo_h = %d : gesvdj does not converge \n", devInfo_h);
}
// --- Free resources
if (d_A) gpuErrchk(hipFree(d_A));
if (d_S) gpuErrchk(hipFree(d_S));
#ifdef FULLSVD
if (d_U) gpuErrchk(hipFree(d_U));
if (d_V) gpuErrchk(hipFree(d_V));
#endif
if (devInfo) gpuErrchk(hipFree(devInfo));
if (d_work) gpuErrchk(hipFree(d_work));
if (solver_handle) cusolveSafeCall(hipsolverDnDestroy(solver_handle));
if (gesvdj_params) cusolveSafeCall(hipsolverDnDestroyGesvdjInfo(gesvdj_params));
gpuErrchk(hipDeviceReset());
return 0;
}
|
d752d67d5545f8f1ae864750015d5071f18f89dd.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include "Utilities.cuh"
#include "TimingGPU.cuh"
//#define FULLSVD
//#define PRINTRESULTS
/********/
/* MAIN */
/********/
int main() {
const int M = 3;
const int N = 3;
const int lda = M;
//const int numMatrices = 3;
const int numMatrices = 16384;
TimingGPU timerGPU;
// --- Setting the host matrix
double *h_A = (double *)malloc(lda * N * numMatrices * sizeof(double));
for (unsigned int k = 0; k < numMatrices; k++)
for (unsigned int i = 0; i < M; i++){
for (unsigned int j = 0; j < N; j++){
h_A[k * M * N + j * M + i] = (1. / (k + 1)) * (i + j * j) * (i + j);
//printf("%d %d %f\n", i, j, h_A[j*M + i]);
}
}
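// h_A is packed column-major per matrix: entry (i, j) of matrix k sits at offset
// k*M*N + j*M + i, which matches the leading dimension lda = M passed to cuSOLVER below.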
// --- Setting the device matrix and moving the host matrix to the device
double *d_A; gpuErrchk(cudaMalloc(&d_A, M * N * numMatrices * sizeof(double)));
gpuErrchk(cudaMemcpy(d_A, h_A, M * N * numMatrices * sizeof(double), cudaMemcpyHostToDevice));
// --- host side SVD results space
double *h_S = (double *)malloc(N * numMatrices * sizeof(double));
double *h_U = NULL;
double *h_V = NULL;
#ifdef FULLSVD
h_U = (double *)malloc(M * M * numMatrices * sizeof(double));
h_V = (double *)malloc(N * N * numMatrices * sizeof(double));
#endif
// --- device side SVD workspace and matrices
int work_size = 0;
int *devInfo; gpuErrchk(cudaMalloc(&devInfo, sizeof(int)));
double *d_S; gpuErrchk(cudaMalloc(&d_S, N * numMatrices * sizeof(double)));
double *d_U = NULL;
double *d_V = NULL;
#ifdef FULLSVD
gpuErrchk(cudaMalloc(&d_U, M * M * numMatrices * sizeof(double)));
gpuErrchk(cudaMalloc(&d_V, N * N * numMatrices * sizeof(double)));
#endif
double *d_work = NULL; /* device workspace for gesvdj */
int devInfo_h = 0; /* host copy of error devInfo_h */
// --- Parameters configuration of Jacobi-based SVD
const double tol = 1.e-7;
const int maxSweeps = 15;
cusolverEigMode_t jobz; // --- CUSOLVER_EIG_MODE_VECTOR - Compute eigenvectors; CUSOLVER_EIG_MODE_NOVECTOR - Compute singular values only
#ifdef FULLSVD
jobz = CUSOLVER_EIG_MODE_VECTOR;
#else
jobz = CUSOLVER_EIG_MODE_NOVECTOR;
#endif
const int econ = 0; // --- econ = 1 for economy size
// --- Numerical result parameters of gesvdj
double residual = 0;
int executedSweeps = 0;
// --- CUDA solver initialization
cusolverDnHandle_t solver_handle = NULL;
cusolveSafeCall(cusolverDnCreate(&solver_handle));
// --- Configuration of gesvdj
gesvdjInfo_t gesvdj_params = NULL;
cusolveSafeCall(cusolverDnCreateGesvdjInfo(&gesvdj_params));
// --- Set the computation tolerance, since the default tolerance is machine precision
cusolveSafeCall(cusolverDnXgesvdjSetTolerance(gesvdj_params, tol));
// --- Set the maximum number of sweeps, since the default value of max. sweeps is 100
cusolveSafeCall(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, maxSweeps));
// --- Query the SVD workspace
cusolveSafeCall(cusolverDnDgesvdjBatched_bufferSize(
solver_handle,
jobz, // --- Compute the singular vectors or not
M, // --- Number of rows of A, 0 <= M
N, // --- Number of columns of A, 0 <= N
d_A, // --- M x N
lda, // --- Leading dimension of A
d_S, // --- Square matrix of size min(M, N) x min(M, N)
d_U, // --- M x M if econ = 0, M x min(M, N) if econ = 1
lda, // --- Leading dimension of U, ldu >= max(1, M)
d_V, // --- N x N if econ = 0, N x min(M,N) if econ = 1
lda, // --- Leading dimension of V, ldv >= max(1, N)
&work_size,
gesvdj_params,
numMatrices));
gpuErrchk(cudaMalloc(&d_work, sizeof(double) * work_size));
// --- Compute SVD
timerGPU.StartCounter();
cusolveSafeCall(cusolverDnDgesvdjBatched(
solver_handle,
jobz, // --- Compute the singular vectors or not
M, // --- Number of rows of A, 0 <= M
N, // --- Number of columns of A, 0 <= N
d_A, // --- M x N
lda, // --- Leading dimension of A
d_S, // --- Square matrix of size min(M, N) x min(M, N)
d_U, // --- M x M if econ = 0, M x min(M, N) if econ = 1
lda, // --- Leading dimension of U, ldu >= max(1, M)
d_V, // --- N x N if econ = 0, N x min(M, N) if econ = 1
lda, // --- Leading dimension of V, ldv >= max(1, N)
d_work,
work_size,
devInfo,
gesvdj_params,
numMatrices));
printf("Calculation of the singular values only: %f ms\n\n", timerGPU.GetCounter());
gpuErrchk(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_S, d_S, sizeof(double) * N * numMatrices, cudaMemcpyDeviceToHost));
#ifdef FULLSVD
gpuErrchk(cudaMemcpy(h_U, d_U, sizeof(double) * lda * M * numMatrices, cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_V, d_V, sizeof(double) * lda * N * numMatrices, cudaMemcpyDeviceToHost));
#endif
#ifdef PRINTRESULTS
printf("SINGULAR VALUES \n");
printf("_______________ \n");
for (int k = 0; k < numMatrices; k++) {
for (int p = 0; p < N; p++)
printf("Matrix nr. %d; SV nr. %d; Value = %f\n", k, p, h_S[k * N + p]);
printf("\n");
}
#ifdef FULLSVD
printf("SINGULAR VECTORS U \n");
printf("__________________ \n");
for (int k = 0; k < numMatrices; k++) {
for (int q = 0; q < (1 - econ) * M + econ * min(M, N); q++)
for (int p = 0; p < M; p++)
printf("Matrix nr. %d; U nr. %d; Value = %f\n", k, p, h_U[((1 - econ) * M + econ * min(M, N)) * M * k + q * M + p]);
printf("\n");
}
printf("SINGULAR VECTORS V \n");
printf("__________________ \n");
for (int k = 0; k < numMatrices; k++) {
for (int q = 0; q < (1 - econ) * N + econ * min(M, N); q++)
for (int p = 0; p < N; p++)
printf("Matrix nr. %d; V nr. %d; Value = %f\n", k, p, h_V[((1 - econ) * N + econ * min(M, N)) * N * k + q * N + p]);
printf("\n");
}
#endif
#endif
if (0 == devInfo_h){
printf("gesvdj converges \n");
}
else if (0 > devInfo_h){
printf("%d-th parameter is wrong \n", -devInfo_h);
exit(1);
}
else{
printf("WARNING: devInfo_h = %d : gesvdj does not converge \n", devInfo_h);
}
// --- Free resources
if (d_A) gpuErrchk(cudaFree(d_A));
if (d_S) gpuErrchk(cudaFree(d_S));
#ifdef FULLSVD
if (d_U) gpuErrchk(cudaFree(d_U));
if (d_V) gpuErrchk(cudaFree(d_V));
#endif
if (devInfo) gpuErrchk(cudaFree(devInfo));
if (d_work) gpuErrchk(cudaFree(d_work));
if (solver_handle) cusolveSafeCall(cusolverDnDestroy(solver_handle));
if (gesvdj_params) cusolveSafeCall(cusolverDnDestroyGesvdjInfo(gesvdj_params));
gpuErrchk(cudaDeviceReset());
return 0;
}
|
c210813140d941b97bccd30bf6ea02306b488c54.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
********************************************************************/
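// Thread-block geometry: with NB_X = 64 and NB_Y = 4 each block has 256 threads and
// td = NB_X*ty + tx runs over 0..255; tx2 = td % half_NB_X and ty2 = td / half_NB_X
// reshape the same threads into a 32x8 layout for the 32x32 diagonal tiles, where each
// ty2 slice handles 4 columns of the partial row sums.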
__global__ void
zhemv_kernel_L_mgpu(
int n,
magmaDoubleComplex const * __restrict__ A, int lda,
magmaDoubleComplex const * __restrict__ x, int incx,
magmaDoubleComplex * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
magmaDoubleComplex psum, psum_t;
magmaDoubleComplex total = MAGMA_Z_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in zhemv (single GPU); why?
__shared__ magmaDoubleComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaDoubleComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaDoubleComplex rA[4];
magmaDoubleComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is partial of offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_Z_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_Z_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_Z_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_Z_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end zhemv_kernel_L_mgpu
/**************************************************************
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_zhemv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
********************************************************************/
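/*  Note (added): for output row 'ind', the kernel below sums work(ind, j) over
    block-columns j = blk, ..., last, where last = gridDim.x - 1 if this GPU owns
    block-column blk (it then also holds the transposed contributions to its right),
    and last = blk otherwise.
*/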
__global__ void
zhemv_kernel_L_mgpu_sum(
int n,
magmaDoubleComplex alpha,
int lda,
magmaDoubleComplex * __restrict__ y, int incy,
magmaDoubleComplex const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
magmaDoubleComplex Ax = MAGMA_Z_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_zhemv_sync for beta*y
}
}
// end zhemv_kernel_L_mgpu_sum
/**
Purpose
-------
magmablas_zhemv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX_16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a COMPLEX_16 array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16; otherwise
performance is degraded because the memory accesses are not
fully coalesced.
@param[in]
x COMPLEX_16 array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y COMPLEX_16 array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) COMPLEX_16 array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= ngpu*nb.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a COMPLEX_16 array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_zblas2
********************************************************************/
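/*  Usage sketch (illustrative only, not part of the original source): the full
    y = alpha*A*x + beta*y is obtained by pairing the two routines in this file, e.g.

        info = magmablas_zhemv_mgpu     ( uplo, n, alpha, d_lA, ldda, offset,
                                          x, incx, beta, y, incy,
                                          hwork, lhwork, dwork, ldwork,
                                          ngpu, nb, queues );
        info = magmablas_zhemv_mgpu_sync( uplo, n, alpha, d_lA, ldda, offset,
                                          x, incx, beta, y, incy,
                                          hwork, lhwork, dwork, ldwork,
                                          ngpu, nb, queues );

    The first call launches the per-GPU kernels and starts the asynchronous copy of
    each GPU's partial result into hwork; the second synchronizes the queues, scales
    y by beta on the CPU, and accumulates the partial results into y.
*/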
extern "C"
magma_int_t
magmablas_zhemv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
magmaDoubleComplex const *x, magma_int_t incx,
magmaDoubleComplex beta, // unused, see magmablas_zhemv_mgpu_sync
magmaDoubleComplex *y, magma_int_t incy, // unused
magmaDoubleComplex *hwork, magma_int_t lhwork,
magmaDoubleComplex_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
bool upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
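    // Workspace sizes (note added for clarity): each dwork[dev] needs one ldda-length
    // column to broadcast x plus 'blocks' columns of partial sums, hence ldwmin = ldda*(blocks + 1);
    // hwork receives n partial-result entries per GPU, hence lhwmin = n*ngpu.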
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
magmaDoubleComplex const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// first column of dwork is to broadcast x to all GPUs.
// remaining blocks number of columns is for partial sums from
// each block, as in single GPU version.
magmaDoubleComplex *dx_dev = dwork[dev];
magmaDoubleComplex *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_zsetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
hipLaunchKernelGGL(( zhemv_kernel_U_mgpu), dim3(grid), dim3(threads), 0, queues[dev]->cuda_stream() ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( zhemv_kernel_U_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev]->cuda_stream() ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
hipLaunchKernelGGL(( zhemv_kernel_L_mgpu), dim3(grid), dim3(threads), 0, queues[dev]->cuda_stream() ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( zhemv_kernel_L_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev]->cuda_stream() ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
magmaDoubleComplex *dx_dev = dwork[dev];
magma_zgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_zhemv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
/**
Synchronizes and accumulates the final zhemv result.
For convenience, the parameters are identical to magmablas_zhemv_mgpu
(though some are unused here).
@see magmablas_zhemv_mgpu
@ingroup magma_zblas2
********************************************************************/
extern "C" magma_int_t
magmablas_zhemv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_zhemv_mgpu
magma_int_t n,
magmaDoubleComplex alpha, // unused
magmaDoubleComplex_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
magmaDoubleComplex const *x, magma_int_t incx, // unused
magmaDoubleComplex beta,
magmaDoubleComplex *y, magma_int_t incy, // unused
magmaDoubleComplex *hwork, magma_int_t lhwork,
magmaDoubleComplex_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const magmaDoubleComplex c_one = MAGMA_Z_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_zscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_zaxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
|
c210813140d941b97bccd30bf6ea02306b488c54.cu
|
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
********************************************************************/
__global__ void
zhemv_kernel_L_mgpu(
int n,
magmaDoubleComplex const * __restrict__ A, int lda,
magmaDoubleComplex const * __restrict__ x, int incx,
magmaDoubleComplex * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
magmaDoubleComplex psum, psum_t;
magmaDoubleComplex total = MAGMA_Z_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in zhemv (single GPU); why?
__shared__ magmaDoubleComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaDoubleComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaDoubleComplex rA[4];
magmaDoubleComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is partial of offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_Z_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_Z_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_Z_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_Z_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end zhemv_kernel_L_mgpu
/**************************************************************
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_zhemv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
********************************************************************/
__global__ void
zhemv_kernel_L_mgpu_sum(
int n,
magmaDoubleComplex alpha,
int lda,
magmaDoubleComplex * __restrict__ y, int incy,
magmaDoubleComplex const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
magmaDoubleComplex Ax = MAGMA_Z_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_zhemv_sync for beta*y
}
}
// end zhemv_kernel_L_mgpu_sum
/**
Purpose
-------
magmablas_zhemv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX_16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a COMPLEX_16 array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16; otherwise
performance is degraded because the memory accesses are not
fully coalesced.
@param[in]
x COMPLEX_16 array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y COMPLEX_16 array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) COMPLEX_16 array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= ngpu*nb.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a COMPLEX_16 array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_zblas2
********************************************************************/
extern "C"
magma_int_t
magmablas_zhemv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
magmaDoubleComplex const *x, magma_int_t incx,
magmaDoubleComplex beta, // unused, see magmablas_zhemv_mgpu_sync
magmaDoubleComplex *y, magma_int_t incy, // unused
magmaDoubleComplex *hwork, magma_int_t lhwork,
magmaDoubleComplex_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
bool upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
magmaDoubleComplex const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// first column of dwork is to broadcast x to all GPUs.
// remaining blocks number of columns is for partial sums from
// each block, as in single GPU version.
magmaDoubleComplex *dx_dev = dwork[dev];
magmaDoubleComplex *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_zsetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
zhemv_kernel_U_mgpu<<< grid, threads, 0, queues[dev]->cuda_stream() >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
zhemv_kernel_U_mgpu_sum<<< grid, threads_sum, 0, queues[dev]->cuda_stream() >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
zhemv_kernel_L_mgpu<<< grid, threads, 0, queues[dev]->cuda_stream() >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
zhemv_kernel_L_mgpu_sum<<< grid, threads_sum, 0, queues[dev]->cuda_stream() >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
magmaDoubleComplex *dx_dev = dwork[dev];
magma_zgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_zhemv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
/**
Synchronizes and accumulates the final zhemv result.
For convenience, the parameters are identical to magmablas_zhemv_mgpu
(though some are unused here).
@see magmablas_zhemv_mgpu
@ingroup magma_zblas2
********************************************************************/
extern "C" magma_int_t
magmablas_zhemv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_zhemv_mgpu
magma_int_t n,
magmaDoubleComplex alpha, // unused
magmaDoubleComplex_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
magmaDoubleComplex const *x, magma_int_t incx, // unused
magmaDoubleComplex beta,
magmaDoubleComplex *y, magma_int_t incy, // unused
magmaDoubleComplex *hwork, magma_int_t lhwork,
magmaDoubleComplex_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const magmaDoubleComplex c_one = MAGMA_Z_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_zscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_zaxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
|
5852e6ea9ca730425da5a6c88616f961138a0e94.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_1D_3D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
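// Round the launch size up to the next multiples of the block dimensions so the
// grid of gridBlock x threadBlock threads covers at least XSIZE x YSIZE (comment added).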
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
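// Timing structure (comment added): an initial launch plus device synchronize,
// ten untimed warm-up launches, then a timed loop of 1000 launches per configuration.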
hipFree(0);
hipLaunchKernelGGL(kernel_1D_3D, dim3(gridBlock), dim3(threadBlock), 0, 0);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(kernel_1D_3D, dim3(gridBlock), dim3(threadBlock), 0, 0);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(kernel_1D_3D, dim3(gridBlock), dim3(threadBlock), 0, 0);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
5852e6ea9ca730425da5a6c88616f961138a0e94.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_1D_3D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_1D_3D<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_1D_3D<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_1D_3D<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f9eceb426cf3d8c1b5ad4c36d34a2f0e9839ef59.hip
|
// !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
template <typename T, typename S, typename D> struct DivScalar : unary_function<T, D>
{
S val;
__host__ explicit DivScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(a / val);
}
};
template <typename T, typename S, typename D> struct DivScalarInv : unary_function<T, D>
{
S val;
explicit DivScalarInv(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return a != 0 ? saturate_cast<D>(val / a) : 0;
}
};
}
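// Note (added): DivScalar computes dst = saturate_cast<D>(src / val), while
// DivScalarInv computes dst = (src != 0) ? saturate_cast<D>(val / src) : 0;
// divScalar() below selects between them via the 'inv' flag.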
namespace cv { namespace gpu { namespace cudev
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScalarInv<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename S, typename D>
void divScalar(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream)
{
if (inv)
{
DivScalarInv<T, S, D> op(static_cast<S>(val));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
DivScalar<T, S, D> op(static_cast<S>(val));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
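// The explicit instantiations below enumerate the (source T, intermediate S,
// destination D) combinations presumably used by the host-side dispatch; the
// remaining combinations are left commented out (note added for clarity).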
template void divScalar<uchar, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<ushort, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<ushort, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<short, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<short, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<float, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<float, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
template void divScalar<double, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, hipStream_t stream);
}
#endif // CUDA_DISABLER
|
f9eceb426cf3d8c1b5ad4c36d34a2f0e9839ef59.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
template <typename T, typename S, typename D> struct DivScalar : unary_function<T, D>
{
S val;
__host__ explicit DivScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(a / val);
}
};
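// Inverse variant: computes saturate_cast<D>(val / a) per element, mapping a == 0 to 0 to avoid division by zero.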
template <typename T, typename S, typename D> struct DivScalarInv : unary_function<T, D>
{
S val;
explicit DivScalarInv(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
return a != 0 ? saturate_cast<D>(val / a) : 0;
}
};
}
namespace cv { namespace gpu { namespace cudev
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScalarInv<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename S, typename D>
void divScalar(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream)
{
if (inv)
{
DivScalarInv<T, S, D> op(static_cast<S>(val));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
DivScalar<T, S, D> op(static_cast<S>(val));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
template void divScalar<uchar, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<ushort, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<ushort, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<short, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<short, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<int, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<int, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<int, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<float, float, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<float, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, uchar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, schar>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, ushort>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, short>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, int>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, float>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<double, double, double>(PtrStepSzb src1, double val, bool inv, PtrStepSzb dst, cudaStream_t stream);
}
#endif // CUDA_DISABLER
|
c41ffd5fdf3498eab0a23e435fc7dbaae12fd9df.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define SEED 0x7457
#define NUM 10000000
#define CUDA_ERROR_EXIT(str) do{\
hipError_t err = hipGetLastError();\
if( err != hipSuccess){\
printf("Cuda Error: '%s' for %s\n", hipGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
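/* Pairwise XOR reduction: each pass folds the upper half of the array onto the lower half
 * (Data[tid] ^= Data[tid + size]), halving the active size until the result is accumulated in Data[0].
 * The Odd flag marks a trailing element with no partner; it is carried unchanged into the next pass. */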
__global__ void XOR(long long int *Data, int Size, int Odd)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int size = Size;
int Bool= Odd;
while(size!=0){
if( tid < size )
if( tid == size -1 && Bool == 1 ){
// Do Nothing
}
else{
Data[tid] = Data[tid] ^ Data[ tid + size ];
}
__syncthreads();
// To avoid Infinite While Loop
if (size==1)
{
return;
}
// Odd Number Case
if( size % 2){
size = size/2 +1;
Bool = 1;
}
else{
Bool = 0;
size = size / 2;
}
}
}
int main(int argc, char **argv)
{
struct timeval start, end, t_start, t_end;
long long int *HArray;
long long int *DArray;
unsigned long num = NUM; /*Default value of num from MACRO*/
// int blocks;
unsigned long Seed = SEED; /*Default value of Seed from MACRO*/
if(argc == 3){
num = atoi(argv[1]); /*Update after checking*/
if(num <= 0)
num = NUM;
Seed= atoi(argv[2]);
if(Seed <= 0)
Seed = SEED;
}
else{
printf("%d", argc);
printf("Not Correct Number of Arguments");
return -1;
}
/* Allocate host (CPU) memory and initialize*/
HArray = (long long int*) malloc(num * sizeof(long long int) );
if(!HArray){
perror("malloc");
exit(-1);
}
srand(Seed);
for(int i=0;i<num;i++){
HArray[i]= random();
}
for(int i=0;i<num;i++){
printf("%lld ", HArray[i] );
if (i<num-1)
printf("^ ");
}
gettimeofday(&t_start, NULL);
/* Allocate GPU memory and copy from CPU --> GPU*/
hipMalloc(&DArray, num * sizeof(long long int));
CUDA_ERROR_EXIT("hipMalloc");
hipMemcpy(DArray, HArray, num * sizeof(long long int) , hipMemcpyHostToDevice);
CUDA_ERROR_EXIT("hipMemcpy");
gettimeofday(&start, NULL);
int blocks = num / 1024; /* enough 1024-thread blocks to cover the input */
if(num % 1024)
++blocks;
// XOR<<<1, (num + num%2)/2>>>(DArray, num%2);
if( num%2 ){
hipLaunchKernelGGL(( XOR), dim3(blocks), dim3(1024), 0, 0, DArray, (num + 1)/2, 1);
CUDA_ERROR_EXIT("kernel invocation");
}
else{
hipLaunchKernelGGL(( XOR), dim3(blocks), dim3(1024), 0, 0, DArray, num/2, 0);
CUDA_ERROR_EXIT("kernel invocation");
}
gettimeofday(&end, NULL);
/* Copy back result*/
hipMemcpy(HArray, DArray, num * sizeof(long long int) , hipMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("\nTotal time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
hipFree(DArray);
/*Print the reduced XOR result (accumulated into element 0) as a sanity check*/
printf("XOR: %lld\n", HArray[0]);
free(HArray);
}
|
c41ffd5fdf3498eab0a23e435fc7dbaae12fd9df.cu
|
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define SEED 0x7457
#define NUM 10000000
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
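/* Pairwise XOR reduction: each pass folds the upper half of the array onto the lower half
 * (Data[tid] ^= Data[tid + size]), halving the active size until the result is accumulated in Data[0].
 * The Odd flag marks a trailing element with no partner; it is carried unchanged into the next pass. */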
__global__ void XOR(long long int *Data, int Size, int Odd)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int size = Size;
int Bool= Odd;
while(size!=0){
if( tid < size )
if( tid == size -1 && Bool == 1 ){
// Do Nothing
}
else{
Data[tid] = Data[tid] ^ Data[ tid + size ];
}
__syncthreads();
// To avoid Infinite While Loop
if (size==1)
{
return;
}
// Odd Number Case
if( size % 2){
size = size/2 +1;
Bool = 1;
}
else{
Bool = 0;
size = size / 2;
}
}
}
int main(int argc, char **argv)
{
struct timeval start, end, t_start, t_end;
long long int *HArray;
long long int *DArray;
unsigned long num = NUM; /*Default value of num from MACRO*/
// int blocks;
unsigned long Seed = SEED; /*Default value of Seed from MACRO*/
if(argc == 3){
num = atoi(argv[1]); /*Update after checking*/
if(num <= 0)
num = NUM;
Seed= atoi(argv[2]);
if(Seed <= 0)
Seed = SEED;
}
else{
printf("%d", argc);
printf("Not Correct Number of Arguments");
return -1;
}
/* Allocate host (CPU) memory and initialize*/
HArray = (long long int*) malloc(num * sizeof(long long int) );
if(!HArray){
perror("malloc");
exit(-1);
}
srand(Seed);
for(int i=0;i<num;i++){
HArray[i]= random();
}
for(int i=0;i<num;i++){
printf("%lld ", HArray[i] );
if (i<num-1)
printf("^ ");
}
gettimeofday(&t_start, NULL);
/* Allocate GPU memory and copy from CPU --> GPU*/
cudaMalloc(&DArray, num * sizeof(long long int));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(DArray, HArray, num * sizeof(long long int) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
gettimeofday(&start, NULL);
int blocks = num / 1024; /* enough 1024-thread blocks to cover the input */
if(num % 1024)
++blocks;
// XOR<<<1, (num + num%2)/2>>>(DArray, num%2);
if( num%2 ){
XOR<<<blocks, 1024>>>(DArray, (num + 1)/2, 1);
CUDA_ERROR_EXIT("kernel invocation");
}
else{
XOR<<<blocks, 1024>>>(DArray, num/2, 0);
CUDA_ERROR_EXIT("kernel invocation");
}
gettimeofday(&end, NULL);
/* Copy back result*/
cudaMemcpy(HArray, DArray, num * sizeof(long long int) , cudaMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("\nTotal time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
cudaFree(DArray);
/*Print the reduced XOR result (accumulated into element 0) as a sanity check*/
printf("XOR: %lld\n", HArray[0]);
free(HArray);
}
|
fa6ce191f4fd1764012e86250e3196941daf8854.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
#include <sys/time.h>
/////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//////////////large vs small data.
void init_cpu_data(long long int* A, long long int size, long long int stride){
for (long long int i = 0; i < size; i++){
A[i]=1;
}
/*
for (long long int i = 0; i < size - stride; i++){
A[i]=(i + stride);
}
for (long long int i = size - stride; i < size; i++){
A[i]=0;
}
*/
}
__global__ void gpu_initialization(long long int *A, long long int data_stride, long long int data_size){
long long int index = (blockIdx.x * blockDim.x + threadIdx.x);
long long int thread_num = gridDim.x * blockDim.x;
for(long long int it = 0; it < data_size; it = it + thread_num){
A[index + it]=23;
}
}
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
//__global__ void Page_visitor(long long int *A, long long int *B, long long int data_stride, long long int clock_count){
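// Load-compute-store microbenchmark: each thread reads one element at (global thread id * data_stride),
// spins for clock_count iterations of integer adds, and writes the value back, so the measured runtime
// reflects unified-memory page access/migration cost; the %smid/%warpid printfs trace where blocks land.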
__global__ void Page_visitor(long long int *A, long long int data_stride, long long int clock_count){////load-compute -store
/*
long long int index = threadIdx.x;
/////////////////////////////////time
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
if(index = 0){
start_time= clock64();
}
__syncthreads();
*/
int smid = 1;
asm("mov.u32 %0, %smid;" : "=r"(smid) );
int warpid = 1;
asm("mov.u32 %0, %warpid;" : "=r"(warpid) );
int ctaid = 1;
asm("mov.u32 %0, %ctaid.x;" : "=r"(ctaid) );
int nctaid = 1;
asm("mov.u32 %0, %nctaid.x;" : "=r"(nctaid) );
int ntid = 1;
asm("mov.u32 %0, %ntid.x;" : "=r"(ntid) );
int nsmid = 1;
asm("mov.u32 %0, %nsmid;" : "=r"(nsmid) );
int nwarpid = 1;
asm("mov.u32 %0, %nwarpid;" : "=r"(nwarpid) );
//printf("###1###warpid: %d, ctaid: %d, blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d, nctaid.x: %d, ntid: %d, nsmid: %d, nwarpid: %d \n", warpid, ctaid, blockIdx.x, blockIdx.y, blockIdx.z, nctaid, ntid, nsmid, nwarpid);
if(smid == 0){
//if(blockIdx.x == 0){
if(threadIdx.x % 32 == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
printf("###1###warpid: %d, blockIdx.x: %d\n", warpid, blockIdx.x );///printf does not reliably print everything
}
//}
}
if(threadIdx.x % 32 == 0){
printf("warpid: %d, blockIdx.x: %d, smid: %d, threadIdx.x: %d\n", warpid, blockIdx.x, smid, threadIdx.x);
}
long long int index = (blockIdx.x * blockDim.x + threadIdx.x) * data_stride;
long long int value = A[index];
if(smid == 0){
//if(blockIdx.x == 0){
if(threadIdx.x % 32 == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
printf("###2###warpid: %d, blockIdx.x: %d\n", warpid, blockIdx.x );
}
//}
}
/*
//////////////////////////////////////////////sleep
long long int start_clock = clock64();
long long int clock_offset = 0;
while (clock_offset < clock_count)
{
clock_offset = clock64() - start_clock;
}
*/
//////////////////////////////////////////////loop
long long int clock_offset = 0;
while (clock_offset < clock_count){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
value = value + threadIdx.x;
}
if(smid == 0){
//if(blockIdx.x == 0){
if(threadIdx.x % 32 == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
printf("###3###warpid: %d, blockIdx.x: %d\n", warpid, blockIdx.x );
}
//}
}
/*
if(threadIdx.x == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
int smid = 1;
asm("mov.u32 %0, %smid;" : "=r"(smid) );
printf("blockIdx.x: %d, smid: %d\n", blockIdx.x, smid);
if(blockIdx.x == 55){
int nsmid = 1;
asm("mov.u32 %0, %smid;" : "=r"(nsmid) );
printf("nsmid: %d\n", nsmid);
}
}
*/
//d_o[0] = clock_offset;
//////////////////////////////////////////////sleep
A[index] = value;
if(smid == 0){
//if(blockIdx.x == 0){
if(threadIdx.x % 32 == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
printf("###4###warpid: %d, blockIdx.x: %d\n", warpid, blockIdx.x );
}
//}
}
/*
__syncthreads();
__syncthreads();
/////////////////////////////////time
if(index = 0){
start_time= clock64();
time_interval = end_time - start_time;//////clock
}
//B[0] = time_interval;
*/
}
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//long long int dev_id = findCudaDevice(argc, (const char **) argv);
long long int dev_id = 7;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
checkCudaErrors(hipSetDevice(dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
printf("clock_rate:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id));
printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1);
//plain managed
//does not cause eviction
printf("###################\n#########################managed\n");
///32 * 64 <==> 1 * 512 * 1024 (8gb), 32 * 512 <==> 1 * 64 * 1024 (8gb),
///is it still true that in multi threads the dynamic page threshold is still 64k? no, it seems to be 2k.
//for(long long int data_stride = 1 * 1 * 1; data_stride <= 1 * 512 * 1024; data_stride = data_stride * 2){////not necessarily migrating whole 2m page. (not fair comparison but interesting to look at) 512 * 1024 is 4m, see what happens after 2m.
for(long long int data_stride = 1 * 1 * 1024; data_stride <= 1 * 1 * 1024; data_stride = data_stride * 2){///test
for(long long int mod = 536870912; mod <= 536870912; mod = mod * 2){////134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//for(long long int clock_count = 128; clock_count <= 8192; clock_count = clock_count * 2){/////11 positions.
for(long long int clock_count = 1; clock_count <= 1; clock_count = clock_count * 2){/////test
///////////////////////////////////////////////////////////////////CPU data begin
//long long int data_size = mod;
long long int data_size = data_stride;
data_size = data_size * 2048;
data_size = data_size * 512;
//long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
long long int *CPU_data_in;
//CPU_data_in = (long long int*)malloc(sizeof(long long int) * data_size);
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));/////////////using unified memory
//init_cpu_data(CPU_data_in, data_size, data_stride);
///////////////////////////////////////////////////////////////////CPU data end
hipLaunchKernelGGL(( gpu_initialization), dim3(2048), dim3(512), 0, 0, CPU_data_in, data_stride, data_size);///////////////1024 per block max
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
////may want to use more thread to see clock_count effect
hipLaunchKernelGGL(( Page_visitor), dim3(2048), dim3(512), 0, 0, CPU_data_in, data_stride, clock_count);///////////////1024 per block max
///////////////////////////////////////////////////32 * 64 * 1 * 512 * 1024 = 8gb.
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
//checkCudaErrors(hipFree(GPU_data_out));
}
printf("\n");
}
}
/*
//plain managed
//causing eviction
printf("###################\n#########################managed\n");
///32 * 64 <==> 1 * 512 * 1024 (8gb), 32 * 512 <==> 1 * 64 * 1024 (8gb),
///is it still true that in multi threads the dynamic page threshold is still 64k? no, it seems to be 2k.
for(long long int data_stride = 1 * 1 * 1; data_stride <= 1 * 512 * 1024; data_stride = data_stride * 2){////not necessarily migrating whole 2m. 512 * 1024 is 4m, see what happens after 2m.
for(long long int mod = 536870912; mod <= 536870912; mod = mod * 2){////134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
for(long long int clock_count = 128; clock_count <= 8192; clock_count = clock_count * 2){/////11 positions.
//for(long long int clock_count = 1; clock_count <= 1; clock_count = clock_count * 2){/////11 positions.
///////////////////////////////////////////////////////////////////CPU data begin
//long long int data_size = mod;
long long int data_size = data_stride;
data_size = data_size * 8192;
data_size = data_size * 512;
//long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
long long int *CPU_data_in;
//CPU_data_in = (long long int*)malloc(sizeof(long long int) * data_size);
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));/////////////using unified memory
init_cpu_data(CPU_data_in, data_size, data_stride);
///////////////////////////////////////////////////////////////////CPU data end
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
////may want to use more thread to see clock_count effect
Page_visitor<<<8192, 512>>>(CPU_data_in, data_stride, clock_count);///////////////1024 per block max
///////////////////////////////////////////////////32 * 64 * 1 * 512 * 1024 = 8gb.
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
//checkCudaErrors(hipFree(GPU_data_out));
}
printf("\n");
}
}
*/
exit(EXIT_SUCCESS);
}
|
fa6ce191f4fd1764012e86250e3196941daf8854.cu
|
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
#include <sys/time.h>
/////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//////////////large vs small data.
void init_cpu_data(long long int* A, long long int size, long long int stride){
for (long long int i = 0; i < size; i++){
A[i]=1;
}
/*
for (long long int i = 0; i < size - stride; i++){
A[i]=(i + stride);
}
for (long long int i = size - stride; i < size; i++){
A[i]=0;
}
*/
}
__global__ void gpu_initialization(long long int *A, long long int data_stride, long long int data_size){
long long int index = (blockIdx.x * blockDim.x + threadIdx.x);
long long int thread_num = gridDim.x * blockDim.x;
for(long long int it = 0; it < data_size; it = it + thread_num){
A[index + it]=23;
}
}
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
//__global__ void Page_visitor(long long int *A, long long int *B, long long int data_stride, long long int clock_count){
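// Load-compute-store microbenchmark: each thread reads one element at (global thread id * data_stride),
// spins for clock_count iterations of integer adds, and writes the value back, so the measured runtime
// reflects unified-memory page access/migration cost; the %smid/%warpid printfs trace where blocks land.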
__global__ void Page_visitor(long long int *A, long long int data_stride, long long int clock_count){////load-compute -store
/*
long long int index = threadIdx.x;
/////////////////////////////////time
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
if(index = 0){
start_time= clock64();
}
__syncthreads();
*/
int smid = 1;
asm("mov.u32 %0, %smid;" : "=r"(smid) );
int warpid = 1;
asm("mov.u32 %0, %warpid;" : "=r"(warpid) );
int ctaid = 1;
asm("mov.u32 %0, %ctaid.x;" : "=r"(ctaid) );
int nctaid = 1;
asm("mov.u32 %0, %nctaid.x;" : "=r"(nctaid) );
int ntid = 1;
asm("mov.u32 %0, %ntid.x;" : "=r"(ntid) );
int nsmid = 1;
asm("mov.u32 %0, %nsmid;" : "=r"(nsmid) );
int nwarpid = 1;
asm("mov.u32 %0, %nwarpid;" : "=r"(nwarpid) );
//printf("###1###warpid: %d, ctaid: %d, blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d, nctaid.x: %d, ntid: %d, nsmid: %d, nwarpid: %d \n", warpid, ctaid, blockIdx.x, blockIdx.y, blockIdx.z, nctaid, ntid, nsmid, nwarpid);
if(smid == 0){
//if(blockIdx.x == 0){
if(threadIdx.x % 32 == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
printf("###1###warpid: %d, blockIdx.x: %d\n", warpid, blockIdx.x );///printf does not reliably print everything
}
//}
}
if(threadIdx.x % 32 == 0){
printf("warpid: %d, blockIdx.x: %d, smid: %d, threadIdx.x: %d\n", warpid, blockIdx.x, smid, threadIdx.x);
}
long long int index = (blockIdx.x * blockDim.x + threadIdx.x) * data_stride;
long long int value = A[index];
if(smid == 0){
//if(blockIdx.x == 0){
if(threadIdx.x % 32 == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
printf("###2###warpid: %d, blockIdx.x: %d\n", warpid, blockIdx.x );
}
//}
}
/*
//////////////////////////////////////////////sleep
long long int start_clock = clock64();
long long int clock_offset = 0;
while (clock_offset < clock_count)
{
clock_offset = clock64() - start_clock;
}
*/
//////////////////////////////////////////////loop
long long int clock_offset = 0;
while (clock_offset < clock_count){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
value = value + threadIdx.x;
}
if(smid == 0){
//if(blockIdx.x == 0){
if(threadIdx.x % 32 == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
printf("###3###warpid: %d, blockIdx.x: %d\n", warpid, blockIdx.x );
}
//}
}
/*
if(threadIdx.x == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
int smid = 1;
asm("mov.u32 %0, %smid;" : "=r"(smid) );
printf("blockIdx.x: %d, smid: %d\n", blockIdx.x, smid);
if(blockIdx.x == 55){
int nsmid = 1;
asm("mov.u32 %0, %smid;" : "=r"(nsmid) );
printf("nsmid: %d\n", nsmid);
}
}
*/
//d_o[0] = clock_offset;
//////////////////////////////////////////////sleep
A[index] = value;
if(smid == 0){
//if(blockIdx.x == 0){
if(threadIdx.x % 32 == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
printf("###4###warpid: %d, blockIdx.x: %d\n", warpid, blockIdx.x );
}
//}
}
/*
__syncthreads();
__syncthreads();
/////////////////////////////////time
if(index = 0){
start_time= clock64();
time_interval = end_time - start_time;//////clock
}
//B[0] = time_interval;
*/
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//long long int dev_id = findCudaDevice(argc, (const char **) argv);
long long int dev_id = 7;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
checkCudaErrors(cudaSetDevice(dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
printf("clock_rate:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id));
printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1);
//plain managed
//does not cause eviction
printf("###################\n#########################managed\n");
///32 * 64 <==> 1 * 512 * 1024 (8gb), 32 * 512 <==> 1 * 64 * 1024 (8gb),
///is it still true that in multi threads the dynamic page threshold is still 64k? no, it seems to be 2k.
//for(long long int data_stride = 1 * 1 * 1; data_stride <= 1 * 512 * 1024; data_stride = data_stride * 2){////not necessarily migrating whole 2m page. (not fair comparison but interesting to look at) 512 * 1024 is 4m, see what happens after 2m.
for(long long int data_stride = 1 * 1 * 1024; data_stride <= 1 * 1 * 1024; data_stride = data_stride * 2){///test
for(long long int mod = 536870912; mod <= 536870912; mod = mod * 2){////134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//for(long long int clock_count = 128; clock_count <= 8192; clock_count = clock_count * 2){/////11 positions.
for(long long int clock_count = 1; clock_count <= 1; clock_count = clock_count * 2){/////test
///////////////////////////////////////////////////////////////////CPU data begin
//long long int data_size = mod;
long long int data_size = data_stride;
data_size = data_size * 2048;
data_size = data_size * 512;
//long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
long long int *CPU_data_in;
//CPU_data_in = (long long int*)malloc(sizeof(long long int) * data_size);
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));/////////////using unified memory
//init_cpu_data(CPU_data_in, data_size, data_stride);
///////////////////////////////////////////////////////////////////CPU data end
gpu_initialization<<<2048, 512>>>(CPU_data_in, data_stride, data_size);///////////////1024 per block max
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
////may want to use more thread to see clock_count effect
Page_visitor<<<2048, 512>>>(CPU_data_in, data_stride, clock_count);///////////////1024 per block max
///////////////////////////////////////////////////32 * 64 * 1 * 512 * 1024 = 8gb.
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
//checkCudaErrors(cudaFree(GPU_data_out));
}
printf("\n");
}
}
/*
//plain managed
//causing eviction
printf("###################\n#########################managed\n");
///32 * 64 <==> 1 * 512 * 1024 (8gb), 32 * 512 <==> 1 * 64 * 1024 (8gb),
///is it still true that in multi threads the dynamic page threshold is still 64k? no, it seems to be 2k.
for(long long int data_stride = 1 * 1 * 1; data_stride <= 1 * 512 * 1024; data_stride = data_stride * 2){////not necessarily migrating whole 2m. 512 * 1024 is 4m, see what happens after 2m.
for(long long int mod = 536870912; mod <= 536870912; mod = mod * 2){////134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
for(long long int clock_count = 128; clock_count <= 8192; clock_count = clock_count * 2){/////11 positions.
//for(long long int clock_count = 1; clock_count <= 1; clock_count = clock_count * 2){/////11 positions.
///////////////////////////////////////////////////////////////////CPU data begin
//long long int data_size = mod;
long long int data_size = data_stride;
data_size = data_size * 8192;
data_size = data_size * 512;
//long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
long long int *CPU_data_in;
//CPU_data_in = (long long int*)malloc(sizeof(long long int) * data_size);
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));/////////////using unified memory
init_cpu_data(CPU_data_in, data_size, data_stride);
///////////////////////////////////////////////////////////////////CPU data end
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
////may want to use more thread to see clock_count effect
Page_visitor<<<8192, 512>>>(CPU_data_in, data_stride, clock_count);///////////////1024 per block max
///////////////////////////////////////////////////32 * 64 * 1 * 512 * 1024 = 8gb.
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
//checkCudaErrors(cudaFree(GPU_data_out));
}
printf("\n");
}
}
*/
exit(EXIT_SUCCESS);
}
|
05db5e0c9868a1ade24d3ffdff8d8c74386f1521.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal d -> s
*/
#include "common_magma.h"
#include "commonblas_d.h"
/*
* Computes C = alpha*A*B + beta*C when alpha == 0 and beta == 0.
* That is, C = 0.
*/
__global__ void
dgemm_kernel_ab_0(
double* __restrict__ C,
const double* __restrict__ A,
const double* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
C += ibx + idt + __mul24(iby, ldc);
ibx = ibx + idt - m;
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
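// At this point lda holds how many of this block's 16 columns of C are in range
// (0 if this thread's row index is >= m); the switch below zeroes exactly that many entries.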
switch(lda) {
case 16:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
C[15*ldc] = 0;
break;
case 15:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
break;
case 14:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
break;
case 13:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
break;
case 12:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
break;
case 11:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
break;
case 10:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
break;
case 9:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
break;
case 8:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
break;
case 7:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
break;
case 6:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
break;
case 5:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
break;
case 4:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
break;
case 3:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
break;
case 2:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
break;
case 1:
C[ 0 ] = 0;
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_ab_0(
double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
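// 16x4 = 64 threads per block; each block zeroes a 64x16 tile of C, one tile row per thread.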
hipLaunchKernelGGL(( dgemm_kernel_ab_0), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
05db5e0c9868a1ade24d3ffdff8d8c74386f1521.cu
|
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal d -> s
*/
#include "common_magma.h"
#include "commonblas_d.h"
/*
* Computes C = alpha*A*B + beta*C when alpha == 0 and beta == 0.
* That is, C = 0.
*/
__global__ void
dgemm_kernel_ab_0(
double* __restrict__ C,
const double* __restrict__ A,
const double* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
C += ibx + idt + __mul24(iby, ldc);
ibx = ibx + idt - m;
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
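// At this point lda holds how many of this block's 16 columns of C are in range
// (0 if this thread's row index is >= m); the switch below zeroes exactly that many entries.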
switch(lda) {
case 16:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
C[15*ldc] = 0;
break;
case 15:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
break;
case 14:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
break;
case 13:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
break;
case 12:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
break;
case 11:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
break;
case 10:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
break;
case 9:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
break;
case 8:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
break;
case 7:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
break;
case 6:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
break;
case 5:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
break;
case 4:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
break;
case 3:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
break;
case 2:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
break;
case 1:
C[ 0 ] = 0;
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_ab_0(
double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
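// 16x4 = 64 threads per block; each block zeroes a 64x16 tile of C, one tile row per thread.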
dgemm_kernel_ab_0<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
36a35ecb649a6cabb2df50bf737c03d4c67d959d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgeellmv.cu normal z -> s, Fri Jan 30 19:00:28 2015
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// ELLPACK SpMV kernel
//Michael Garland
__global__ void
sgeellmv_kernel(
int num_rows,
int num_cols,
int num_cols_per_row,
float alpha,
float * dval,
magma_index_t * dcolind,
float * dx,
float beta,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_cols_per_row * row + n ];
float val = dval [ num_cols_per_row * row + n ];
if( val != 0)
dot += val * dx[col ];
}
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
// shifted ELLPACK SpMV kernel
//Michael Garland
__global__ void
sgeellmv_kernel_shift(
int num_rows,
int num_cols,
int num_cols_per_row,
float alpha,
float lambda,
float * dval,
magma_index_t * dcolind,
float * dx,
float beta,
int offset,
int blocksize,
magma_index_t * addrows,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_cols_per_row * row + n ];
float val = dval [ num_cols_per_row * row + n ];
if( val != 0)
dot += val * dx[col ];
}
if( row<blocksize )
dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLPACK.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgeellmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
float alpha,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaFloat_ptr dx,
float beta,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sgeellmv_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELLPACK.
It is the shifted version of the ELLPACK SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
lambda float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgeellmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
float alpha,
float lambda,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaFloat_ptr dx,
float beta,
int offset,
int blocksize,
magmaIndex_ptr addrows,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sgeellmv_kernel_shift), dim3(grid), dim3(threads), 0, queue ,
m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx,
beta, offset, blocksize, addrows, dy );
return MAGMA_SUCCESS;
}
|
36a35ecb649a6cabb2df50bf737c03d4c67d959d.cu
|
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zgeellmv.cu normal z -> s, Fri Jan 30 19:00:28 2015
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// ELLPACK SpMV kernel
//Michael Garland
__global__ void
sgeellmv_kernel(
int num_rows,
int num_cols,
int num_cols_per_row,
float alpha,
float * dval,
magma_index_t * dcolind,
float * dx,
float beta,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_cols_per_row * row + n ];
float val = dval [ num_cols_per_row * row + n ];
if( val != 0)
dot += val * dx[col ];
}
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
// shifted ELLPACK SpMV kernel
//Michael Garland
__global__ void
sgeellmv_kernel_shift(
int num_rows,
int num_cols,
int num_cols_per_row,
float alpha,
float lambda,
float * dval,
magma_index_t * dcolind,
float * dx,
float beta,
int offset,
int blocksize,
magma_index_t * addrows,
float * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_cols_per_row * row + n ];
float val = dval [ num_cols_per_row * row + n ];
if( val != 0)
dot += val * dx[col ];
}
if( row<blocksize )
dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLPACK.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgeellmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
float alpha,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaFloat_ptr dx,
float beta,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
sgeellmv_kernel<<< grid, threads, 0, queue >>>
( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELLPACK.
It is the shifted version of the ELLPACK SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha float
scalar multiplier
@param[in]
lambda float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgeellmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
float alpha,
float lambda,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaFloat_ptr dx,
float beta,
int offset,
int blocksize,
magmaIndex_ptr addrows,
magmaFloat_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
sgeellmv_kernel_shift<<< grid, threads, 0, queue >>>
( m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx,
beta, offset, blocksize, addrows, dy );
return MAGMA_SUCCESS;
}
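// ---------------------------------------------------------------------------
// Note and call sketch (added; not part of the original MAGMA source): as
// written, sgeellmv_kernel_shift computes, per row,
//     dy[row] = alpha * (A*x)[row] - lambda * dx[shifted index] + beta * dy[row]
// so the lambda term is not scaled by alpha. With the same assumed device
// pointers as in the sketch above, and blocksize = m so that `addrows` is
// never dereferenced, a call might look like:
//
//     magma_sgeellmv_shift( MagmaNoTrans, m, n, max_nnz_row,
//                           2.0f, 0.1f, d_val, d_colind, d_x,
//                           0.5f, 0, m, NULL, d_y, queue );
// ---------------------------------------------------------------------------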
|
c9ec97b4d6f002f2ba5c86d304faa5705bc9b95b.hip
|
// !!! This is a file automatically generated by hipify!!!
// Created by Liu Chengjian on 17/10/9.
// Copyright (c) 2017 csliu. All rights reserved.
//
#include <hip/hip_runtime.h>
#include "GCRSMatrix.h"
#include "utils.h"
#include "kernels.hip"
int main(int argc, const char * argv[]) {
if (argc != 3) {
printf("Usage: ./%s workSizePerDataParityBlockInMB numberOfTasks\n", argv[0]);
exit(0);
}
int bufSize = atoi(argv[1]) * 1024 * 1024; // workSize per data parity block
int taskNum = atoi(argv[2]);
double encode_time = 0.0;
#ifdef DUMP
for (int m = 4; m <= 4; ++m) {
for (int n = 8; n <= 8; ++n) { // w is updated in the nested loop
for (int k = MAX_K; k <= MAX_K; ++k) {
#else
for (int m = 1; m <= 4; ++m) {
for (int n = 4; n <= 8; ++n) { // w is updated in the nested loop
for (int k = m; k <= MAX_K; ++k) {
#endif
int w = gcrs_check_k_m_w(k, m, n);
if (w < 0) continue;
#ifdef DUMP
printf("k:%d, m:%d w:%d\n",k,m,w);
#endif
int *bitmatrix = gcrs_create_bitmatrix(k, m, w);
//printMatrix(bitmatrix, k*w, m*w);
// adjust the bufSize
int bufSizePerTask = align_value(bufSize / taskNum, sizeof(long) * w);
bufSize = bufSizePerTask * taskNum;
// compute the bufSize for the last task
int bufSizeForLastTask = bufSize - (bufSizePerTask * (taskNum - 1));
#ifdef DUMP
printf("Total Size:%d Size per task:%d Size for last task:%d\n",
bufSize, bufSizePerTask, bufSizeForLastTask);
#endif
// allocate host buffers
char* data = (char*) malloc (bufSize * k);
char* code = (char*) malloc (bufSize * m);
// initialize host buffer
generateRandomValue(data, bufSize * k);
// allocate device buffers
char* d_data, *d_code;
hipMalloc((void**)&d_data, bufSize * k);
hipMalloc((void**)&d_code, bufSize * m);
int dataSizePerAssign = bufSizePerTask * k;
int codeSizePerAssign = bufSizePerTask * m;
// pointers to the device buffers
char** d_data_ptr = (char**) malloc (sizeof(char*) * taskNum);
char** d_code_ptr = (char**) malloc (sizeof(char*) * taskNum);
for (int i = 0; i < taskNum; ++i) {
d_data_ptr[i] = d_data + dataSizePerAssign * i;
d_code_ptr[i] = d_code + codeSizePerAssign * i;
}
// taskSize will determine the number of kernels to run on a device
int taskSize = 1;
int mRemain = m;
// adjust taskSize
if (m >= MAX_M) {
taskSize = m / MAX_M;
if (m % MAX_M != 0) ++taskSize;
}
#ifdef DUMP
printf("task size: %d\n", taskSize);
#endif
// set up kernel execution parameters
int *mValue = (int*) malloc (sizeof(int) * taskSize);
int *index = (int*) malloc (sizeof(int) * taskSize);
coding_func *coding_function_ptrs = (coding_func*) malloc (sizeof(coding_func) * taskSize);
for (int i = 0; i < taskSize; ++i) {
if (mRemain < MAX_M) {
mValue[i] = mRemain;
}else{
mValue[i] = MAX_M;
mRemain = mRemain - MAX_M;
}
if (i == 0) {
index[i] = 0;
}else{
index[i] = index[i-1] + k * w;
}
coding_function_ptrs[i] = coding_func_array[(mValue[i] - 1) * (MAX_W - MIN_W + 1)+ w - MIN_W];
}
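// Added comment (not part of the original source): coding_func_array is
// indexed as (mValue - 1) * (MAX_W - MIN_W + 1) + (w - MIN_W), i.e. all
// w-variants of a given m are stored contiguously. Assuming, purely for
// illustration, MIN_W = 4 and MAX_W = 8, a task with mValue = 2 and w = 6
// would select entry (2-1)*5 + (6-4) = 7.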
// create and then update encoding bit matrix
unsigned int *all_columns_bitmatrix = (unsigned int*) malloc (sizeof(unsigned int) * k * w * taskSize);
int mValueSum = 0;
for (int i = 0; i < taskSize; ++i) {
unsigned int *column_bitmatrix = gcrs_create_column_coding_bitmatrix(
k, mValue[i], w, bitmatrix + k * w * mValueSum * w);
memcpy((all_columns_bitmatrix + i * k * w), column_bitmatrix, k * w * sizeof(unsigned int));
free(column_bitmatrix);
mValueSum += mValue[i];
}
// allocate bitmatrix on a device
unsigned int *d_bitmatrix;
hipMalloc((void**)&d_bitmatrix, sizeof(unsigned int) * k * w * taskSize);
hipMemcpy(d_bitmatrix, all_columns_bitmatrix,
sizeof(unsigned int) * k * w * taskSize, hipMemcpyHostToDevice);
int warpThreadNum = 32;
int threadNum = MAX_THREAD_NUM;
size_t workSizePerWarp = warpThreadNum / w * w;
size_t workSizePerBlock = threadNum / warpThreadNum * workSizePerWarp * sizeof(size_t);
size_t blockNum = bufSizePerTask / workSizePerBlock;
if ((bufSizePerTask % workSizePerBlock) != 0) {
blockNum = blockNum + 1;
}
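// Added worked example (not part of the original source), assuming for
// illustration only that MAX_THREAD_NUM = 128 and sizeof(size_t) = 8:
// with w = 7, workSizePerWarp = 32 / 7 * 7 = 28 (integer division), so
// workSizePerBlock = (128 / 32) * 28 * 8 = 896 bytes and blockNum is
// bufSizePerTask / 896, rounded up by the check above.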
#ifdef DUMP
printf("#blocks: %zu blockSize: %d\n", blockNum, threadNum);
#endif
struct timeval startEncodeTime, endEncodeTime;
gettimeofday(&startEncodeTime, NULL);
for (int i = 0; i < taskNum; ++i) {
int count = (i == taskNum-1) ? bufSizeForLastTask : bufSizePerTask;
hipMemcpyAsync(d_data + i * k * bufSizePerTask,
data + i * k * bufSizePerTask, (k * count), hipMemcpyHostToDevice, 0);
int workSizePerGrid = count / sizeof(long);
int size = workSizePerGrid * sizeof(long);
mValueSum = 0;
for (int j = 0; j < taskSize; ++j) {
coding_function_ptrs[j](k, index[j], d_data_ptr[i], d_code_ptr[i]+mValueSum*size,
d_bitmatrix, threadNum, blockNum, workSizePerGrid);
mValueSum += mValue[j];
}
hipMemcpyAsync(
code + i * m * bufSizePerTask,
d_code + i * m * bufSizePerTask,
(m * count), hipMemcpyDeviceToHost, 0);
}
hipDeviceSynchronize();
gettimeofday(&endEncodeTime, NULL);
double etime = elapsed_time_in_ms(startEncodeTime, endEncodeTime);
#ifdef DUMP
printf("Encoding time over %d tasks: %lf (ms)\n", taskNum, etime);
#endif
encode_time += etime;
#ifdef DUMP
for (int i = 0; i < bufSize*m; i++) printf("%d\n", code[i]);
printf("\n");
#endif
hipFree(d_data);
hipFree(d_code);
hipFree(d_bitmatrix);
free(mValue);
free(index);
free(coding_function_ptrs);
free(bitmatrix);
free(all_columns_bitmatrix);
free(d_data_ptr);
free(d_code_ptr);
free(code);
free(data);
}
}
}
printf("Total encoding time %lf (s)\n", encode_time * 1e-3);
return 0;
}
|
c9ec97b4d6f002f2ba5c86d304faa5705bc9b95b.cu
|
// Created by Liu Chengjian on 17/10/9.
// Copyright (c) 2017 csliu. All rights reserved.
//
#include <cuda.h>
#include "GCRSMatrix.h"
#include "utils.h"
#include "kernels.cu"
int main(int argc, const char * argv[]) {
if (argc != 3) {
printf("Usage: ./%s workSizePerDataParityBlockInMB numberOfTasks\n", argv[0]);
exit(0);
}
int bufSize = atoi(argv[1]) * 1024 * 1024; // workSize per data parity block
int taskNum = atoi(argv[2]);
double encode_time = 0.0;
#ifdef DUMP
for (int m = 4; m <= 4; ++m) {
for (int n = 8; n <= 8; ++n) { // w is updated in the nested loop
for (int k = MAX_K; k <= MAX_K; ++k) {
#else
for (int m = 1; m <= 4; ++m) {
for (int n = 4; n <= 8; ++n) { // w is updated in the nested loop
for (int k = m; k <= MAX_K; ++k) {
#endif
int w = gcrs_check_k_m_w(k, m, n);
if (w < 0) continue;
#ifdef DUMP
printf("k:%d, m:%d w:%d\n",k,m,w);
#endif
int *bitmatrix = gcrs_create_bitmatrix(k, m, w);
//printMatrix(bitmatrix, k*w, m*w);
// adjust the bufSize
int bufSizePerTask = align_value(bufSize / taskNum, sizeof(long) * w);
bufSize = bufSizePerTask * taskNum;
// compute the bufSize for the last task
int bufSizeForLastTask = bufSize - (bufSizePerTask * (taskNum - 1));
#ifdef DUMP
printf("Total Size:%d Size per task:%d Size for last task:%d\n",
bufSize, bufSizePerTask, bufSizeForLastTask);
#endif
// allocate host buffers
char* data = (char*) malloc (bufSize * k);
char* code = (char*) malloc (bufSize * m);
// initialize host buffer
generateRandomValue(data, bufSize * k);
// allocate device buffers
char* d_data, *d_code;
cudaMalloc((void**)&d_data, bufSize * k);
cudaMalloc((void**)&d_code, bufSize * m);
int dataSizePerAssign = bufSizePerTask * k;
int codeSizePerAssign = bufSizePerTask * m;
// pointers to the device buffers
char** d_data_ptr = (char**) malloc (sizeof(char*) * taskNum);
char** d_code_ptr = (char**) malloc (sizeof(char*) * taskNum);
for (int i = 0; i < taskNum; ++i) {
d_data_ptr[i] = d_data + dataSizePerAssign * i;
d_code_ptr[i] = d_code + codeSizePerAssign * i;
}
// taskSize will determine the number of kernels to run on a device
int taskSize = 1;
int mRemain = m;
// adjust taskSize
if (m >= MAX_M) {
taskSize = m / MAX_M;
if (m % MAX_M != 0) ++taskSize;
}
#ifdef DUMP
printf("task size: %d\n", taskSize);
#endif
// set up kernel execution parameters
int *mValue = (int*) malloc (sizeof(int) * taskSize);
int *index = (int*) malloc (sizeof(int) * taskSize);
coding_func *coding_function_ptrs = (coding_func*) malloc (sizeof(coding_func) * taskSize);
for (int i = 0; i < taskSize; ++i) {
if (mRemain < MAX_M) {
mValue[i] = mRemain;
}else{
mValue[i] = MAX_M;
mRemain = mRemain - MAX_M;
}
if (i == 0) {
index[i] = 0;
}else{
index[i] = index[i-1] + k * w;
}
coding_function_ptrs[i] = coding_func_array[(mValue[i] - 1) * (MAX_W - MIN_W + 1)+ w - MIN_W];
}
// create and then update encoding bit matrix
unsigned int *all_columns_bitmatrix = (unsigned int*) malloc (sizeof(unsigned int) * k * w * taskSize);
int mValueSum = 0;
for (int i = 0; i < taskSize; ++i) {
unsigned int *column_bitmatrix = gcrs_create_column_coding_bitmatrix(
k, mValue[i], w, bitmatrix + k * w * mValueSum * w);
memcpy((all_columns_bitmatrix + i * k * w), column_bitmatrix, k * w * sizeof(unsigned int));
free(column_bitmatrix);
mValueSum += mValue[i];
}
// allocate bitmatrix on a device
unsigned int *d_bitmatrix;
cudaMalloc((void**)&d_bitmatrix, sizeof(unsigned int) * k * w * taskSize);
cudaMemcpy(d_bitmatrix, all_columns_bitmatrix,
sizeof(unsigned int) * k * w * taskSize, cudaMemcpyHostToDevice);
int warpThreadNum = 32;
int threadNum = MAX_THREAD_NUM;
size_t workSizePerWarp = warpThreadNum / w * w;
size_t workSizePerBlock = threadNum / warpThreadNum * workSizePerWarp * sizeof(size_t);
size_t blockNum = bufSizePerTask / workSizePerBlock;
if ((bufSizePerTask % workSizePerBlock) != 0) {
blockNum = blockNum + 1;
}
#ifdef DUMP
printf("#blocks: %zu blockSize: %d\n", blockNum, threadNum);
#endif
struct timeval startEncodeTime, endEncodeTime;
gettimeofday(&startEncodeTime, NULL);
for (int i = 0; i < taskNum; ++i) {
int count = (i == taskNum-1) ? bufSizeForLastTask : bufSizePerTask;
cudaMemcpyAsync(d_data + i * k * bufSizePerTask,
data + i * k * bufSizePerTask, (k * count), cudaMemcpyHostToDevice, 0);
int workSizePerGrid = count / sizeof(long);
int size = workSizePerGrid * sizeof(long);
mValueSum = 0;
for (int j = 0; j < taskSize; ++j) {
coding_function_ptrs[j](k, index[j], d_data_ptr[i], d_code_ptr[i]+mValueSum*size,
d_bitmatrix, threadNum, blockNum, workSizePerGrid);
mValueSum += mValue[j];
}
cudaMemcpyAsync(
code + i * m * bufSizePerTask,
d_code + i * m * bufSizePerTask,
(m * count), cudaMemcpyDeviceToHost, 0);
}
cudaDeviceSynchronize();
gettimeofday(&endEncodeTime, NULL);
double etime = elapsed_time_in_ms(startEncodeTime, endEncodeTime);
#ifdef DUMP
printf("Encoding time over %d tasks: %lf (ms)\n", taskNum, etime);
#endif
encode_time += etime;
#ifdef DUMP
for (int i = 0; i < bufSize*m; i++) printf("%d\n", code[i]);
printf("\n");
#endif
cudaFree(d_data);
cudaFree(d_code);
cudaFree(d_bitmatrix);
free(mValue);
free(index);
free(coding_function_ptrs);
free(bitmatrix);
free(all_columns_bitmatrix);
free(d_data_ptr);
free(d_code_ptr);
free(code);
free(data);
}
}
}
printf("Total encoding time %lf (s)\n", encode_time * 1e-3);
return 0;
}
|
3c7f91eb120f4e9107203af441b0a34e7fac11a8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*$Id: MarsLib.cu 755 2009-11-18 13:22:54Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
#ifndef __MRLIB_CU__
#define __MRLIB_CU__
#include "MarsInc.h"
#include "map.cu"
#include "reduce.cu"
//----------------------------------------------
//Get default runtime configuration
//
//return: default spec
//----------------------------------------------
Spec_t *GetDefaultSpec()
{
Spec_t *spec = (Spec_t*)malloc(sizeof(Spec_t));
if (NULL == spec) exit(-1);
memset(spec, 0, sizeof(Spec_t));
return spec;
}
//--------------------------------------------------------
//Initiate map reduce spec
//--------------------------------------------------------
void InitMapReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->dimBlockMap <= 0)
g_spec->dimBlockMap = DEFAULT_DIMBLOCK;
if (g_spec->dimBlockReduce <= 0)
g_spec->dimBlockReduce = DEFAULT_DIMBLOCK;
if (g_spec->numRecTaskReduce <= 0)
g_spec->numRecTaskReduce = DEFAULT_NUMTASK;
if (g_spec->numRecTaskMap <= 0)
g_spec->numRecTaskMap = DEFAULT_NUMTASK;
if (g_spec->workflow <= 0)
g_spec->workflow = MAP_ONLY;
}
//--------------------------------------------------
//Add a map input record
//
//param : spec
//param : key -- a pointer to a buffer
//param : val -- a pointer to a buffer
//param : keySize
//param : valSize
//--------------------------------------------------
void AddMapInputRecord(Spec_t* spec,
void* key,
void* val,
int keySize,
int valSize)
{
assert(NULL != spec);
static int2 curOffset;
static int3 curChunkNum;
int index = spec->inputRecordCount;
const int dataChunkSize = 1024*1024*256;
if (spec->inputRecordCount > 0)
{
if (dataChunkSize*curChunkNum.x < (curOffset.x + keySize))
spec->inputKeys = (char*)realloc(spec->inputKeys, (++curChunkNum.x)*dataChunkSize);
memcpy(spec->inputKeys+curOffset.x, key, keySize);
if (dataChunkSize*curChunkNum.y < (curOffset.y + valSize))
spec->inputVals = (char*)realloc(spec->inputVals, (++curChunkNum.y)*dataChunkSize);
memcpy(spec->inputVals+curOffset.y, val, valSize);
if (dataChunkSize*curChunkNum.z < (spec->inputRecordCount+1)*sizeof(int4))
spec->inputOffsetSizes = (int4*)realloc(spec->inputOffsetSizes,
(++curChunkNum.z)*dataChunkSize);
}
else
{
spec->inputKeys = (char*)malloc(dataChunkSize);
if (NULL == spec->inputKeys) exit(-1);
memcpy(spec->inputKeys, key, keySize);
spec->inputVals = (char*)malloc(dataChunkSize);
if (NULL == spec->inputVals) exit(-1);
memcpy(spec->inputVals, val, valSize);
spec->inputOffsetSizes = (int4*)malloc(dataChunkSize);
curChunkNum.x++;
curChunkNum.y++;
curChunkNum.z++;
}
spec->inputOffsetSizes[index].x = curOffset.x;
spec->inputOffsetSizes[index].y = keySize;
spec->inputOffsetSizes[index].z = curOffset.y;
spec->inputOffsetSizes[index].w = valSize;
curOffset.x += keySize;
curOffset.y += valSize;
spec->inputRecordCount++;
}
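// Layout note (added comment; not part of the original Mars source): each
// int4 in inputOffsetSizes describes one record as
// .x = key offset, .y = key size, .z = value offset, .w = value size
// within inputKeys / inputVals. The intermediate and output offset-size
// arrays used by the map and reduce stages below follow the same convention.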
//-------------------------------------------------
//Called by user defined map_count function
//
//param : keySize
//param : valSize
//param : interKeysSizePerTask
//param : interValsSizePerTask
//param : interCountPerTask
//-------------------------------------------------
__device__ void EmitInterCount(int keySize,
int valSize,
int* interKeysSizePerTask,
int* interValsSizePerTask,
int* interCountPerTask)
{
int index = TID;
interKeysSizePerTask[index] += keySize;
interValsSizePerTask[index] += valSize;
interCountPerTask[index]++;
}
//-------------------------------------------------
//called by user defined map function
//
//-------------------------------------------------
__device__ void EmitIntermediate(void* key,
void* val,
int keySize,
int valSize,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* interKeys,
char* interVals,
int4* interOffsetSizes,
int* curIndex)
{
#ifndef __DEVICE_EMULATION__
__syncthreads();
#endif
int index = TID;
int2 l_keyValOffsets = keyValOffsets[index];
char *pKeySet = (char*)(interKeys + psKeySizes[index] + l_keyValOffsets.x);
char *pValSet = (char*)(interVals + psValSizes[index] + l_keyValOffsets.y);
char* sKey = (char*)key;
char* sVal = (char*)val;
for (int i = 0; i < keySize; ++i)
pKeySet[i] = sKey[i];
for (int i = 0; i < valSize; ++i)
pValSet[i] = sVal[i];
l_keyValOffsets.x += keySize;
l_keyValOffsets.y += valSize;
keyValOffsets[index] = l_keyValOffsets;
int l_curIndex = curIndex[index];
int l_psCounts = psCounts[index];
int l_curPs = l_curIndex + l_psCounts;
int4 l_interOffsetSizes1 = interOffsetSizes[l_curPs];
int4 l_interOffsetSizes2 = interOffsetSizes[l_curPs-1];
if (l_curIndex != 0)
{
l_interOffsetSizes1.x = (l_interOffsetSizes2.x + l_interOffsetSizes2.y);
l_interOffsetSizes1.z = (l_interOffsetSizes2.z + l_interOffsetSizes2.w);
}
l_interOffsetSizes1.y = keySize;
l_interOffsetSizes1.w = valSize;
interOffsetSizes[l_curPs] = l_interOffsetSizes1;
++l_curIndex;
curIndex[index] = l_curIndex;
}
//-------------------------------------------------
//Calculate intermediate data's size
//
//param : inputKeys
//param : inputVals
//param : inputOffsetSizes
//param : interKeysSizesPerTask
//param : interValsSizePerTask
//param : interCountPerTask
//param : recordNum -- total number of records
//param : recordsPerTask
//-------------------------------------------------
__global__ void MapperCount(char* inputKeys,
char* inputVals,
int4* inputOffsetSizes,
int* interKeysSizePerTask,
int* interValsSizePerTask,
int* interCountPerTask,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int4 offsetSize = inputOffsetSizes[cindex];
char *key = inputKeys + offsetSize.x;
char *val = inputVals + offsetSize.z;
map_count(key,
val,
offsetSize.y,
offsetSize.w,
interKeysSizePerTask,
interValsSizePerTask,
interCountPerTask);
}
}
//--------------------------------------------------
//mapper
//--------------------------------------------------
__global__ void Mapper(char* inputKeys,
char* inputVals,
int4* inputOffsetSizes,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* interKeys,
char* interVals,
int4* interOffsetSizes,
int* curIndex,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
int l_psCounts = psCounts[index];
int4 l_interOffsetSizes = interOffsetSizes[l_psCounts];
l_interOffsetSizes.x = psKeySizes[index];
l_interOffsetSizes.z = psValSizes[index];
interOffsetSizes[l_psCounts] = l_interOffsetSizes;
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int4 offsetSize = inputOffsetSizes[cindex];
char *key = inputKeys + offsetSize.x;
char *val = inputVals + offsetSize.z;
map(key,
val,
offsetSize.y,
offsetSize.w,
psKeySizes,
psValSizes,
psCounts,
keyValOffsets,
interKeys,
interVals,
interOffsetSizes,
curIndex);
}
}
//--------------------------------------------------
//start map
//
//1, get map input data on host
//2, upload map input data to device memory
// (keys, vals, keyOffsets, valOffsets, keySizes, valSizes)
//3, determine the number of threads to run
//4, calculate intermediate data keys'buf size
// and values' buf size
//5, do prefix sum on--
// i) d_interKeysSizePerTask
// ii) d_interValsSizePerTask
// iii) d_interCountPerTask
//6, allocate intermediate memory on device memory
//7, start map
//8, free allocated memory
//--------------------------------------------------
int startMap(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->inputKeys == NULL) { DoLog("Error: no any input keys"); exit(0);}
if (g_spec->inputVals == NULL) { DoLog("Error: no any input values"); exit(0); }
if (g_spec->inputOffsetSizes == NULL) { DoLog( "Error: no any input pointer info"); exit(0); }
if (g_spec->inputRecordCount == 0) {DoLog( "Error: invalid input record count"); exit(0);}
//-------------------------------------------------------
//1, get map input data on host
//-------------------------------------------------------
int h_inputRecordCount = g_spec->inputRecordCount;
int h_inputKeysBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].x +
g_spec->inputOffsetSizes[h_inputRecordCount-1].y;
int h_inputValsBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].z +
g_spec->inputOffsetSizes[h_inputRecordCount-1].w;
char* h_inputKeys = g_spec->inputKeys;
char* h_inputVals = g_spec->inputVals;
int4* h_inputOffsetSizes = g_spec->inputOffsetSizes;
DoLog( "** Map Input: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records",
h_inputKeysBufSize, h_inputValsBufSize, sizeof(int4)*h_inputRecordCount, h_inputRecordCount);
//-------------------------------------------------------
//2, upload map input data onto device memory
//-------------------------------------------------------
DoLog( "** Upload map input data onto device memory");
TimeVal_t uploadTv;
startTimer(&uploadTv);
char* d_inputKeys = NULL;
char* d_inputVals = NULL;
int4* d_inputOffsetSizes = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_inputKeys, h_inputKeysBufSize));
CUDA_SAFE_CALL(hipMemcpy(d_inputKeys, h_inputKeys, h_inputKeysBufSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMalloc((void**)&d_inputVals, h_inputValsBufSize));
CUDA_SAFE_CALL(hipMemcpy(d_inputVals, h_inputVals, h_inputValsBufSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMalloc((void**)&d_inputOffsetSizes, sizeof(int4)*h_inputRecordCount));
hipMemcpy(d_inputOffsetSizes, h_inputOffsetSizes, sizeof(int4)*h_inputRecordCount, hipMemcpyHostToDevice);
endTimer("PCI-E I/O", &uploadTv);
//----------------------------------------------
//3, determine the number of threads to run
//----------------------------------------------
dim3 h_dimBlock(g_spec->dimBlockMap,1,1);
dim3 h_dimGrid(1,1,1);
int h_recordsPerTask = g_spec->numRecTaskMap;
int numBlocks = CEIL(CEIL(h_inputRecordCount, h_recordsPerTask), h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y;
TimeVal_t mapTimer;
startTimer(&mapTimer);
//----------------------------------------------
//4, calculate intermediate data keys'buf size
// and values' buf size
//----------------------------------------------
DoLog( "** MapCount");
int* d_interKeysSizePerTask = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interKeysSizePerTask, sizeof(int)*h_actualNumThreads));
hipMemset(d_interKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_interValsSizePerTask = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interValsSizePerTask, sizeof(int)*h_actualNumThreads));
hipMemset(d_interValsSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_interCountPerTask = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interCountPerTask, sizeof(int)*h_actualNumThreads));
hipMemset(d_interCountPerTask, 0, sizeof(int)*h_actualNumThreads);
hipLaunchKernelGGL(( MapperCount), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_inputKeys,
d_inputVals,
d_inputOffsetSizes,
d_interKeysSizePerTask,
d_interValsSizePerTask,
d_interCountPerTask,
h_inputRecordCount,
h_recordsPerTask,
h_actualNumThreads);
hipDeviceSynchronize();
//-----------------------------------------------
//5, do prefix sum on--
// i) d_interKeysSizePerTask
// ii) d_interValsSizePerTask
// iii) d_interCountPerTask
//-----------------------------------------------
DoLog( "** Do prefix sum on intermediate data's size\n");
int *d_psKeySizes = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads));
int h_allKeySize = prefexSum((int*)d_interKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads);
int *d_psValSizes = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads));
int h_allValSize = prefexSum((int*)d_interValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads);
int *d_psCounts = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads));
int h_allCounts = prefexSum((int*)d_interCountPerTask, (int*)d_psCounts, h_actualNumThreads);
DoLog( "** Map Output: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records",
h_allKeySize, h_allValSize, h_allCounts * sizeof(int4), h_allCounts);
if (h_allCounts == 0)
{
DoLog( "** No output.");
hipFree(d_inputKeys);
hipFree(d_inputVals);
hipFree(d_inputOffsetSizes);
hipFree(d_psKeySizes);
hipFree(d_psValSizes);
hipFree(d_psCounts);
endTimer("Map", &mapTimer);
return 1;
}
//-----------------------------------------------
//6, allocate intermediate memory on device memory
//-----------------------------------------------
DoLog( "** Allocate intermediate memory on device memory");
char* d_interKeys = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interKeys, h_allKeySize));
hipMemset(d_interKeys, 0, h_allKeySize);
char* d_interVals = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interVals, h_allValSize));
hipMemset(d_interVals, 0, h_allValSize);
int4* d_interOffsetSizes = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_interOffsetSizes, sizeof(int4)*h_allCounts));
hipMemset(d_interOffsetSizes, 0, sizeof(int4)*h_allCounts);
//--------------------------------------------------
//7, start map
//--------------------------------------------------
DoLog( "** Map");
int2* d_keyValOffsets = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads));
hipMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads);
int* d_curIndex = NULL;
CUDA_SAFE_CALL(hipMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads));
hipMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads);
int sizeSmem = h_dimBlock.x * sizeof(int) * 5;
hipLaunchKernelGGL(( Mapper), dim3(h_dimGrid), dim3(h_dimBlock), sizeSmem, 0, d_inputKeys,
d_inputVals,
d_inputOffsetSizes,
d_psKeySizes,
d_psValSizes,
d_psCounts,
d_keyValOffsets,
d_interKeys,
d_interVals,
d_interOffsetSizes,
d_curIndex,
h_inputRecordCount,
h_recordsPerTask,
h_actualNumThreads);
hipDeviceSynchronize();
g_spec->interKeys = d_interKeys;
g_spec->interVals = d_interVals;
g_spec->interOffsetSizes = d_interOffsetSizes;
g_spec->interRecordCount = h_allCounts;
g_spec->interDiffKeyCount = h_allCounts;
g_spec->interAllKeySize = h_allKeySize;
g_spec->interAllValSize = h_allValSize;
//----------------------------------------------
//8, free
//----------------------------------------------
hipFree(d_interKeysSizePerTask);
hipFree(d_interValsSizePerTask);
hipFree(d_interCountPerTask);
hipFree(d_keyValOffsets);
hipFree(d_curIndex);
hipFree(d_inputKeys);
hipFree(d_inputVals);
hipFree(d_inputOffsetSizes);
hipFree(d_psKeySizes);
hipFree(d_psValSizes);
hipFree(d_psCounts);
endTimer("Map", &mapTimer);
return 0;
}
void startGroup(Spec_t* spec)
{
Spec_t* g_spec = spec;
int interDiffKeyCount = 0;
char* d_outputKeys = NULL;
char* d_outputVals = NULL;
int4* d_outputOffsetSizes = NULL;
int2** h_outputKeyListRange = NULL;
DoLog( "** Sort for group");
CUDA_SAFE_CALL(hipMalloc((void**)&d_outputKeys, g_spec->interAllKeySize));
CUDA_SAFE_CALL(hipMalloc((void**)&d_outputVals, g_spec->interAllValSize));
CUDA_SAFE_CALL(hipMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*g_spec->interRecordCount));
h_outputKeyListRange = (int2**)malloc(sizeof(int2*));
saven_initialPrefixSum(g_spec->interRecordCount);
interDiffKeyCount =
sort_GPU (g_spec->interKeys,
g_spec->interAllKeySize,
g_spec->interVals,
g_spec->interAllValSize,
g_spec->interOffsetSizes,
g_spec->interRecordCount,
d_outputKeys,
d_outputVals,
d_outputOffsetSizes,
h_outputKeyListRange);
DoLog( "** InterRecordCount:%d, number of groups: %d", g_spec->interRecordCount, interDiffKeyCount);
g_spec->interKeys = d_outputKeys;
g_spec->interVals = d_outputVals;
g_spec->interOffsetSizes = d_outputOffsetSizes;
g_spec->interDiffKeyCount = interDiffKeyCount;
int keyListRangeSize = g_spec->interDiffKeyCount * sizeof(int2);
CUDA_SAFE_CALL(hipMalloc((void**)&g_spec->interKeyListRange, keyListRangeSize));
CUDA_SAFE_CALL(hipMemcpy(g_spec->interKeyListRange, *h_outputKeyListRange, keyListRangeSize, hipMemcpyHostToDevice));
free(*h_outputKeyListRange);
free(h_outputKeyListRange);
}
//--------------------------------------------------------
//get a value from value list of the same key
//
//param : vals
//param : interOffsetSizes
//param : index
//return: the wanted value
//--------------------------------------------------------
__device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
int4 offset = interOffsetSizes[valStartIndex];
return (void*)((char*)vals + keyIndex * offset.w);
}
__device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
int4 offset = interOffsetSizes[valStartIndex];
return (void*)((char*)key + keyIndex * offset.y);
}
//---------------------------------------------------------
//called by user defined reduce_count function
//---------------------------------------------------------
__device__ void EmitCount(int keySize,
int valSize,
int* outputKeysSizePerTask,
int* outputValsSizePerTask,
int* outputCountPerTask)
{
int index = TID;
outputKeysSizePerTask[index] += keySize;
outputValsSizePerTask[index] += valSize;
outputCountPerTask[index]++;
}
//---------------------------------------------------------
//called by user defined reduce function
//---------------------------------------------------------
__device__ void Emit (char* key,
char* val,
int keySize,
int valSize,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* outputKeys,
char* outputVals,
int4* outputOffsetSizes,
int* curIndex)
{
#ifndef __DEVICE_EMULATION__
__syncthreads();
#endif
int index = TID;
char *pKeySet = (char*)(outputKeys + psKeySizes[index] + keyValOffsets[index].x);
char *pValSet = (char*)(outputVals + psValSizes[index] + keyValOffsets[index].y);
for (int i = 0; i < keySize; i++)
pKeySet[i] = key[i];
for (int i = 0; i < valSize; i++)
pValSet[i] = val[i];
keyValOffsets[index].x += keySize;
keyValOffsets[index].y += valSize;
if (curIndex[index] != 0)
{
outputOffsetSizes[psCounts[index] + curIndex[index]].x =
(outputOffsetSizes[psCounts[index] + curIndex[index] - 1].x +
outputOffsetSizes[psCounts[index] + curIndex[index] - 1].y);
outputOffsetSizes[psCounts[index] + curIndex[index]].z =
(outputOffsetSizes[psCounts[index] + curIndex[index] - 1].z +
outputOffsetSizes[psCounts[index] + curIndex[index] - 1].w);
}
outputOffsetSizes[psCounts[index] + curIndex[index]].y = keySize;
outputOffsetSizes[psCounts[index] + curIndex[index]].w = valSize;
curIndex[index]++;
}
//-------------------------------------------------------
//calculate output data's size
//-------------------------------------------------------
__global__ void ReducerCount(char* interKeys,
char* interVals,
int4* interOffsetSizes,
int2* interKeyListRange,
int* outputKeysSizePerTask,
int* outputValsSizePerTask,
int* outputCountPerTask,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
//for (int i = 0; i <= recordsPerTask; i++)
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int valStartIndex = interKeyListRange[cindex].x;
int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x;
int keySize = interOffsetSizes[interKeyListRange[cindex].x].y;
char *key = interKeys + interOffsetSizes[valStartIndex].x;
char *vals = interVals + interOffsetSizes[valStartIndex].z;
reduce_count(key,
vals,
keySize,
valCount,
interOffsetSizes,
outputKeysSizePerTask,
outputValsSizePerTask,
outputCountPerTask);
}
}
//-------------------------------------------------------
//Reducer
//
//-------------------------------------------------------
__global__ void Reducer(char* interKeys,
char* interVals,
int4* interOffsetSizes,
int2* interKeyListRange,
int* psKeySizes,
int* psValSizes,
int* psCounts,
char* outputKeys,
char* outputVals,
int4* outputOffsetSizes,
int2* keyValOffsets,
int* curIndex,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
outputOffsetSizes[psCounts[index]].x = psKeySizes[index];
outputOffsetSizes[psCounts[index]].z = psValSizes[index];
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int valStartIndex = interKeyListRange[cindex].x;
int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x;
int keySize = interOffsetSizes[interKeyListRange[cindex].x].y;
char *key = interKeys + interOffsetSizes[valStartIndex].x;
char *vals = interVals + interOffsetSizes[valStartIndex].z;
reduce(key,
vals,
keySize,
valCount,
psKeySizes,
psValSizes,
psCounts,
keyValOffsets,
interOffsetSizes,
outputKeys,
outputVals,
outputOffsetSizes,
curIndex,
valStartIndex);
}
}
//----------------------------------------------
//start reduce
//
//1, if there is not a reduce phase, just return
// then user uses spec->interKeys/spec->intervals
// for further processing
//2, get reduce input data on host
//3, upload reduce input data onto device memory
//4, determine the number of threads to run
//5, calculate output data keys'buf size
// and values' buf size
//6, do prefix sum on--
// i) d_outputKeysSizePerTask
// ii) d_outputValsSizePerTask
// iii) d_outputCountPerTask
//7, allocate output memory on device memory
//8, start reduce
//9, copy output data to Spect_t structure
//10,free allocated memory
//----------------------------------------------
void startReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->interKeys == NULL) {DoLog( "Error: no any intermediate keys"); exit(0);}
if (g_spec->interVals == NULL) {DoLog( "Error: no any intermediate values"); exit(0);}
if (g_spec->interOffsetSizes == NULL) {DoLog( "Error: no any intermediate pointer info");exit(0);}
if (g_spec->interRecordCount == 0) {DoLog( "Error: invalid intermediate record count");exit(0);}
if (g_spec->interKeyListRange == NULL) { DoLog( "Error: no any key list range");exit(0);}
if (g_spec->interDiffKeyCount == 0) { DoLog( "Error: invalid intermediate diff key count");exit(0);}
//-------------------------------------------------------
//2, get reduce input data on host
//-------------------------------------------------------
int h_interDiffKeyCount = g_spec->interDiffKeyCount;
char* d_interKeys = g_spec->interKeys;
char* d_interVals = g_spec->interVals;
int4* d_interOffsetSizes = g_spec->interOffsetSizes;
int2* d_interKeyListRange = g_spec->interKeyListRange;
//----------------------------------------------
//4, determine the number of threads to run
//----------------------------------------------
dim3 h_dimBlock(g_spec->dimBlockReduce,1,1);
dim3 h_dimGrid(1,1,1);
int h_recordsPerTask = g_spec->numRecTaskReduce;
int numBlocks = CEIL(CEIL(h_interDiffKeyCount, h_recordsPerTask), h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y;
//----------------------------------------------
//5, calculate output data keys'buf size
// and values' buf size
//----------------------------------------------
DoLog( "** ReduceCount");
int* d_outputKeysSizePerTask = NULL;
hipMalloc((void**)&d_outputKeysSizePerTask, sizeof(int)*h_actualNumThreads);
hipMemset(d_outputKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_outputValsSizePerTask = NULL;
hipMalloc((void**)&d_outputValsSizePerTask, sizeof(int)*h_actualNumThreads);
hipMemset(d_outputValsSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_outputCountPerTask = NULL;
hipMalloc((void**)&d_outputCountPerTask, sizeof(int)*h_actualNumThreads);
hipMemset(d_outputCountPerTask, 0, sizeof(int)*h_actualNumThreads);
hipLaunchKernelGGL(( ReducerCount), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_interKeys,
d_interVals,
d_interOffsetSizes,
d_interKeyListRange,
d_outputKeysSizePerTask,
d_outputValsSizePerTask,
d_outputCountPerTask,
h_interDiffKeyCount,
h_recordsPerTask,
h_actualNumThreads);
hipDeviceSynchronize();
//-----------------------------------------------
//6, do prefix sum on--
// i) d_outputKeysSizePerTask
// ii) d_outputValsSizePerTask
// iii) d_outputCountPerTask
//-----------------------------------------------
DoLog( "** Do prefix sum on output data's size");
int *d_psKeySizes = NULL;
hipMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads);
hipMemset(d_psKeySizes, 0, sizeof(int)*h_actualNumThreads);
int h_allKeySize = prefexSum((int*)d_outputKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads);
int *d_psValSizes = NULL;
hipMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads);
hipMemset(d_psValSizes, 0, sizeof(int)*h_actualNumThreads);
int h_allValSize = prefexSum((int*)d_outputValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads);
int *d_psCounts = NULL;
hipMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads);
hipMemset(d_psCounts, 0, sizeof(int)*h_actualNumThreads);
int h_allCounts = prefexSum((int*)d_outputCountPerTask, (int*)d_psCounts, h_actualNumThreads);
DoLog("** Reduce Output: key buf size %d bytes, val buf size %d bytes, index buf size %d bytes, %d records",
h_allKeySize, h_allValSize, h_allCounts*sizeof(int4),h_allCounts);
//-----------------------------------------------
//7, allocate output memory on device memory
//-----------------------------------------------
DoLog( "** Allocate intermediate memory on device memory");
char* d_outputKeys = NULL;
hipMalloc((void**)&d_outputKeys, h_allKeySize);
char* d_outputVals = NULL;
hipMalloc((void**)&d_outputVals, h_allValSize);
int4* d_outputOffsetSizes = NULL;
hipMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*h_allCounts);
//--------------------------------------------------
//8, start reduce
//--------------------------------------------------
DoLog( "** Reduce");
int2* d_keyValOffsets = NULL;
hipMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads);
hipMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads);
int* d_curIndex = NULL;
hipMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads);
hipMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads);
int sizeSmem = h_dimBlock.x * sizeof(int) * 5;
hipLaunchKernelGGL(( Reducer), dim3(h_dimGrid), dim3(h_dimBlock), sizeSmem, 0, d_interKeys,
d_interVals,
d_interOffsetSizes,
d_interKeyListRange,
d_psKeySizes,
d_psValSizes,
d_psCounts,
d_outputKeys,
d_outputVals,
d_outputOffsetSizes,
d_keyValOffsets,
d_curIndex,
h_interDiffKeyCount,
h_recordsPerTask,
h_actualNumThreads);
hipDeviceSynchronize();
//-------------------------------------------------------
//9, copy output data to Spec_t structure
//-------------------------------------------------------
g_spec->outputKeys = d_outputKeys;
g_spec->outputVals = d_outputVals;
g_spec->outputOffsetSizes = d_outputOffsetSizes;
g_spec->outputRecordCount = h_allCounts;
g_spec->outputAllKeySize = h_allKeySize;
g_spec->outputAllValSize = h_allValSize;
//----------------------------------------------
//10, free allocated memory
//----------------------------------------------
hipFree(d_interKeys);
hipFree(d_interVals);
hipFree(d_interOffsetSizes);
hipFree(d_outputKeysSizePerTask);
hipFree(d_outputValsSizePerTask);
hipFree(d_outputCountPerTask);
hipFree(d_psKeySizes);
hipFree(d_psValSizes);
hipFree(d_psCounts);
hipFree(d_keyValOffsets);
hipFree(d_curIndex);
}
//----------------------------------------------
//start main map reduce procedure
//1, init device
//2, start map
//3, start reduce
//
//param : spec
//----------------------------------------------
void MapReduce(Spec_t *spec)
{
assert(NULL != spec);
Spec_t* g_spec = spec;
DoLog( "=====start map/reduce=====");
//-------------------------------------------
//1, init device
//-------------------------------------------
//CUT_DEVICE_INIT();
DoLog( "** init GPU");
InitMapReduce(spec);
//-------------------------------------------
//2, start map
//-------------------------------------------
DoLog( "----------start map-----------");
if (startMap(spec))
{
printf("** No output.");
return;
}
if (g_spec->workflow == MAP_ONLY)
{
g_spec->outputKeys = g_spec->interKeys;
g_spec->outputVals = g_spec->interVals;
g_spec->outputOffsetSizes = g_spec->interOffsetSizes;
g_spec->outputRecordCount = g_spec->interRecordCount;
g_spec->outputAllKeySize = g_spec->interAllKeySize;
g_spec->outputAllValSize = g_spec->interAllValSize;
goto EXIT_MR;
}
//-------------------------------------------
//3, start group
//-------------------------------------------
DoLog( "----------start group-----------");
TimeVal_t groupTimer;
startTimer(&groupTimer);
startGroup(spec);
endTimer("Group", &groupTimer);
if (g_spec->workflow == MAP_GROUP)
{
g_spec->outputKeys = g_spec->interKeys;
g_spec->outputVals = g_spec->interVals;
g_spec->outputOffsetSizes = g_spec->interOffsetSizes;
g_spec->outputRecordCount = g_spec->interRecordCount;
g_spec->outputAllKeySize = g_spec->interAllKeySize;
g_spec->outputAllValSize = g_spec->interAllValSize;
g_spec->outputDiffKeyCount = g_spec->interDiffKeyCount;
if (g_spec->outputToHost == 1)
{
g_spec->outputKeyListRange = (int2*)malloc(sizeof(int2)*g_spec->outputDiffKeyCount);
CUDA_SAFE_CALL(hipMemcpy(g_spec->outputKeyListRange, g_spec->interKeyListRange, sizeof(int2)*g_spec->outputDiffKeyCount, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(g_spec->interKeyListRange));
}
goto EXIT_MR;
}
//-------------------------------------------
//4, start reduce
//-------------------------------------------
DoLog( "----------start reduce--------");
TimeVal_t reduceTimer;
startTimer(&reduceTimer);
startReduce(spec);
endTimer("Reduce", &reduceTimer);
EXIT_MR:
if (g_spec->outputToHost == 1)
{
int indexSize = g_spec->outputRecordCount * sizeof(int4);
char* h_outputKeys = (char*)malloc(g_spec->outputAllKeySize);
if (h_outputKeys == NULL) exit(0);
char* h_outputVals = (char*)malloc(g_spec->outputAllValSize);
if (h_outputVals == NULL) exit(0);
int4* h_outputOffsetSizes = (int4*)malloc(indexSize);
if (h_outputOffsetSizes == NULL) exit(0);
CUDA_SAFE_CALL(hipMemcpy(h_outputKeys, g_spec->outputKeys, g_spec->outputAllKeySize, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(h_outputVals, g_spec->outputVals, g_spec->outputAllValSize, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(h_outputOffsetSizes, g_spec->outputOffsetSizes, indexSize, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(g_spec->outputKeys));
CUDA_SAFE_CALL(hipFree(g_spec->outputVals));
CUDA_SAFE_CALL(hipFree(g_spec->outputOffsetSizes));
g_spec->outputKeys = h_outputKeys;
g_spec->outputVals = h_outputVals;
g_spec->outputOffsetSizes = h_outputOffsetSizes;
}
}
//------------------------------------------
//the last step
//
//1, free global variables' memory
//2, close log file's file pointer
//------------------------------------------
void FinishMapReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
//-------------------------------------------
//1, free global variables' memory
//-------------------------------------------
free(g_spec->inputKeys);
free(g_spec->inputVals);
free(g_spec->inputOffsetSizes);
if (g_spec->outputToHost == 1)
{
free(g_spec->outputKeys);
free(g_spec->outputVals);
free(g_spec->outputOffsetSizes);
if (g_spec->workflow == MAP_GROUP)
free(g_spec->outputKeyListRange);
}
else
{
hipFree(g_spec->outputKeys);
hipFree(g_spec->outputVals);
hipFree(g_spec->outputOffsetSizes);
if (g_spec->workflow == MAP_GROUP)
hipFree(g_spec->outputKeyListRange);
}
free(g_spec);
DoLog( "=====finish map/reduce=====");
}
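// ---------------------------------------------------------------------------
// Hedged usage sketch (added; not part of the original Mars source): a driver
// built on the API above might look roughly like this. The workflow constant
// MAP_REDUCE and the user-supplied record buffers are assumptions; only
// MAP_ONLY and MAP_GROUP appear explicitly in this file.
//
//     Spec_t* spec = GetDefaultSpec();
//     spec->workflow = MAP_REDUCE;   // assumed constant for the full pipeline
//     spec->outputToHost = 1;        // copy results back to host buffers
//     for (int i = 0; i < numRecords; ++i)
//         AddMapInputRecord(spec, keys[i], vals[i], keySizes[i], valSizes[i]);
//     MapReduce(spec);
//     // consume spec->outputKeys / outputVals / outputOffsetSizes here
//     FinishMapReduce(spec);
// ---------------------------------------------------------------------------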
#endif //__MRLIB_CU__
|
3c7f91eb120f4e9107203af441b0a34e7fac11a8.cu
|
/*$Id: MarsLib.cu 755 2009-11-18 13:22:54Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
#ifndef __MRLIB_CU__
#define __MRLIB_CU__
#include "MarsInc.h"
#include "map.cu"
#include "reduce.cu"
//----------------------------------------------
//Get default runtime configuration
//
//return: default spec
//----------------------------------------------
Spec_t *GetDefaultSpec()
{
Spec_t *spec = (Spec_t*)malloc(sizeof(Spec_t));
if (NULL == spec) exit(-1);
memset(spec, 0, sizeof(Spec_t));
return spec;
}
//--------------------------------------------------------
//Initiate map reduce spec
//--------------------------------------------------------
void InitMapReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->dimBlockMap <= 0)
g_spec->dimBlockMap = DEFAULT_DIMBLOCK;
if (g_spec->dimBlockReduce <= 0)
g_spec->dimBlockReduce = DEFAULT_DIMBLOCK;
if (g_spec->numRecTaskReduce <= 0)
g_spec->numRecTaskReduce = DEFAULT_NUMTASK;
if (g_spec->numRecTaskMap <= 0)
g_spec->numRecTaskMap = DEFAULT_NUMTASK;
if (g_spec->workflow <= 0)
g_spec->workflow = MAP_ONLY;
}
//--------------------------------------------------
//Add a map input record
//
//param : spec
//param : key -- a pointer to a buffer
//param : val -- a pointer to a buffer
//param : keySize
//param : valSize
//--------------------------------------------------
void AddMapInputRecord(Spec_t* spec,
void* key,
void* val,
int keySize,
int valSize)
{
assert(NULL != spec);
static int2 curOffset;
static int3 curChunkNum;
int index = spec->inputRecordCount;
const int dataChunkSize = 1024*1024*256;
if (spec->inputRecordCount > 0)
{
if (dataChunkSize*curChunkNum.x < (curOffset.x + keySize))
spec->inputKeys = (char*)realloc(spec->inputKeys, (++curChunkNum.x)*dataChunkSize);
memcpy(spec->inputKeys+curOffset.x, key, keySize);
if (dataChunkSize*curChunkNum.y < (curOffset.y + valSize))
spec->inputVals = (char*)realloc(spec->inputVals, (++curChunkNum.y)*dataChunkSize);
memcpy(spec->inputVals+curOffset.y, val, valSize);
if (dataChunkSize*curChunkNum.z < (spec->inputRecordCount+1)*sizeof(int4))
spec->inputOffsetSizes = (int4*)realloc(spec->inputOffsetSizes,
(++curChunkNum.z)*dataChunkSize);
}
else
{
spec->inputKeys = (char*)malloc(dataChunkSize);
if (NULL == spec->inputKeys) exit(-1);
memcpy(spec->inputKeys, key, keySize);
spec->inputVals = (char*)malloc(dataChunkSize);
if (NULL == spec->inputVals) exit(-1);
memcpy(spec->inputVals, val, valSize);
spec->inputOffsetSizes = (int4*)malloc(dataChunkSize);
curChunkNum.x++;
curChunkNum.y++;
curChunkNum.z++;
}
spec->inputOffsetSizes[index].x = curOffset.x;
spec->inputOffsetSizes[index].y = keySize;
spec->inputOffsetSizes[index].z = curOffset.y;
spec->inputOffsetSizes[index].w = valSize;
curOffset.x += keySize;
curOffset.y += valSize;
spec->inputRecordCount++;
}
//-------------------------------------------------
//Called by user defined map_count function
//
//param : keySize
//param : valSize
//param : interKeysSizePerTask
//param : interValsSizePerTask
//param : interCountPerTask
//-------------------------------------------------
__device__ void EmitInterCount(int keySize,
int valSize,
int* interKeysSizePerTask,
int* interValsSizePerTask,
int* interCountPerTask)
{
int index = TID;
interKeysSizePerTask[index] += keySize;
interValsSizePerTask[index] += valSize;
interCountPerTask[index]++;
}
//-------------------------------------------------
//called by user defined map function
//
//-------------------------------------------------
__device__ void EmitIntermediate(void* key,
void* val,
int keySize,
int valSize,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* interKeys,
char* interVals,
int4* interOffsetSizes,
int* curIndex)
{
#ifndef __DEVICE_EMULATION__
__syncthreads();
#endif
int index = TID;
int2 l_keyValOffsets = keyValOffsets[index];
char *pKeySet = (char*)(interKeys + psKeySizes[index] + l_keyValOffsets.x);
char *pValSet = (char*)(interVals + psValSizes[index] + l_keyValOffsets.y);
char* sKey = (char*)key;
char* sVal = (char*)val;
for (int i = 0; i < keySize; ++i)
pKeySet[i] = sKey[i];
for (int i = 0; i < valSize; ++i)
pValSet[i] = sVal[i];
l_keyValOffsets.x += keySize;
l_keyValOffsets.y += valSize;
keyValOffsets[index] = l_keyValOffsets;
int l_curIndex = curIndex[index];
int l_psCounts = psCounts[index];
int l_curPs = l_curIndex + l_psCounts;
int4 l_interOffsetSizes1 = interOffsetSizes[l_curPs];
int4 l_interOffsetSizes2 = interOffsetSizes[l_curPs-1];
if (l_curIndex != 0)
{
l_interOffsetSizes1.x = (l_interOffsetSizes2.x + l_interOffsetSizes2.y);
l_interOffsetSizes1.z = (l_interOffsetSizes2.z + l_interOffsetSizes2.w);
}
l_interOffsetSizes1.y = keySize;
l_interOffsetSizes1.w = valSize;
interOffsetSizes[l_curPs] = l_interOffsetSizes1;
++l_curIndex;
curIndex[index] = l_curIndex;
}
//-------------------------------------------------
//Calculate intermediate data's size
//
//param : inputKeys
//param : inputVals
//param : inputOffsetSizes
//param : interKeysSizesPerTask
//param : interValsSizePerTask
//param : interCountPerTask
//param : recordNum -- total number of records
//param : recordsPerTask
//-------------------------------------------------
__global__ void MapperCount(char* inputKeys,
char* inputVals,
int4* inputOffsetSizes,
int* interKeysSizePerTask,
int* interValsSizePerTask,
int* interCountPerTask,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int4 offsetSize = inputOffsetSizes[cindex];
char *key = inputKeys + offsetSize.x;
char *val = inputVals + offsetSize.z;
map_count(key,
val,
offsetSize.y,
offsetSize.w,
interKeysSizePerTask,
interValsSizePerTask,
interCountPerTask);
}
}
//--------------------------------------------------
//mapper
//--------------------------------------------------
__global__ void Mapper(char* inputKeys,
char* inputVals,
int4* inputOffsetSizes,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* interKeys,
char* interVals,
int4* interOffsetSizes,
int* curIndex,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
int l_psCounts = psCounts[index];
int4 l_interOffsetSizes = interOffsetSizes[l_psCounts];
l_interOffsetSizes.x = psKeySizes[index];
l_interOffsetSizes.z = psValSizes[index];
interOffsetSizes[l_psCounts] = l_interOffsetSizes;
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int4 offsetSize = inputOffsetSizes[cindex];
char *key = inputKeys + offsetSize.x;
char *val = inputVals + offsetSize.z;
map(key,
val,
offsetSize.y,
offsetSize.w,
psKeySizes,
psValSizes,
psCounts,
keyValOffsets,
interKeys,
interVals,
interOffsetSizes,
curIndex);
}
}
//--------------------------------------------------
//start map
//
//1, get map input data on host
//2, upload map input data to device memory
// (keys, vals, keyOffsets, valOffsets, keySizes, valSizes)
//3, determine the number of threads to run
//4, calculate intermediate data keys'buf size
// and values' buf size
//5, do prefix sum on--
// i) d_interKeysSizePerTask
// ii) d_interValsSizePerTask
// iii) d_interCountPerTask
//6, allocate intermediate memory on device memory
//7, start map
//8, free allocated memory
//--------------------------------------------------
int startMap(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->inputKeys == NULL) { DoLog("Error: no any input keys"); exit(0);}
if (g_spec->inputVals == NULL) { DoLog("Error: no any input values"); exit(0); }
if (g_spec->inputOffsetSizes == NULL) { DoLog( "Error: no any input pointer info"); exit(0); }
if (g_spec->inputRecordCount == 0) {DoLog( "Error: invalid input record count"); exit(0);}
//-------------------------------------------------------
//1, get map input data on host
//-------------------------------------------------------
int h_inputRecordCount = g_spec->inputRecordCount;
int h_inputKeysBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].x +
g_spec->inputOffsetSizes[h_inputRecordCount-1].y;
int h_inputValsBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].z +
g_spec->inputOffsetSizes[h_inputRecordCount-1].w;
char* h_inputKeys = g_spec->inputKeys;
char* h_inputVals = g_spec->inputVals;
int4* h_inputOffsetSizes = g_spec->inputOffsetSizes;
DoLog( "** Map Input: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records",
h_inputKeysBufSize, h_inputValsBufSize, sizeof(int4)*h_inputRecordCount, h_inputRecordCount);
//-------------------------------------------------------
//2, upload map input data onto device memory
//-------------------------------------------------------
DoLog( "** Upload map input data onto device memory");
TimeVal_t uploadTv;
startTimer(&uploadTv);
char* d_inputKeys = NULL;
char* d_inputVals = NULL;
int4* d_inputOffsetSizes = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_inputKeys, h_inputKeysBufSize));
CUDA_SAFE_CALL(cudaMemcpy(d_inputKeys, h_inputKeys, h_inputKeysBufSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_inputVals, h_inputValsBufSize));
CUDA_SAFE_CALL(cudaMemcpy(d_inputVals, h_inputVals, h_inputValsBufSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_inputOffsetSizes, sizeof(int4)*h_inputRecordCount));
cudaMemcpy(d_inputOffsetSizes, h_inputOffsetSizes, sizeof(int4)*h_inputRecordCount, cudaMemcpyHostToDevice);
endTimer("PCI-E I/O", &uploadTv);
//----------------------------------------------
//3, determine the number of threads to run
//----------------------------------------------
dim3 h_dimBlock(g_spec->dimBlockMap,1,1);
dim3 h_dimGrid(1,1,1);
int h_recordsPerTask = g_spec->numRecTaskMap;
int numBlocks = CEIL(CEIL(h_inputRecordCount, h_recordsPerTask), h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y;
TimeVal_t mapTimer;
startTimer(&mapTimer);
//----------------------------------------------
//4, calculate intermediate data keys'buf size
// and values' buf size
//----------------------------------------------
DoLog( "** MapCount");
int* d_interKeysSizePerTask = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interKeysSizePerTask, sizeof(int)*h_actualNumThreads));
cudaMemset(d_interKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_interValsSizePerTask = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interValsSizePerTask, sizeof(int)*h_actualNumThreads));
cudaMemset(d_interValsSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_interCountPerTask = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interCountPerTask, sizeof(int)*h_actualNumThreads));
cudaMemset(d_interCountPerTask, 0, sizeof(int)*h_actualNumThreads);
MapperCount<<<h_dimGrid, h_dimBlock>>>(d_inputKeys,
d_inputVals,
d_inputOffsetSizes,
d_interKeysSizePerTask,
d_interValsSizePerTask,
d_interCountPerTask,
h_inputRecordCount,
h_recordsPerTask,
h_actualNumThreads);
cudaThreadSynchronize();
//-----------------------------------------------
//5, do prefix sum on--
// i) d_interKeysSizePerTask
// ii) d_interValsSizePerTask
// iii) d_interCountPerTask
//-----------------------------------------------
DoLog( "** Do prefix sum on intermediate data's size\n");
int *d_psKeySizes = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads));
int h_allKeySize = prefexSum((int*)d_interKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads);
int *d_psValSizes = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads));
int h_allValSize = prefexSum((int*)d_interValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads);
int *d_psCounts = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads));
int h_allCounts = prefexSum((int*)d_interCountPerTask, (int*)d_psCounts, h_actualNumThreads);
DoLog( "** Map Output: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records",
h_allKeySize, h_allValSize, h_allCounts * sizeof(int4), h_allCounts);
if (h_allCounts == 0)
{
DoLog( "** No output.");
cudaFree(d_inputKeys);
cudaFree(d_inputVals);
cudaFree(d_inputOffsetSizes);
cudaFree(d_psKeySizes);
cudaFree(d_psValSizes);
cudaFree(d_psCounts);
endTimer("Map", &mapTimer);
return 1;
}
//-----------------------------------------------
//6, allocate intermediate memory on device memory
//-----------------------------------------------
DoLog( "** Allocate intermediate memory on device memory");
char* d_interKeys = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interKeys, h_allKeySize));
cudaMemset(d_interKeys, 0, h_allKeySize);
char* d_interVals = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interVals, h_allValSize));
cudaMemset(d_interVals, 0, h_allValSize);
int4* d_interOffsetSizes = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_interOffsetSizes, sizeof(int4)*h_allCounts));
cudaMemset(d_interOffsetSizes, 0, sizeof(int4)*h_allCounts);
//--------------------------------------------------
//7, start map
//--------------------------------------------------
DoLog( "** Map");
int2* d_keyValOffsets = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads));
cudaMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads);
int* d_curIndex = NULL;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads));
cudaMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads);
int sizeSmem = h_dimBlock.x * sizeof(int) * 5;
Mapper<<<h_dimGrid, h_dimBlock, sizeSmem>>>(d_inputKeys,
d_inputVals,
d_inputOffsetSizes,
d_psKeySizes,
d_psValSizes,
d_psCounts,
d_keyValOffsets,
d_interKeys,
d_interVals,
d_interOffsetSizes,
d_curIndex,
h_inputRecordCount,
h_recordsPerTask,
h_actualNumThreads);
cudaThreadSynchronize();
g_spec->interKeys = d_interKeys;
g_spec->interVals = d_interVals;
g_spec->interOffsetSizes = d_interOffsetSizes;
g_spec->interRecordCount = h_allCounts;
g_spec->interDiffKeyCount = h_allCounts;
g_spec->interAllKeySize = h_allKeySize;
g_spec->interAllValSize = h_allValSize;
//----------------------------------------------
//8, free
//----------------------------------------------
cudaFree(d_interKeysSizePerTask);
cudaFree(d_interValsSizePerTask);
cudaFree(d_interCountPerTask);
cudaFree(d_keyValOffsets);
cudaFree(d_curIndex);
cudaFree(d_inputKeys);
cudaFree(d_inputVals);
cudaFree(d_inputOffsetSizes);
cudaFree(d_psKeySizes);
cudaFree(d_psValSizes);
cudaFree(d_psCounts);
endTimer("Map", &mapTimer);
return 0;
}
void startGroup(Spec_t* spec)
{
Spec_t* g_spec = spec;
int interDiffKeyCount = 0;
char* d_outputKeys = NULL;
char* d_outputVals = NULL;
int4* d_outputOffsetSizes = NULL;
int2** h_outputKeyListRange = NULL;
DoLog( "** Sort for group");
CUDA_SAFE_CALL(cudaMalloc((void**)&d_outputKeys, g_spec->interAllKeySize));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_outputVals, g_spec->interAllValSize));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*g_spec->interRecordCount));
h_outputKeyListRange = (int2**)malloc(sizeof(int2*));
saven_initialPrefixSum(g_spec->interRecordCount);
interDiffKeyCount =
sort_GPU (g_spec->interKeys,
g_spec->interAllKeySize,
g_spec->interVals,
g_spec->interAllValSize,
g_spec->interOffsetSizes,
g_spec->interRecordCount,
d_outputKeys,
d_outputVals,
d_outputOffsetSizes,
h_outputKeyListRange);
DoLog( "** InterRecordCount:%d, number of groups: %d", g_spec->interRecordCount, interDiffKeyCount);
g_spec->interKeys = d_outputKeys;
g_spec->interVals = d_outputVals;
g_spec->interOffsetSizes = d_outputOffsetSizes;
g_spec->interDiffKeyCount = interDiffKeyCount;
int keyListRangeSize = g_spec->interDiffKeyCount * sizeof(int2);
CUDA_SAFE_CALL(cudaMalloc((void**)&g_spec->interKeyListRange, keyListRangeSize));
CUDA_SAFE_CALL(cudaMemcpy(g_spec->interKeyListRange, *h_outputKeyListRange, keyListRangeSize, cudaMemcpyHostToDevice));
free(*h_outputKeyListRange);
free(h_outputKeyListRange);
}
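//--------------------------------------------------------
//Note (inferred from the usage in ReducerCount/Reducer below): after
//startGroup(), intermediate records are sorted by key and
//interKeyListRange[k] holds, for the k-th distinct key, the [start, end)
//range of record indices sharing that key, so end - start is that key's
//value count.
//--------------------------------------------------------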
//--------------------------------------------------------
//get a value from the value list of the same key
//
//param : vals             base pointer of the value buffer
//param : interOffsetSizes per-record offset/size entries (int4)
//param : keyIndex         index of the wanted value within this key's value list
//param : valStartIndex    record index of the first value of this key
//return: the wanted value
//--------------------------------------------------------
__device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
int4 offset = interOffsetSizes[valStartIndex];
return (void*)((char*)vals + keyIndex * offset.w);
}
__device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex)
{
int4 offset = interOffsetSizes[valStartIndex];
return (void*)((char*)key + keyIndex * offset.y);
}
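//--------------------------------------------------------
//Layout note (derived from how these arrays are used in this file): each
//int4 entry of an offset/size array describes one record as
//  x = key offset, y = key size, z = value offset, w = value size,
//so GetKey()/GetVal() step through a key's value list with a fixed
//per-record key/value stride taken from the list's first record.
//--------------------------------------------------------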
//---------------------------------------------------------
//called by user defined reduce_count function
//---------------------------------------------------------
__device__ void EmitCount(int keySize,
int valSize,
int* outputKeysSizePerTask,
int* outputValsSizePerTask,
int* outputCountPerTask)
{
int index = TID;
outputKeysSizePerTask[index] += keySize;
outputValsSizePerTask[index] += valSize;
outputCountPerTask[index]++;
}
//---------------------------------------------------------
//called by user defined reduce function
//---------------------------------------------------------
__device__ void Emit (char* key,
char* val,
int keySize,
int valSize,
int* psKeySizes,
int* psValSizes,
int* psCounts,
int2* keyValOffsets,
char* outputKeys,
char* outputVals,
int4* outputOffsetSizes,
int* curIndex)
{
#ifndef __DEVICE_EMULATION__
__syncthreads();
#endif
int index = TID;
char *pKeySet = (char*)(outputKeys + psKeySizes[index] + keyValOffsets[index].x);
char *pValSet = (char*)(outputVals + psValSizes[index] + keyValOffsets[index].y);
for (int i = 0; i < keySize; i++)
pKeySet[i] = key[i];
for (int i = 0; i < valSize; i++)
pValSet[i] = val[i];
keyValOffsets[index].x += keySize;
keyValOffsets[index].y += valSize;
if (curIndex[index] != 0)
{
outputOffsetSizes[psCounts[index] + curIndex[index]].x =
(outputOffsetSizes[psCounts[index] + curIndex[index] - 1].x +
outputOffsetSizes[psCounts[index] + curIndex[index] - 1].y);
outputOffsetSizes[psCounts[index] + curIndex[index]].z =
(outputOffsetSizes[psCounts[index] + curIndex[index] - 1].z +
outputOffsetSizes[psCounts[index] + curIndex[index] - 1].w);
}
outputOffsetSizes[psCounts[index] + curIndex[index]].y = keySize;
outputOffsetSizes[psCounts[index] + curIndex[index]].w = valSize;
curIndex[index]++;
}
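//---------------------------------------------------------
//Example only, not part of this library: a minimal sketch of the
//user-defined reduce_count()/reduce() pair that ReducerCount and Reducer
//below expect, assuming every intermediate value is a single int and the
//reduction sums the values of each key. The parameter lists mirror the
//call sites below; GetVal, EmitCount and Emit are the helpers above.
//Kept inside #if 0 so it does not clash with the real user functions.
//---------------------------------------------------------
#if 0
__device__ void reduce_count(char* key, char* vals, int keySize, int valCount,
			int4* interOffsetSizes,
			int* outputKeysSizePerTask,
			int* outputValsSizePerTask,
			int* outputCountPerTask)
{
	//one output record per key: the key itself plus one summed int value
	EmitCount(keySize, sizeof(int), outputKeysSizePerTask,
		outputValsSizePerTask, outputCountPerTask);
}
__device__ void reduce(char* key, char* vals, int keySize, int valCount,
			int* psKeySizes, int* psValSizes, int* psCounts,
			int2* keyValOffsets, int4* interOffsetSizes,
			char* outputKeys, char* outputVals, int4* outputOffsetSizes,
			int* curIndex, int valStartIndex)
{
	int sum = 0;
	for (int i = 0; i < valCount; i++)
		sum += *(int*)GetVal(vals, interOffsetSizes, i, valStartIndex);
	Emit(key, (char*)&sum, keySize, sizeof(int),
		psKeySizes, psValSizes, psCounts, keyValOffsets,
		outputKeys, outputVals, outputOffsetSizes, curIndex);
}
#endif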
//-------------------------------------------------------
//calculate output data's size
//-------------------------------------------------------
__global__ void ReducerCount(char* interKeys,
char* interVals,
int4* interOffsetSizes,
int2* interKeyListRange,
int* outputKeysSizePerTask,
int* outputValsSizePerTask,
int* outputCountPerTask,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
//for (int i = 0; i <= recordsPerTask; i++)
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int valStartIndex = interKeyListRange[cindex].x;
int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x;
int keySize = interOffsetSizes[interKeyListRange[cindex].x].y;
char *key = interKeys + interOffsetSizes[valStartIndex].x;
char *vals = interVals + interOffsetSizes[valStartIndex].z;
reduce_count(key,
vals,
keySize,
valCount,
interOffsetSizes,
outputKeysSizePerTask,
outputValsSizePerTask,
outputCountPerTask);
}
}
//-------------------------------------------------------
//Reducer
//
//-------------------------------------------------------
__global__ void Reducer(char* interKeys,
char* interVals,
int4* interOffsetSizes,
int2* interKeyListRange,
int* psKeySizes,
int* psValSizes,
int* psCounts,
char* outputKeys,
char* outputVals,
int4* outputOffsetSizes,
int2* keyValOffsets,
int* curIndex,
int recordNum,
int recordsPerTask,
int taskNum)
{
int index = TID;
int bid = BLOCK_ID;
int tid = THREAD_ID;
if (index*recordsPerTask >= recordNum) return;
int recordBase = bid * recordsPerTask * blockDim.x;
int terminate = (bid + 1) * (recordsPerTask * blockDim.x);
if (terminate > recordNum) terminate = recordNum;
outputOffsetSizes[psCounts[index]].x = psKeySizes[index];
outputOffsetSizes[psCounts[index]].z = psValSizes[index];
for (int i = recordBase + tid; i < terminate; i+=blockDim.x)
{
int cindex = i;
int valStartIndex = interKeyListRange[cindex].x;
int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x;
int keySize = interOffsetSizes[interKeyListRange[cindex].x].y;
char *key = interKeys + interOffsetSizes[valStartIndex].x;
char *vals = interVals + interOffsetSizes[valStartIndex].z;
reduce(key,
vals,
keySize,
valCount,
psKeySizes,
psValSizes,
psCounts,
keyValOffsets,
interOffsetSizes,
outputKeys,
outputVals,
outputOffsetSizes,
curIndex,
valStartIndex);
}
}
//----------------------------------------------
//start reduce
//
//1, if there is not a reduce phase, just return
// then user uses spec->interKeys/spec->intervals
// for further processing
//2, get reduce input data on host
//3, upload reduce input data onto device memory
//4, determine the number of threads to run
//5, calculate output data keys' buf size
// and values' buf size
//6, do prefix sum on--
// i) d_outputKeysSizePerTask
// ii) d_outputValsSizePerTask
// iii) d_outputCountPerTask
//7, allocate output memory on device memory
//8, start reduce
//9, copy output data to Spec_t structure
//10,free allocated memory
//----------------------------------------------
void startReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
if (g_spec->interKeys == NULL) {DoLog( "Error: no intermediate keys"); exit(0);}
if (g_spec->interVals == NULL) {DoLog( "Error: no intermediate values"); exit(0);}
if (g_spec->interOffsetSizes == NULL) {DoLog( "Error: no intermediate offset/size info");exit(0);}
if (g_spec->interRecordCount == 0) {DoLog( "Error: invalid intermediate record count");exit(0);}
if (g_spec->interKeyListRange == NULL) { DoLog( "Error: no key list range");exit(0);}
if (g_spec->interDiffKeyCount == 0) { DoLog( "Error: invalid intermediate diff key count");exit(0);}
//-------------------------------------------------------
//2, get reduce input data on host
//-------------------------------------------------------
int h_interDiffKeyCount = g_spec->interDiffKeyCount;
char* d_interKeys = g_spec->interKeys;
char* d_interVals = g_spec->interVals;
int4* d_interOffsetSizes = g_spec->interOffsetSizes;
int2* d_interKeyListRange = g_spec->interKeyListRange;
//----------------------------------------------
//4, determine the number of threads to run
//----------------------------------------------
dim3 h_dimBlock(g_spec->dimBlockReduce,1,1);
dim3 h_dimGrid(1,1,1);
int h_recordsPerTask = g_spec->numRecTaskReduce;
int numBlocks = CEIL(CEIL(h_interDiffKeyCount, h_recordsPerTask), h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y;
//----------------------------------------------
//5, calculate output data keys' buf size
// and values' buf size
//----------------------------------------------
DoLog( "** ReduceCount");
int* d_outputKeysSizePerTask = NULL;
cudaMalloc((void**)&d_outputKeysSizePerTask, sizeof(int)*h_actualNumThreads);
cudaMemset(d_outputKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_outputValsSizePerTask = NULL;
cudaMalloc((void**)&d_outputValsSizePerTask, sizeof(int)*h_actualNumThreads);
cudaMemset(d_outputValsSizePerTask, 0, sizeof(int)*h_actualNumThreads);
int* d_outputCountPerTask = NULL;
cudaMalloc((void**)&d_outputCountPerTask, sizeof(int)*h_actualNumThreads);
cudaMemset(d_outputCountPerTask, 0, sizeof(int)*h_actualNumThreads);
ReducerCount<<<h_dimGrid, h_dimBlock>>>(d_interKeys,
d_interVals,
d_interOffsetSizes,
d_interKeyListRange,
d_outputKeysSizePerTask,
d_outputValsSizePerTask,
d_outputCountPerTask,
h_interDiffKeyCount,
h_recordsPerTask,
h_actualNumThreads);
cudaThreadSynchronize();
//-----------------------------------------------
//6, do prefix sum on--
// i) d_outputKeysSizePerTask
// ii) d_outputValsSizePerTask
// iii) d_outputCountPerTask
//-----------------------------------------------
DoLog( "** Do prefix sum on output data's size");
int *d_psKeySizes = NULL;
cudaMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads);
cudaMemset(d_psKeySizes, 0, sizeof(int)*h_actualNumThreads);
int h_allKeySize = prefexSum((int*)d_outputKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads);
int *d_psValSizes = NULL;
cudaMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads);
cudaMemset(d_psValSizes, 0, sizeof(int)*h_actualNumThreads);
int h_allValSize = prefexSum((int*)d_outputValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads);
int *d_psCounts = NULL;
cudaMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads);
cudaMemset(d_psCounts, 0, sizeof(int)*h_actualNumThreads);
int h_allCounts = prefexSum((int*)d_outputCountPerTask, (int*)d_psCounts, h_actualNumThreads);
DoLog("** Reduce Output: key buf size %d bytes, val buf size %d bytes, index buf size %d bytes, %d records",
h_allKeySize, h_allValSize, h_allCounts*sizeof(int4),h_allCounts);
//-----------------------------------------------
//7, allocate output memory on device memory
//-----------------------------------------------
DoLog( "** Allocate intermediate memory on device memory");
char* d_outputKeys = NULL;
cudaMalloc((void**)&d_outputKeys, h_allKeySize);
char* d_outputVals = NULL;
cudaMalloc((void**)&d_outputVals, h_allValSize);
int4* d_outputOffsetSizes = NULL;
cudaMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*h_allCounts);
//--------------------------------------------------
//8, start reduce
//--------------------------------------------------
DoLog( "** Reduce");
int2* d_keyValOffsets = NULL;
cudaMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads);
cudaMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads);
int* d_curIndex = NULL;
cudaMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads);
cudaMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads);
int sizeSmem = h_dimBlock.x * sizeof(int) * 5;
Reducer<<<h_dimGrid, h_dimBlock, sizeSmem>>>(d_interKeys,
d_interVals,
d_interOffsetSizes,
d_interKeyListRange,
d_psKeySizes,
d_psValSizes,
d_psCounts,
d_outputKeys,
d_outputVals,
d_outputOffsetSizes,
d_keyValOffsets,
d_curIndex,
h_interDiffKeyCount,
h_recordsPerTask,
h_actualNumThreads);
cudaThreadSynchronize();
//-------------------------------------------------------
//9, copy output data to Spec_t structure
//-------------------------------------------------------
g_spec->outputKeys = d_outputKeys;
g_spec->outputVals = d_outputVals;
g_spec->outputOffsetSizes = d_outputOffsetSizes;
g_spec->outputRecordCount = h_allCounts;
g_spec->outputAllKeySize = h_allKeySize;
g_spec->outputAllValSize = h_allValSize;
//----------------------------------------------
//10, free allocated memory
//----------------------------------------------
cudaFree(d_interKeys);
cudaFree(d_interVals);
cudaFree(d_interOffsetSizes);
cudaFree(d_outputKeysSizePerTask);
cudaFree(d_outputValsSizePerTask);
cudaFree(d_outputCountPerTask);
cudaFree(d_psKeySizes);
cudaFree(d_psValSizes);
cudaFree(d_psCounts);
cudaFree(d_keyValOffsets);
cudaFree(d_curIndex);
}
//----------------------------------------------
//start main map reduce procedure
//1, init device
//2, start map
//3, start group (skipped when workflow is MAP_ONLY)
//4, start reduce (skipped when workflow is MAP_ONLY or MAP_GROUP)
//
//param : spec
//----------------------------------------------
void MapReduce(Spec_t *spec)
{
assert(NULL != spec);
Spec_t* g_spec = spec;
DoLog( "=====start map/reduce=====");
//-------------------------------------------
//1, init device
//-------------------------------------------
//CUT_DEVICE_INIT();
DoLog( "** init GPU");
InitMapReduce(spec);
//-------------------------------------------
//2, start map
//-------------------------------------------
DoLog( "----------start map-----------");
if (startMap(spec))
{
printf("** No output.");
return;
}
if (g_spec->workflow == MAP_ONLY)
{
g_spec->outputKeys = g_spec->interKeys;
g_spec->outputVals = g_spec->interVals;
g_spec->outputOffsetSizes = g_spec->interOffsetSizes;
g_spec->outputRecordCount = g_spec->interRecordCount;
g_spec->outputAllKeySize = g_spec->interAllKeySize;
g_spec->outputAllValSize = g_spec->interAllValSize;
goto EXIT_MR;
}
//-------------------------------------------
//3, start group
//-------------------------------------------
DoLog( "----------start group-----------");
TimeVal_t groupTimer;
startTimer(&groupTimer);
startGroup(spec);
endTimer("Group", &groupTimer);
if (g_spec->workflow == MAP_GROUP)
{
g_spec->outputKeys = g_spec->interKeys;
g_spec->outputVals = g_spec->interVals;
g_spec->outputOffsetSizes = g_spec->interOffsetSizes;
g_spec->outputRecordCount = g_spec->interRecordCount;
g_spec->outputAllKeySize = g_spec->interAllKeySize;
g_spec->outputAllValSize = g_spec->interAllValSize;
g_spec->outputDiffKeyCount = g_spec->interDiffKeyCount;
if (g_spec->outputToHost == 1)
{
g_spec->outputKeyListRange = (int2*)malloc(sizeof(int2)*g_spec->outputDiffKeyCount);
CUDA_SAFE_CALL(cudaMemcpy(g_spec->outputKeyListRange, g_spec->interKeyListRange, sizeof(int2)*g_spec->outputDiffKeyCount, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(g_spec->interKeyListRange));
}
goto EXIT_MR;
}
//-------------------------------------------
//4, start reduce
//-------------------------------------------
DoLog( "----------start reduce--------");
TimeVal_t reduceTimer;
startTimer(&reduceTimer);
startReduce(spec);
endTimer("Reduce", &reduceTimer);
EXIT_MR:
if (g_spec->outputToHost == 1)
{
int indexSize = g_spec->outputRecordCount * sizeof(int4);
char* h_outputKeys = (char*)malloc(g_spec->outputAllKeySize);
if (h_outputKeys == NULL) exit(0);
char* h_outputVals = (char*)malloc(g_spec->outputAllValSize);
if (h_outputVals == NULL) exit(0);
int4* h_outputOffsetSizes = (int4*)malloc(indexSize);
if (h_outputOffsetSizes == NULL) exit(0);
CUDA_SAFE_CALL(cudaMemcpy(h_outputKeys, g_spec->outputKeys, g_spec->outputAllKeySize, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(h_outputVals, g_spec->outputVals, g_spec->outputAllValSize, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(h_outputOffsetSizes, g_spec->outputOffsetSizes, indexSize, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(g_spec->outputKeys));
CUDA_SAFE_CALL(cudaFree(g_spec->outputVals));
CUDA_SAFE_CALL(cudaFree(g_spec->outputOffsetSizes));
g_spec->outputKeys = h_outputKeys;
g_spec->outputVals = h_outputVals;
g_spec->outputOffsetSizes = h_outputOffsetSizes;
}
}
//------------------------------------------
//the last step
//
//1, free global variables' memory
//2, close log file's file pointer
//------------------------------------------
void FinishMapReduce(Spec_t* spec)
{
Spec_t* g_spec = spec;
//-------------------------------------------
//1, free global variables' memory
//-------------------------------------------
free(g_spec->inputKeys);
free(g_spec->inputVals);
free(g_spec->inputOffsetSizes);
if (g_spec->outputToHost == 1)
{
free(g_spec->outputKeys);
free(g_spec->outputVals);
free(g_spec->outputOffsetSizes);
if (g_spec->workflow == MAP_GROUP)
free(g_spec->outputKeyListRange);
}
else
{
cudaFree(g_spec->outputKeys);
cudaFree(g_spec->outputVals);
cudaFree(g_spec->outputOffsetSizes);
if (g_spec->workflow == MAP_GROUP)
cudaFree(g_spec->outputKeyListRange);
}
free(g_spec);
DoLog( "=====finish map/reduce=====");
}
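//----------------------------------------------
//Example only, not part of this library: a minimal host-side sketch of how
//an application might drive the functions above. Fields marked "assumed"
//are not set in this section of the file and are inferred from the h_input*
//variables read at the top of startMap(); MAP_REDUCE is the assumed name of
//the full-pipeline workflow value (only MAP_ONLY and MAP_GROUP appear above).
//Kept inside #if 0 as a sketch.
//----------------------------------------------
#if 0
int runJob(char* keys, int keysBufSize, char* vals, int valsBufSize,
	int4* offsets, int recordCount)
{
	Spec_t* spec = (Spec_t*)malloc(sizeof(Spec_t));
	memset(spec, 0, sizeof(Spec_t));
	spec->workflow = MAP_REDUCE;             //assumed enum value
	spec->outputToHost = 1;                  //copy results back to host buffers
	spec->dimBlockMap = 256;                 //threads per block, map phase
	spec->numRecTaskMap = 1;                 //records per map task
	spec->dimBlockReduce = 256;
	spec->numRecTaskReduce = 1;
	spec->inputKeys = keys;                  //host-side input buffers
	spec->inputVals = vals;
	spec->inputOffsetSizes = offsets;
	spec->inputRecordCount = recordCount;    //assumed field name
	spec->inputKeysBufSize = keysBufSize;    //assumed field name
	spec->inputValsBufSize = valsBufSize;    //assumed field name
	MapReduce(spec);                         //map, group and reduce on the GPU
	for (int i = 0; i < spec->outputRecordCount; i++)
	{
		int4 ofs = spec->outputOffsetSizes[i];
		//key bytes:   spec->outputKeys + ofs.x, length ofs.y
		//value bytes: spec->outputVals + ofs.z, length ofs.w
	}
	FinishMapReduce(spec);                   //frees host and device buffers
	return 0;
}
#endif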
#endif //__MRLIB_CU__
|
5b031045dcaa9ab5d51cadc9b406554e6a866dff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define NNEU %(nneu)d
#define n_micro 30000.0
__global__ void rpam(
int neu_num,
%(type)s *Np,
%(type)s *n_photon,
%(type)s *rand)
{
	bool not_converged = true;                 //unused below
	%(type)s lambda_m, n_m, fe, fa, n_m_temp;
	n_m = n_micro;
	lambda_m = 0;
	float fx[6];                               //unused below
	float p[20];                               //Poisson probabilities for 0..19 absorbed photons
	float factorial = 1.0f;                    //float so that k! does not overflow for k up to 19
	//mean number of photons absorbed per microvillus
	lambda_m = n_photon[neu_num]/n_micro;
	//Poisson pmf: p[k] = exp(-lambda_m) * lambda_m^k / k!
	for(int ii = 0; ii < 20; ++ii){
		if (ii > 0){
			factorial = factorial * ii;
		}
		p[ii] = exp(-lambda_m) * (pow(lambda_m, ii)) / factorial;
	}
	//expected number of microvilli that absorb exactly ii photons
	int num_abs[20];
	for(int ii = 1; ii < 20; ++ii){
		num_abs[ii] = p[ii]*n_micro;
	}
	num_abs[0] = 0;
	//assign ii absorbed photons to randomly chosen microvilli;
	//rand is assumed to hold microvillus indices, hence the cast
	for(int ii = 1; ii < 20; ++ii){
		for(int jj = 0; jj < num_abs[ii]; ++jj){
			Np[(int)rand[jj + num_abs[ii - 1]]] = ii;
		}
	}
}
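//Example of the numbers involved (values assumed): with n_photon[neu_num] = 3000
//and n_micro = 30000, lambda_m = 0.1, so p[0] ~= 0.905, p[1] ~= 0.0905 and
//p[2] ~= 0.0045; num_abs[1] ~= 2714 and num_abs[2] ~= 135 microvilli then get
//assigned 1 and 2 absorbed photons respectively.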
|
5b031045dcaa9ab5d51cadc9b406554e6a866dff.cu
|
#define NNEU %(nneu)d
#define n_micro 30000.0
__global__ void rpam(
int neu_num,
%(type)s *Np,
%(type)s *n_photon,
%(type)s *rand)
{
	bool not_converged = true;                 //unused below
	%(type)s lambda_m, n_m, fe, fa, n_m_temp;
	n_m = n_micro;
	lambda_m = 0;
	float fx[6];                               //unused below
	float p[20];                               //Poisson probabilities for 0..19 absorbed photons
	float factorial = 1.0f;                    //float so that k! does not overflow for k up to 19
	//mean number of photons absorbed per microvillus
	lambda_m = n_photon[neu_num]/n_micro;
	//Poisson pmf: p[k] = exp(-lambda_m) * lambda_m^k / k!
	for(int ii = 0; ii < 20; ++ii){
		if (ii > 0){
			factorial = factorial * ii;
		}
		p[ii] = exp(-lambda_m) * (pow(lambda_m, ii)) / factorial;
	}
	//expected number of microvilli that absorb exactly ii photons
	int num_abs[20];
	for(int ii = 1; ii < 20; ++ii){
		num_abs[ii] = p[ii]*n_micro;
	}
	num_abs[0] = 0;
	//assign ii absorbed photons to randomly chosen microvilli;
	//rand is assumed to hold microvillus indices, hence the cast
	for(int ii = 1; ii < 20; ++ii){
		for(int jj = 0; jj < num_abs[ii]; ++jj){
			Np[(int)rand[jj + num_abs[ii - 1]]] = ii;
		}
	}
}
|