hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M)
---|---|---|---|
b4f72ec8ea43dc99762d7e375083aa4bce06b190.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
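// Unweighted single-source shortest paths on the wiki-Vote graph: each kernel
// launch below performs one Bellman-Ford relaxation sweep with edge weight 1,
// and the host keeps launching until no distance improves (d_change stays 0)
// or n-1 sweeps have been run.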
__device__ int d_change;
__global__ void bellman_ford(int *d_g, int *d_d, int k, int n)
{
d_change = 0;
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<n)
{
int cur_dis = d_d[i];
__syncthreads();
int j;
for (j=1; j<=d_g[i*n+0];j++)
{
if (cur_dis > d_d[d_g[i*n+j]] + 1)
{
cur_dis = d_d[d_g[i*n+j]] + 1;
d_change = 1;
}
}
__syncthreads();
d_d[i] = cur_dis;
}
}
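// Adjacency storage: h_graph[v][0] holds the in-degree of vertex v and
// h_graph[v][1..h_graph[v][0]] the source vertices of its incoming edges.
// The same n x n layout is copied row-major into d_g, hence the d_g[i*n + j]
// indexing in the kernel.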
int h_graph[9000][9000];
int main( int argc, char* argv[] )
{
FILE *fp = fopen("wiki-Vote.txt","r");
int source =0,dest=0, n =9000,i;
srand(time(NULL));
while(!feof(fp))
{
fscanf(fp,"%d",&source);
fscanf(fp,"%d",&dest);
h_graph[dest][0]++;
h_graph[dest][h_graph[dest][0]] = source;
}
fclose(fp);
int *d_g;
const size_t a_size = sizeof(int) * size_t(n*n);
int block_size = atoi(argv[1]);
int n_blocks = n/block_size + (n%block_size==0?0:1);
int h_s = 3;
int h_d[9000], *d_d, k;
for(i=0; i<n; i++)
h_d[i] = (int)1e5;
h_d[h_s] = 0;
float time;
hipEvent_t start, stop;
hipEventCreate(&start) ;
hipEventCreate(&stop) ;
hipEventRecord(start, 0) ;
hipMalloc((void **)&d_g, a_size);
hipMemcpy(d_g, h_graph, a_size, hipMemcpyHostToDevice);
hipMalloc(&d_d, n*sizeof(int));
hipMemcpy(d_d, h_d,n*sizeof(int),hipMemcpyHostToDevice);
hipEventRecord(stop, 0) ;
hipEventSynchronize(stop) ;
hipEventElapsedTime(&time, start, stop) ;
printf("w %f\n", time);
hipEventCreate(&start) ;
hipEventCreate(&stop) ;
hipEventRecord(start, 0) ;
for (k=0;k<n-1;k++)
{
hipLaunchKernelGGL(( bellman_ford), dim3(n_blocks),dim3(block_size), 0, 0, d_g, d_d, k, n);
int answer;
hipMemcpyFromSymbol(&answer, d_change, sizeof(int), 0, hipMemcpyDeviceToHost);
if (answer == 0)
break;
}
hipEventRecord(stop, 0) ;
hipEventSynchronize(stop) ;
hipEventElapsedTime(&time, start, stop) ;
printf("e %f\n", time);
hipEventCreate(&start) ;
hipEventCreate(&stop) ;
hipEventRecord(start, 0) ;
hipMemcpy(h_d, d_d,n*sizeof(int),hipMemcpyDeviceToHost);
hipEventRecord(stop, 0) ;
hipEventSynchronize(stop) ;
hipEventElapsedTime(&time, start, stop) ;
printf("w %f\n", time);
FILE *op = fopen("bellman-al.txt","w");
for (i=0;i<n;i++)
{
fprintf(op,"%d: %d\n",i,h_d[i]);
}
fclose(op);
return 0;
}
| b4f72ec8ea43dc99762d7e375083aa4bce06b190.cu | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
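// Unweighted single-source shortest paths on the wiki-Vote graph: each kernel
// launch below performs one Bellman-Ford relaxation sweep with edge weight 1,
// and the host keeps launching until no distance improves (d_change stays 0)
// or n-1 sweeps have been run.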
__device__ int d_change;
__global__ void bellman_ford(int *d_g, int *d_d, int k, int n)
{
d_change = 0;
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i<n)
{
int cur_dis = d_d[i];
__syncthreads();
int j;
for (j=1; j<=d_g[i*n+0];j++)
{
if (cur_dis > d_d[d_g[i*n+j]] + 1)
{
cur_dis = d_d[d_g[i*n+j]] + 1;
d_change = 1;
}
}
__syncthreads();
d_d[i] = cur_dis;
}
}
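// Adjacency storage: h_graph[v][0] holds the in-degree of vertex v and
// h_graph[v][1..h_graph[v][0]] the source vertices of its incoming edges.
// The same n x n layout is copied row-major into d_g, hence the d_g[i*n + j]
// indexing in the kernel.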
int h_graph[9000][9000];
int main( int argc, char* argv[] )
{
FILE *fp = fopen("wiki-Vote.txt","r");
int source =0,dest=0, n =9000,i;
srand(time(NULL));
while(!feof(fp))
{
fscanf(fp,"%d",&source);
fscanf(fp,"%d",&dest);
h_graph[dest][0]++;
h_graph[dest][h_graph[dest][0]] = source;
}
fclose(fp);
int *d_g;
const size_t a_size = sizeof(int) * size_t(n*n);
int block_size = atoi(argv[1]);
int n_blocks = n/block_size + (n%block_size==0?0:1);
int h_s = 3;
int h_d[9000], *d_d, k;
for(i=0; i<n; i++)
h_d[i] = (int)1e5;
h_d[h_s] = 0;
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start) ;
cudaEventCreate(&stop) ;
cudaEventRecord(start, 0) ;
cudaMalloc((void **)&d_g, a_size);
cudaMemcpy(d_g, h_graph, a_size, cudaMemcpyHostToDevice);
cudaMalloc(&d_d, n*sizeof(int));
cudaMemcpy(d_d, h_d,n*sizeof(int),cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0) ;
cudaEventSynchronize(stop) ;
cudaEventElapsedTime(&time, start, stop) ;
printf("w %f\n", time);
cudaEventCreate(&start) ;
cudaEventCreate(&stop) ;
cudaEventRecord(start, 0) ;
for (k=0;k<n-1;k++)
{
bellman_ford<<<n_blocks,block_size>>>(d_g, d_d, k, n);
int answer;
cudaMemcpyFromSymbol(&answer, d_change, sizeof(int), 0, cudaMemcpyDeviceToHost);
if (answer == 0)
break;
}
cudaEventRecord(stop, 0) ;
cudaEventSynchronize(stop) ;
cudaEventElapsedTime(&time, start, stop) ;
printf("e %f\n", time);
cudaEventCreate(&start) ;
cudaEventCreate(&stop) ;
cudaEventRecord(start, 0) ;
cudaMemcpy(h_d, d_d,n*sizeof(int),cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0) ;
cudaEventSynchronize(stop) ;
cudaEventElapsedTime(&time, start, stop) ;
printf("w %f\n", time);
FILE *op = fopen("bellman-al.txt","w");
for (i=0;i<n;i++)
{
fprintf(op,"%d: %d\n",i,h_d[i]);
}
fclose(op);
return 0;
}
|
55301c8045b6ae2b200a11db9e4d13d4a918e13a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
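// For a single pixel index i, that separation is just three component copies
// (this is what the separateChannels kernel below implements):
// redChannel[i] = inputImageRGBA[i].x;
// greenChannel[i] = inputImageRGBA[i].y;
// blueChannel[i] = inputImageRGBA[i].z;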
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
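// As a formula, with W = filterWidth and neighbor coordinates clamped to the image:
// out(r, c) = sum over fr, fc in [0, W) of filter[fr][fc] * in(r + fr - W/2, c + fc - W/2)
// which is what each thread of the gaussian_blur kernel below computes.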
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <iostream>
__global__ void gaussian_blur(const unsigned char *const inputChannel,
unsigned char *const outputChannel,
int numRows, int numCols,
const float *const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
int c = threadIdx.x + blockIdx.x * blockDim.x;
int r = threadIdx.y + blockIdx.y * blockDim.y;
if (r >= numRows || c >= numCols)
return;
float sum = 0.0f;
for (int fr = 0; fr < filterWidth; ++fr)
{
for (int fc = 0; fc < filterWidth; ++fc)
{
int _r = r + fr - (filterWidth / 2);
int _c = c + fc - (filterWidth / 2);
_r = _r < numRows ? (_r >= 0 ? _r : 0) : numRows - 1;
_c = _c < numCols ? (_c >= 0 ? _c : 0) : numCols - 1;
auto pixel = inputChannel[_r * numCols + _c];
sum = sum + pixel * filter[fr * filterWidth + fc];
}
}
outputChannel[r * numCols + c] = static_cast<unsigned char>(sum);
// __shared__ float sum;
// if (threadIdx.x == 0 && threadIdx.y == 0)
// sum = 0.0f;
// __syncthreads();
// int filter_idx = threadIdx.x;
// int filter_idy = threadIdx.y;
// if (blockIdx.x >= numCols || blockIdx.y >= numRows)
// ;
// else
// {
// int _x = blockIdx.x + (filter_idx - (filterWidth / 2));
// int _y = blockIdx.y + (filter_idy - (filterWidth / 2));
// _x = _x < numCols ? (_x >= 0 ? _x : 0) : numCols - 1;
// _y = _y < numRows ? (_y >= 0 ? _y : 0) : numRows - 1;
// const char input_pixel = inputChannel[_y * numCols + _x];
// atomicAdd(&sum, filter[filter_idy * filterWidth + filter_idx] * input_pixel);
// }
// __syncthreads();
// if (blockIdx.x >= numCols || blockIdx.y >= numRows)
// ;
// else if (threadIdx.x == 0 && threadIdx.y == 0)
// outputChannel[blockIdx.y * numCols + blockIdx.x] = static_cast<unsigned char>(sum);
// // }
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__ void separateChannels(const uchar4 *const inputImageRGBA,
int numRows,
int numCols,
unsigned char *const redChannel,
unsigned char *const greenChannel,
unsigned char *const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int c = threadIdx.x + blockIdx.x * blockDim.x;
int r = threadIdx.y + blockIdx.y * blockDim.y;
int i = r * numCols + c;
if (i < numRows * numCols)
{
redChannel[i] = inputImageRGBA[i].x;
greenChannel[i] = inputImageRGBA[i].y;
blueChannel[i] = inputImageRGBA[i].z;
}
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__ void recombineChannels(const unsigned char *const redChannel,
const unsigned char *const greenChannel,
const unsigned char *const blueChannel,
uchar4 *const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float *const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc((void **)&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 *const h_inputImageRGBA, uchar4 *const d_inputImageRGBA,
uchar4 *const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32, 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(numCols / 32 + 1, numRows / 32 + 1);
// float *filter_ = new float[filterWidth * filterWidth];
// checkCudaErrors(hipMemcpy(filter_, d_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyDeviceToHost));
// std::cout << "Filter: \n";
// float sum = 0.0f;
// for (int k = 0; k < filterWidth; ++k)
// {
// std::cout << std::endl;
// for (int i = k * filterWidth; i < (k + 1) * filterWidth; ++i)
// {
// std::cout << filter_[i] << "\t";
// sum += filter_[i];
// }
// }
// std::cout << std::endl
// << "Sum: " << sum << std::endl;
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup()
{
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
| 55301c8045b6ae2b200a11db9e4d13d4a918e13a.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
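// For a single pixel index i, that separation is just three component copies
// (this is what the separateChannels kernel below implements):
// redChannel[i] = inputImageRGBA[i].x;
// greenChannel[i] = inputImageRGBA[i].y;
// blueChannel[i] = inputImageRGBA[i].z;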
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
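// As a formula, with W = filterWidth and neighbor coordinates clamped to the image:
// out(r, c) = sum over fr, fc in [0, W) of filter[fr][fc] * in(r + fr - W/2, c + fc - W/2)
// which is what each thread of the gaussian_blur kernel below computes.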
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <iostream>
__global__ void gaussian_blur(const unsigned char *const inputChannel,
unsigned char *const outputChannel,
int numRows, int numCols,
const float *const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
int c = threadIdx.x + blockIdx.x * blockDim.x;
int r = threadIdx.y + blockIdx.y * blockDim.y;
if (r >= numRows || c >= numCols)
return;
float sum = 0.0f;
for (int fr = 0; fr < filterWidth; ++fr)
{
for (int fc = 0; fc < filterWidth; ++fc)
{
int _r = r + fr - (filterWidth / 2);
int _c = c + fc - (filterWidth / 2);
_r = _r < numRows ? (_r >= 0 ? _r : 0) : numRows - 1;
_c = _c < numCols ? (_c >= 0 ? _c : 0) : numCols - 1;
auto pixel = inputChannel[_r * numCols + _c];
sum = sum + pixel * filter[fr * filterWidth + fc];
}
}
outputChannel[r * numCols + c] = static_cast<unsigned char>(sum);
// __shared__ float sum;
// if (threadIdx.x == 0 && threadIdx.y == 0)
// sum = 0.0f;
// __syncthreads();
// int filter_idx = threadIdx.x;
// int filter_idy = threadIdx.y;
// if (blockIdx.x >= numCols || blockIdx.y >= numRows)
// ;
// else
// {
// int _x = blockIdx.x + (filter_idx - (filterWidth / 2));
// int _y = blockIdx.y + (filter_idy - (filterWidth / 2));
// _x = _x < numCols ? (_x >= 0 ? _x : 0) : numCols - 1;
// _y = _y < numRows ? (_y >= 0 ? _y : 0) : numRows - 1;
// const char input_pixel = inputChannel[_y * numCols + _x];
// atomicAdd(&sum, filter[filter_idy * filterWidth + filter_idx] * input_pixel);
// }
// __syncthreads();
// if (blockIdx.x >= numCols || blockIdx.y >= numRows)
// ;
// else if (threadIdx.x == 0 && threadIdx.y == 0)
// outputChannel[blockIdx.y * numCols + blockIdx.x] = static_cast<unsigned char>(sum);
// // }
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__ void separateChannels(const uchar4 *const inputImageRGBA,
int numRows,
int numCols,
unsigned char *const redChannel,
unsigned char *const greenChannel,
unsigned char *const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int c = threadIdx.x + blockIdx.x * blockDim.x;
int r = threadIdx.y + blockIdx.y * blockDim.y;
int i = r * numCols + c;
if (i < numRows * numCols)
{
redChannel[i] = inputImageRGBA[i].x;
greenChannel[i] = inputImageRGBA[i].y;
blueChannel[i] = inputImageRGBA[i].z;
}
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__ void recombineChannels(const unsigned char *const redChannel,
const unsigned char *const greenChannel,
const unsigned char *const blueChannel,
uchar4 *const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float *const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc((void **)&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 *const h_inputImageRGBA, uchar4 *const d_inputImageRGBA,
uchar4 *const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32, 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(numCols / 32 + 1, numRows / 32 + 1);
// float *filter_ = new float[filterWidth * filterWidth];
// checkCudaErrors(cudaMemcpy(filter_, d_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyDeviceToHost));
// std::cout << "Filter: \n";
// float sum = 0.0f;
// for (int k = 0; k < filterWidth; ++k)
// {
// std::cout << std::endl;
// for (int i = k * filterWidth; i < (k + 1) * filterWidth; ++i)
// {
// std::cout << filter_[i] << "\t";
// sum += filter_[i];
// }
// }
// std::cout << std::endl
// << "Sum: " << sum << std::endl;
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup()
{
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
76b7f78546900f383e56101d69b30496cd61b0d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/embed.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T, typename T1>
__global__ void kernel_embed_forward(const int num, T1 *y, const T *x,
const T1 *w, int stride0) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i = idx / stride0;
const int j = idx % stride0;
y[idx] = w[x[i] * stride0 + j];
}
}
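// Backward pass w.r.t. the embedding weight: each output-gradient row dy[i, :]
// is scatter-added into row x[i] of dw (stride0 is the embedding dimension);
// atomicAdd keeps the accumulation correct when an index repeats in x.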
template <typename T, typename T1>
__global__ void kernel_embed_backward_weight(const int num, T1 *dw, const T *x,
const T1 *dy, int stride0) {
// TODO: optimize
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i = idx / stride0;
const int j = idx % stride0;
atomicAdd(dw + x[i] * stride0 + j, dy[i * stride0 + j]);
}
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::setup_impl(const Variables &inputs,
const Variables &outputs) {
Embed<T, T1>::setup_impl(inputs, outputs);
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
const T1 *w = inputs[1]->get_data_pointer<T1>(this->ctx_);
T1 *y = outputs[0]->cast_data_and_get_pointer<T1>(this->ctx_);
Size_t stride0 = inputs[1]->size(1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_embed_forward,
inputs[0]->size() * stride0, y, x, w, stride0);
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
NBLA_CHECK(!propagate_down[0], error_code::value,
"Index array can not be propagated down.");
if (!propagate_down[1]) {
return;
}
cuda_set_device(std::stoi(this->ctx_.device_id));
if (!accum[1])
inputs[1]->grad()->zero();
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
T1 *dw = inputs[1]->cast_grad_and_get_pointer<T1>(this->ctx_);
const T1 *dy = outputs[0]->get_grad_pointer<T1>(this->ctx_);
Size_t stride0 = inputs[1]->size(1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_embed_backward_weight,
inputs[0]->size() * stride0, dw, x, dy,
stride0);
}
// template instantiation
template class EmbedCuda<int, float>;
}
| 76b7f78546900f383e56101d69b30496cd61b0d4.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/embed.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T, typename T1>
__global__ void kernel_embed_forward(const int num, T1 *y, const T *x,
const T1 *w, int stride0) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i = idx / stride0;
const int j = idx % stride0;
y[idx] = w[x[i] * stride0 + j];
}
}
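// Backward pass w.r.t. the embedding weight: each output-gradient row dy[i, :]
// is scatter-added into row x[i] of dw (stride0 is the embedding dimension);
// atomicAdd keeps the accumulation correct when an index repeats in x.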
template <typename T, typename T1>
__global__ void kernel_embed_backward_weight(const int num, T1 *dw, const T *x,
const T1 *dy, int stride0) {
// TODO: optimize
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i = idx / stride0;
const int j = idx % stride0;
atomicAdd(dw + x[i] * stride0 + j, dy[i * stride0 + j]);
}
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::setup_impl(const Variables &inputs,
const Variables &outputs) {
Embed<T, T1>::setup_impl(inputs, outputs);
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
const T1 *w = inputs[1]->get_data_pointer<T1>(this->ctx_);
T1 *y = outputs[0]->cast_data_and_get_pointer<T1>(this->ctx_);
Size_t stride0 = inputs[1]->size(1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_embed_forward,
inputs[0]->size() * stride0, y, x, w, stride0);
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
NBLA_CHECK(!propagate_down[0], error_code::value,
"Index array can not be propagated down.");
if (!propagate_down[1]) {
return;
}
cuda_set_device(std::stoi(this->ctx_.device_id));
if (!accum[1])
inputs[1]->grad()->zero();
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
T1 *dw = inputs[1]->cast_grad_and_get_pointer<T1>(this->ctx_);
const T1 *dy = outputs[0]->get_grad_pointer<T1>(this->ctx_);
Size_t stride0 = inputs[1]->size(1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_embed_backward_weight,
inputs[0]->size() * stride0, dw, x, dy,
stride0);
}
// template instantiation
template class EmbedCuda<int, float>;
}
|
e8b0c3c104f065f283d6c2df7b628179339c13b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* author: ck
* 26.04.2011
*/
#include "mex.h"
#include "rocblas.h"
#include "cutil_inline.h"
#include <iostream>
#include <algorithm>
#include "cuPrintf.hip"
#define BLOCK_SIZE 400
// setup execution parameters
//dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
//dim3 grid(WC / threads.x, HC / threads.y);
int blocks=BLOCK_SIZE;
//int threads=400;
int threads=100;
// Tensor .* operation. Multiply corresponding entries of tensors A,B of same size
// Store the result in tensor C
// two operators are available
// hadamard product: multiplies each element of input objects elementwise
// C = A .* B
// requires two input tensors A, B as input
// contract product: performs matrix multiplication if elements are 2 dimensional
// C = A * B
// requires five input arguments A, A_cardinalities, B, B_cardinalities, C_cardinalities
// objects (A,B,C) must have same number of dimensions
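// Illustrative MATLAB usage (the callable name is whatever this MEX file is
// compiled as; "tensormul" is only a placeholder). Inputs are single precision:
// C = tensormul(single(A), single(B)); % hadamard, same sizes
// C = tensormul(single(A), cardA, single(B), cardB, cardC); % contraction
// cardA/cardB/cardC are single vectors of equal length; a 0 entry marks a
// dimension that object does not span, and zeros in cardC are summed out.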
void print( const mxArray *prhs[], float* output, int total_size);
// cuda tensor operation configuration object
struct ct_config{
// defines how many dimensions are there
size_t ndims;
// defines the maximum possible size of each dimension
// for all tensors using this configuration
// must be allocated dynamically as an array of type size_t
// size of the array must be equal to ndims
size_t* cardinalities;
// total size of the related objects
// maximum of cardinality of input objects
// cardinality for an object is found by multiplying object's cardinalities of each dimension
size_t total_cardinality;
// number of elements in the data
size_t element_number;
// index of the dimension to contract over
//size_t contract_dim;
};
// cuda tensor object
struct ct{
// related configuration object
ct_config* config;
// defines size of each dimension for this tensor
// must be allocated dynamically as an array of type size_t
// size of the array must be equal to config.ndims
size_t* cardinalities;
// size of the corresponding data
size_t mem_size;
// points to the values of this tensor
float* data;
};
// compact structure carying pointers to elements of a cudatensor on the device
struct dev_ct_ptrs{
ct* ct;
ct_config* ctc;
size_t* cardinalities;
float* data;
};
// multiply corresponding elemens of A, B tensors, put result in tensor C
__global__ void
tensorHadamard( ct* C, ct* A, ct* B)
{
// Block index
size_t bx = blockIdx.x;
//int by = blockIdx.y;
// Thread index
size_t tx = threadIdx.x;
//int ty = threadIdx.y;
size_t threadsPerblock = blockDim.x * blockDim.y * blockDim.z;
size_t thread_id = bx * threadsPerblock + tx;
if ( thread_id < A->config->element_number ){
C->data[thread_id] = A->data[thread_id] * B->data[thread_id];
}
}
// multiply corresponding elements and contract along specified dimension
__global__ void
tensorContract( ct* C_full, ct* C, ct* A, ct* B )
{
size_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
//size_t thread_id = threadIdx.x + (threadIdx.y * blockDim.x) + (threadIdx.x * threadIdx.y * blockDim.y);
//size_t block_id = blockIdx.x + (blockIdx.y * gridDim.x);
// assumes same total dimensions and cardinalities for all objects
size_t tot_card = A->config->total_cardinality;
size_t ndims = A->config->ndims;
if ( thread_id < tot_card ){
//extern __shared__ int C_shared[];
//size_t uclu[3];
//for (size_t i=0; i<3; i++) {uclu[i]=0; }
int index_number_A=0;
int index_number_B=0;
int index_number_C=0;
for (size_t obj=0; obj<2; obj++){
ct* p;
if (obj==0) p = A;
else if (obj==1) p = B;
//else if (obj==2) p = C;
size_t t_id_rem = thread_id;
size_t cumulative_offset_ind = 1;
size_t cumulative_offset_elnum = 1;
size_t cur_card_index=0;
for (size_t card_index=0; card_index < ndims; card_index++){
if ( t_id_rem == 0 ) break;
//cuPrintf("card_index %d t_id_rem %d cumulative_offset_ind %d\n",card_index, t_id_rem, cumulative_offset_ind);
//uclu[card_index] = (t_id_rem % p->config->cardinalities[card_index]);// * cumulative_offset_ind;
cur_card_index = (t_id_rem % p->config->cardinalities[card_index]);// * cumulative_offset_ind;
t_id_rem = (size_t) t_id_rem / p->config->cardinalities[card_index];
if (p->cardinalities[card_index] != 0){
// does it break without the int cast?
if (obj==0) index_number_A += (int)cur_card_index * (int)cumulative_offset_elnum;
else if (obj==1) index_number_B += (int)cur_card_index * (int)cumulative_offset_elnum;
//else if (obj==2) index_number_C += (int)cur_card_index * (int)cumulative_offset_elnum;
// increment cumulative offset with current dimension cardinality for next loop
// -1 for cardinalities are indexed from 1
//cumulative_offset_ind *= p->config->cardinalities[card_index] - 1 ;
cumulative_offset_elnum *= p->config->cardinalities[card_index] ;
}
}
}
size_t tmpB = B->data[index_number_B];
size_t tmpA= A->data[index_number_A];
size_t tmpC_full= C_full->data[thread_id];
//cuPrintf("C[%d] %d += A[%d] %d * B[%d] %d\n", thread_id, tmpC_full, index_number_A, tmpA, index_number_B, tmpB);
//~/arastir/cuda2/cudainstall/3.2/sdk/C/src/reduction/doc/reduction.pdf
// extern __shared__ int sdata[];
// // each thread loads one element from global to shared mem
// unsigned int tid = threadIdx.x;
// unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
// sdata[tid] = C->data[i];
__syncthreads();
// sdata[index_number_C] += A->data[index_number_A] * B->data[index_number_B];
//cuPrintf("C_full->data[%d] = %d ", thread_id, tmpA * tmpB);
C_full->data[thread_id] = A->data[index_number_A] * B->data[index_number_B];
// size_t tmpC_shared = C_shared[index_number_C];
// cuPrintf("C_shared[%d] %d += %d * %d\n", index_number_C, tmpC_shared, tmpA,tmpB);
// C_shared[index_number_C] += A->data[index_number_A] * B->data[index_number_B];
// __syncthreads();
// if(thread_id < 4){
// C->data[thread_id] = C_shared[thread_id];
// }
// contract on dimensions with zero cardinality
size_t cum_card=1;
bool foundone=false;
for (size_t card_index=0; card_index<ndims; card_index++){
size_t current_card=C->cardinalities[card_index];
if( current_card == 0 ) {
foundone=true;
// contract on this dimension
size_t C_ind=0;
for (size_t C_full_ind=0; C_full_ind < tot_card-1;){
size_t tmp=0;
for (size_t el=0; el<cum_card; el++){
size_t increment = el * (cum_card);
size_t tmpcf = C_full->data[C_full_ind + increment];
if(thread_id==0)
cuPrintf("C_full_ind %d: tmp %d += C_full->data[ %d + %d ] %d \n", C_full_ind, tmp , C_full_ind, increment , tmpcf);
tmp += tmpcf;
}
C->data[C_ind] = tmp;
C_ind++;
if (C_full_ind % cum_card == (cum_card-1) ){
C_full_ind += cum_card * (cum_card-1) + 1;
}else{
C_full_ind++;
}
}
}
cum_card *= current_card;
}
if (foundone == false){
C->data[thread_id] = C_full->data[thread_id];
}
// size_t tmpS= sdata[tid];
// cuPrintf("C %d\n",tmpS);
//tmpC= C->data[index_number_C];
//cuPrintf("C %d\n",tmpC);
//cuPrintf("uclu %d %d %d index_number_A %d index_number_B %d\n", uclu[0], uclu[1], uclu[2], (int)index_number_A, index_number_B);
//cuPrintf("A %d B %d C %d\n", index_number_A, index_number_B, index_number_C);
}
}
__global__ void
assignCudatensorConfig( ct_config* ctc, size_t* cards ){
ctc->cardinalities = cards;
}
__global__ void
assignCudatensor( ct* c, ct_config* ctc, size_t* cards, size_t mem_size, float* data){
c->config = ctc;
c->cardinalities = cards;
c->mem_size = mem_size;
c->data = data;
}
void print_ct_config(char* txt, ct_config* ctc){
std::cout << txt << std::endl;
std::cout << "Number of dimensions " << (int) (ctc->ndims) << std::endl;
//std::cout << "Contract dimension " << (int) (ctc->contract_dim) << std::endl;
std::cout << "Cardinalities for each dimension of this configuration " << std::endl;
size_t i=0;
for ( i=0; i< ctc->ndims; i++){
std::cout << ctc->cardinalities[i] << " ";
}
std::cout << "\nTotal cardinality: " << ctc->total_cardinality << std::endl;
std::cout << "\nElement number: " << ctc->element_number << std::endl;
std::cout << std::endl << std::endl << std::endl;
}
void print_ct(char* txt, ct* ct, bool print_config=false, bool printdata=false){
std::cout << txt << std::endl;
if (print_config) print_ct_config(txt, ct->config);
std::cout << "Mem size " << ct->mem_size << std::endl;
std::cout << "Cardinalities for each dimension of this object "<< std::endl;
for (size_t i=0; i< ct->config->ndims; i++){
std::cout << ct->cardinalities[i] << " ";
}
std::cout << std::endl;
if (printdata){
std::cout << "Data" << std::endl;
for (size_t i=0; i< ct->config->element_number; i++){
std::cout << ct->data[i] << " ";
}
}
std::cout << std::endl << std::endl << std::endl;
}
// returns a dev_ct_ptrs struct with information about the cudatensor generated on the device
dev_ct_ptrs prepareDeviceTensor(ct_config* h_ctc, ct_config* d_ctc, ct* h_ct,
const mxArray* data, const mxArray* tensor_card = NULL){
// generate h_ct
h_ct->config = h_ctc;
h_ct->cardinalities = (size_t*) malloc(sizeof(size_t)*h_ctc->ndims);
// assign cardinalities for the tensor objects
const mwSize* dims_c = mxGetDimensions(data);
for (size_t i=0; i<h_ctc->ndims; i++){
if (tensor_card==NULL){
// we are doing hadamard multiplication, all tensors have same cardinalities
// or we are doing output tensor object, which as maximum cardinalities on all dimensions
h_ct->cardinalities[i] = dims_c[i];
std::cout << "H dim "<< i << " cardinality assignment: "
<< h_ct->cardinalities[i]
<< " <- " << dims_c[i]
<< std::endl;
}else{
// we are doing tensor contraction, tensors may have different cardinalities
h_ct->cardinalities[i] = ((float *)mxGetData(tensor_card))[i];
std::cout << "TC dim "<< i << " cardinality assignment: "
<< h_ct->cardinalities[i]
<< " <- " << ((float *)mxGetData(tensor_card))[i] << std::endl;
}
}
// assign h_ct host data
size_t elnum = (size_t) mxGetNumberOfElements(data);
std::cout << " prepareDeviceTensor elnum " << elnum << std::endl;
h_ct->mem_size= sizeof(float) * elnum;
h_ct->data = (float*)malloc( h_ct->mem_size );
memcpy(h_ct->data, (float*)mxGetData(data), h_ct->mem_size);
print_ct("prepareDeviceTensor h_ct",h_ct,false,true);
// allocate d_ct
ct* d_ct;
cutilSafeCall(hipMalloc((void**) &d_ct, sizeof(ct)));
// allocate d_ct contents
// config -> d_ctc
size_t* tmp_card;
cutilSafeCall(hipMalloc((void**)&tmp_card, sizeof(size_t)*h_ctc->ndims));
cutilSafeCall(hipMemcpy(tmp_card, h_ct->cardinalities, sizeof(size_t)*h_ctc->ndims ,hipMemcpyHostToDevice));
float* tmp_data;
cutilSafeCall(hipMalloc((void**)&tmp_data, h_ct->mem_size));
cutilSafeCall(hipMemcpy(tmp_data, h_ct->data, h_ct->mem_size, hipMemcpyHostToDevice));
// put contents of d_ct in their places on the device
hipLaunchKernelGGL(( assignCudatensor), dim3(1), dim3(1), 0, 0, d_ct, d_ctc, tmp_card, h_ct->mem_size, tmp_data);
dev_ct_ptrs dcp;
dcp.ct=d_ct;
dcp.ctc=d_ctc;
dcp.cardinalities=tmp_card;
dcp.data=tmp_data;
return dcp;
}
ct_config* ctcToDevice(ct_config* h_ctc){
// transfer to device
size_t* tmp_card;
cutilSafeCall(hipMalloc((void**)&tmp_card, sizeof(size_t)*h_ctc->ndims));
cutilSafeCall(hipMemcpy(tmp_card, h_ctc->cardinalities, sizeof(size_t)*h_ctc->ndims ,hipMemcpyHostToDevice));
ct_config* d_ctc;
cutilSafeCall(hipMalloc((void**) &d_ctc, sizeof(ct_config) ));
cutilSafeCall(hipMemcpy( d_ctc , h_ctc, sizeof(ct_config), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( assignCudatensorConfig), dim3(1),dim3(1), 0, 0, d_ctc, tmp_card);
return d_ctc;
}
ct_config* prepareDeviceTensorConfig(ct_config* h_ctc, const mxArray* sampleObject){
h_ctc->ndims = mxGetNumberOfDimensions(sampleObject);
h_ctc->cardinalities = (size_t*) malloc(sizeof(size_t)*h_ctc->ndims);
const mwSize *dims = mxGetDimensions(sampleObject);
h_ctc->total_cardinality = 1;
for (size_t i=0; i<h_ctc->ndims; i++){
h_ctc->cardinalities[i] = dims[i];
if(dims[i] != 0)
h_ctc->total_cardinality *= dims[i];
}
return ctcToDevice(h_ctc);
}
ct_config* getDeviceTensorContractConfig(ct_config* h_ctc, const mxArray* tensor1, const mxArray* tensor1_card, const mxArray* tensor2, const mxArray* tensor2_card){
h_ctc->ndims = mxGetNumberOfElements(tensor1_card); // assumes both objects of same size
h_ctc->cardinalities = (size_t*) malloc(sizeof(size_t)*h_ctc->ndims);
h_ctc->element_number = 0;
h_ctc->total_cardinality = 1;
float tmpcard1[h_ctc->ndims];
float tmptotalcard1=1;
float tmpcard2[h_ctc->ndims];
float tmptotalcard2=1;
for (size_t i=0; i<h_ctc->ndims; i++){
// assumes same total cardinality for all objects
if ( ((float*)mxGetData(tensor1_card))[i] != 0 )
h_ctc->total_cardinality *= ((float*)mxGetData(tensor1_card))[i];
else if (((float*)mxGetData(tensor2_card))[i] != 0)
h_ctc->total_cardinality *= ((float*)mxGetData(tensor2_card))[i];
tmpcard1[i] = ((float*)mxGetData(tensor1_card))[i];
if ( ((float*)mxGetData(tensor1_card))[i] != 0 )
tmptotalcard1 *= ((float*)mxGetData(tensor1_card))[i];
tmpcard2[i] = ((float*)mxGetData(tensor2_card))[i];
if ( ((float*)mxGetData(tensor2_card))[i] != 0 )
tmptotalcard2 *= ((float*)mxGetData(tensor2_card))[i];
}
if (tmptotalcard1 != tmptotalcard2){
std::cout << "input arguments have different number of elements, exiting" << std::endl;
}
std::cout << "element number <- " << tmptotalcard1 << std::endl;
h_ctc->element_number = tmptotalcard1;
for (size_t i=0; i<h_ctc->ndims; i++){
h_ctc->cardinalities[i] = ::max( ((float*)mxGetData(tensor1_card))[i] ,
((float*)mxGetData(tensor2_card))[i] );
}
return ctcToDevice(h_ctc);
}
void print_device_ctc(char* txt, ct_config* d_ctc){
ct_config tmp_ctc;
cutilSafeCall(hipMemcpy(&tmp_ctc, d_ctc, sizeof(ct_config), hipMemcpyDeviceToHost));
//print_ct_config(txt,&tmp_ctc); // must return pointer set from config copy operation and use that
}
void print_device_ct(char* txt,dev_ct_ptrs* dcp, ct* host_ct){
ct tmp_ct;
cutilSafeCall(hipMemcpy(&tmp_ct, dcp->ct, sizeof(ct), hipMemcpyDeviceToHost));
tmp_ct.config = (ct_config*) malloc( sizeof(ct_config) );
tmp_ct.cardinalities = (size_t*) malloc( host_ct->config->ndims );
tmp_ct.data = (float*) malloc(host_ct->mem_size);
cutilSafeCall(hipMemcpy(tmp_ct.data, dcp->data, host_ct->mem_size, hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(tmp_ct.config, dcp->ctc, sizeof(ct_config), hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(tmp_ct.cardinalities, dcp->cardinalities, sizeof(size_t)*host_ct->config->ndims, hipMemcpyDeviceToHost));
print_ct(txt,&tmp_ct,false,true);
}
enum tensor_operation{
hadamard,
contract
};
void operate(ct_config* h_ctc, ct_config* d_ctc, const mxArray *prhs[], mxArray *plhs[], tensor_operation operation){
// input tensor A
ct h_it_A; dev_ct_ptrs d_A;
// input tensor B
ct h_it_B; dev_ct_ptrs d_B;
if (operation==hadamard){
// we are doing hadamard multiplication, all tensors have same cardinalities
std::cout << "d_A prepareDeviceTensor " << std::endl;
d_A=prepareDeviceTensor(h_ctc, d_ctc, &h_it_A, prhs[0]);
std::cout << "d_B prepareDeviceTensor " << std::endl;
d_B=prepareDeviceTensor(h_ctc, d_ctc, &h_it_B, prhs[1]);
}else if (operation==contract){
// we are doing tensor contraction, tensors may have different cardinalities
std::cout << "d_A prepareDeviceTensor " << std::endl;
d_A=prepareDeviceTensor(h_ctc, d_ctc, &h_it_A, prhs[0], prhs[1]);
std::cout << "d_B prepareDeviceTensor " << std::endl;
d_B=prepareDeviceTensor(h_ctc, d_ctc, &h_it_B, prhs[2], prhs[3]);
}
// output tensor C
ct h_ot_C;
ct h_ot_C_full;
//ct_config h_ctc_full;
//ct_config d_ctc_full;
dev_ct_ptrs d_C;
dev_ct_ptrs d_C_full;
mxArray* full_data = mxCreateNumericArray(h_ctc->ndims,h_ctc->cardinalities,mxSINGLE_CLASS,mxREAL);
mxArray* full_cardinalities = mxCreateNumericArray(h_ctc->ndims,h_ctc->cardinalities,mxSINGLE_CLASS,mxREAL);
float* f_c_ptr = (float*)mxGetData(full_cardinalities);
for(size_t i=0; i<h_ctc->ndims; i++ ){
f_c_ptr[i]=h_ctc->cardinalities[i];
}
// prepare MATLAB storage
float* m_C;
//size_t m_C_mem_size=1;
// calculate total cardinalities for all objects
if(operation == hadamard){
mwSize argMatDims[h_ctc->ndims];
for (size_t i=0; i<h_ctc->ndims; i++){
argMatDims[i] = h_ctc->cardinalities[i];
}
plhs[0] = mxCreateNumericArray(h_ctc->ndims,argMatDims,mxSINGLE_CLASS,mxREAL);
m_C = (float*) mxGetPr(plhs[0]);
std::cout << "d_C prepareDeviceTensor " << std::endl;
d_C=prepareDeviceTensor(h_ctc, d_ctc, &h_ot_C, plhs[0]);
}
else if (operation == contract){
size_t non_zero_dim_number=0;
for (size_t i=0; i<h_ctc->ndims; i++){
//std::cout << " non_zero_dim_number loop " << i ;
float tmpdimcard = ((float*)mxGetData(prhs[4]))[i];
if(tmpdimcard != 0) {
non_zero_dim_number++;
//std::cout << " tmpdimcard " << tmpdimcard << std::endl;
//m_C_mem_size *= tmpdimcard;
}
}
mwSize argMatDims[non_zero_dim_number];
size_t argMatDims_ind=0;
//std::cout << "C tensor init argMatDims with size " << non_zero_dim_number << std::endl;
//<< " m_C_mem_size " << m_C_mem_size << std::endl;
for (size_t i=0; i<h_ctc->ndims; i++){
float val=((float*)mxGetData(prhs[4]))[i];
//std::cout << "C tensor argMatDims[" << i << "] = " << val << " ";
if ( val != 0){ // skip dimensions with 0 cardinality
//std::cout << " assign " << std::endl;
argMatDims[argMatDims_ind] = val;
argMatDims_ind++;
}else{
//std::cout << " not assign " << std::endl;
}
}
plhs[0] = mxCreateNumericArray(non_zero_dim_number,argMatDims,mxSINGLE_CLASS,mxREAL);
//std::cout << "SELAM " << (size_t) mxGetNumberOfElements(plhs[0]) << std::endl;
m_C = (float*) mxGetPr(plhs[0]);
std::cout << "d_C prepareDeviceTensor " << std::endl;
d_C=prepareDeviceTensor(h_ctc, d_ctc, &h_ot_C, plhs[0], prhs[4]);
//std::cout << "SELAAM bu " << h_ctc->element_number << std::endl;
std::cout << "d_C_full prepareDeviceTensor " << std::endl;
d_C_full=prepareDeviceTensor(h_ctc, d_ctc, &h_ot_C_full, full_data, full_cardinalities);
}
bool printdata=true;
print_ct("Host A",&h_it_A,false,printdata);
print_ct("Host B",&h_it_B,false,printdata);
print_ct("Host C",&h_ot_C,false,printdata);
print_ct("Host C_full",&h_ot_C_full,false,printdata);
print_device_ct("Device A",&d_A, &h_it_A);
print_device_ct("Device B",&d_B, &h_it_B);
print_device_ct("Device C",&d_C, &h_ot_C);
print_device_ct("Device C_full",&d_C_full, &h_ot_C_full);
//cudaPrintfInit();
// allocate device memory for result
// kernel warmup
// if (operation == hadamard){
// hipLaunchKernelGGL(( tensorHadamard), dim3(blocks), dim3(threads) , 0, 0, d_C.ct, d_A.ct, d_B.ct);
// }else if (operation == contract){
// hipLaunchKernelGGL(( tensorContract), dim3(blocks), dim3(threads) , 0, 0, d_C.ct, d_A.ct, d_B.ct);
// }
hipDeviceSynchronize();
// create and start timer
std::cout << "Run Kernels...\n\n" << std::endl;
unsigned int timer = 0;
cutilCheckError(cutCreateTimer(&timer));
cutilCheckError(cutStartTimer(timer));
// execute the kernel
int nIter = 30;
//for (int j = 0; j < nIter; j++) {
if (operation == hadamard){
hipLaunchKernelGGL(( tensorHadamard), dim3(blocks), dim3(threads) , 0, 0, d_C.ct, d_A.ct, d_B.ct);
}else if (operation == contract){
hipLaunchKernelGGL(( tensorContract), dim3(blocks), dim3(threads) , 0, 0, d_C_full.ct, d_C.ct, d_A.ct, d_B.ct);
}
//}
// check if kernel execution generated and error
cutilCheckMsg("Kernel execution failed");
hipDeviceSynchronize();
// stop and destroy timer
cutilCheckError(cutStopTimer(timer));
//double dSeconds = cutGetTimerValue(timer)/((double)nIter * 1000.0);
//double dNumOps = 2.0 * total_size;
//double gflops = 1.0e-9 * dNumOps/dSeconds;
//std::cout << "tensorMul, Throughput = "<< gflops << " GFlop/s, Time = " << dSeconds << " s, Size = " << dNumOps << " Ops, NumDevsUsed = 1, Workgroup = " << threads << "\n" ;
cutilCheckError(cutDeleteTimer(timer));
// copy result from device to host
float h_C_full_data[h_ctc->total_cardinality];
if(operation==hadamard){
cutilSafeCall(hipMemcpy(m_C, d_C.data, h_ot_C.mem_size, hipMemcpyDeviceToHost) ); // assumes same size
}
else if(operation==contract){
cutilSafeCall(hipMemcpy(m_C, d_C.data, h_ot_C.mem_size, hipMemcpyDeviceToHost) ); // assumes same size
cutilSafeCall(hipMemcpy(h_C_full_data, d_C_full.data, h_ot_C_full.mem_size, hipMemcpyDeviceToHost) );
}
// print C_full
// for (size_t i=0; i<h_ctc->total_cardinality; i++){
// std::cout << "C_full[" << i << "] = " << h_C_full_data[i] << std::endl;
// }
// clean up memory
//free(h_A);
//free(h_B);
//free(h_C);
//free(reference);
// wrong
//cutilSafeCall(hipFree());
//cutilSafeCall(hipFree(d_it_B));
//cutilSafeCall(hipFree(d_it_A)); //->C
print_device_ct("Result\nDevice C full",&d_C_full, &h_ot_C_full);
print_device_ct("Result\nDevice C",&d_C, &h_ot_C);
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
hipDeviceReset();
// required to avoid memory leak?
//delete h_ctc->cardinalities;
//delete h_it_A.cardinalities;
//delete h_it_B.cardinalities;
}
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
std::cout << "mex: found " << nrhs << " number of arguments " << std::endl;
if (nrhs == 2){
// hadamard multiplication
std::cout << "mex: applying hadamard multiplication " << std::endl;
// ASSUMES target tensors are of the same dimension
ct_config h_ctc;
ct_config* d_ctc = prepareDeviceTensorConfig(&h_ctc,prhs[0]);
print_ct_config("Host ctc",&h_ctc);
operate(&h_ctc, d_ctc, prhs, plhs, hadamard);
}else if(nrhs==5){
// tensor contraction operation
std::cout << "mex: applying tensor contraction " << std::endl;
ct_config h_ctc;
ct_config* d_ctc = getDeviceTensorContractConfig(&h_ctc,prhs[0],prhs[1],prhs[2],prhs[3]);
print_ct_config("Host ctc", &h_ctc);
print_device_ctc("Device tmp ctc",d_ctc);
operate(&h_ctc, d_ctc, prhs, plhs, contract);
}else{
std::cout << "mex: wrong number of arguments " << std::endl;
}
}
void print( const mxArray *prhs[], float* output, int total_size){
std::cout << "\ntotal_size " << total_size << std::endl;
std::cout << std::endl << std::endl << "input A:" << std::endl;
for (int i=0; i<total_size ; i++){
std::cout << i << "\t" << ((float*)mxGetPr(prhs[0]))[i] << std::endl;
}
std::cout << std::endl << std::endl << "input B:" << std::endl;
for (int i=0; i<total_size ; i++){
std::cout << i << "\t" << ((float*)mxGetPr(prhs[1]))[i] << std::endl;
}
std::cout << std::endl << std::endl << "output C:" << std::endl;
for (int i=0; i<total_size ; i++){
std::cout << i << "\t" << output[i] << std::endl;
}
}
| e8b0c3c104f065f283d6c2df7b628179339c13b4.cu | /*
* author: ck
* 26.04.2011
*/
#include "mex.h"
#include "cublas.h"
#include "cutil_inline.h"
#include <iostream>
#include <algorithm>
#include "cuPrintf.cu"
#define BLOCK_SIZE 400
// setup execution parameters
//dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
//dim3 grid(WC / threads.x, HC / threads.y);
int blocks=BLOCK_SIZE;
//int threads=400;
int threads=100;
// Tensor .* operation. Multiply corresponding entries of tensors A,B of same size
// Store the result in tensor C
// two operators are available
// hadamard product: multiplies each element of input objects elementwise
// C = A .* B
// requires two input tensors A, B as input
// contract product: performs matrix multiplication if elements are 2 dimensional
// C = A * B
// requires five input arguments A, A_cardinalities, B, B_cardinalities, C_cardinalities
// objects (A,B,C) must have same number of dimensions
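// A usage sketch from the MATLAB side (illustrative only: the compiled MEX
// name "tensor_mex" and the shapes I, J, K are assumptions, not taken from this file):
// C = tensor_mex(single(A), single(B)); % hadamard, A and B same size
// % contraction of A(i,j) with B(j,k) over j (matrix product), with
// % per-argument cardinality vectors over the dimensions (i,j,k):
// C = tensor_mex(single(A), single([I J 0]), single(B), single([0 J K]), single([I 0 K]));
// As far as the argument handling below suggests, a 0 in an input's cardinality
// vector marks a dimension that tensor does not span, and a 0 in the output
// cardinality vector marks the dimension that is summed over.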
void print( const mxArray *prhs[], float* output, int total_size);
// cuda tensor operation configuration object
struct ct_config{
// defines how many dimensions are there
size_t ndims;
// defines the maximum possible size of each dimension
// for all tensors using this configuration
// must be allocated dynamically as an array of type size_t
// size of the array must be equal to ndims
size_t* cardinalities;
// total size of the related objects
// maximum of cardinality of input objects
// cardinality for an object is found by multiplying object's cardinalities of each dimension
size_t total_cardinality;
// number of elements in the data
size_t element_number;
// index of the dimension to contract over
//size_t contract_dim;
};
// cuda tensor object
struct ct{
// related configuration object
ct_config* config;
// defines size of each dimension for this tensor
// must be allocated dynamically as an array of type size_t
// size of the array must be equal to config.ndims
size_t* cardinalities;
// size of the corresponding data
size_t mem_size;
// points to the values of this tensor
float* data;
};
// compact structure carying pointers to elements of a cudatensor on the device
struct dev_ct_ptrs{
ct* ct;
ct_config* ctc;
size_t* cardinalities;
float* data;
};
// multiply corresponding elemens of A, B tensors, put result in tensor C
__global__ void
tensorHadamard( ct* C, ct* A, ct* B)
{
// Block index
size_t bx = blockIdx.x;
//int by = blockIdx.y;
// Thread index
size_t tx = threadIdx.x;
//int ty = threadIdx.y;
size_t threadsPerblock = blockDim.x * blockDim.y * blockDim.z;
size_t thread_id = bx * threadsPerblock + tx;
if ( thread_id < A->config->element_number ){
C->data[thread_id] = A->data[thread_id] * B->data[thread_id];
}
}
// multiply corresponding elements and contract along specified dimension
__global__ void
tensorContract( ct* C_full, ct* C, ct* A, ct* B )
{
size_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
//size_t thread_id = threadIdx.x + (threadIdx.y * blockDim.x) + (threadIdx.x * threadIdx.y * blockDim.y);
//size_t block_id = blockIdx.x + (blockIdx.y * gridDim.x);
// assumes same total dimensions and cardinalities for all objects
size_t tot_card = A->config->total_cardinality;
size_t ndims = A->config->ndims;
if ( thread_id < tot_card ){
//extern __shared__ int C_shared[];
//size_t uclu[3];
//for (size_t i=0; i<3; i++) {uclu[i]=0; }
int index_number_A=0;
int index_number_B=0;
int index_number_C=0;
for (size_t obj=0; obj<2; obj++){
ct* p;
if (obj==0) p = A;
else if (obj==1) p = B;
//else if (obj==2) p = C;
size_t t_id_rem = thread_id;
size_t cumulative_offset_ind = 1;
size_t cumulative_offset_elnum = 1;
size_t cur_card_index=0;
for (size_t card_index=0; card_index < ndims; card_index++){
if ( t_id_rem == 0 ) break;
//cuPrintf("card_index %d t_id_rem %d cumulative_offset_ind %d\n",card_index, t_id_rem, cumulative_offset_ind);
//uclu[card_index] = (t_id_rem % p->config->cardinalities[card_index]);// * cumulative_offset_ind;
cur_card_index = (t_id_rem % p->config->cardinalities[card_index]);// * cumulative_offset_ind;
t_id_rem = (size_t) t_id_rem / p->config->cardinalities[card_index];
if (p->cardinalities[card_index] != 0){
// int olmazsa patliyor?
if (obj==0) index_number_A += (int)cur_card_index * (int)cumulative_offset_elnum;
else if (obj==1) index_number_B += (int)cur_card_index * (int)cumulative_offset_elnum;
//else if (obj==2) index_number_C += (int)cur_card_index * (int)cumulative_offset_elnum;
// increment cumulative offset with current dimension cardinality for next loop
// -1 for cardinalities are indexed from 1
//cumulative_offset_ind *= p->config->cardinalities[card_index] - 1 ;
cumulative_offset_elnum *= p->config->cardinalities[card_index] ;
}
}
}
size_t tmpB = B->data[index_number_B];
size_t tmpA= A->data[index_number_A];
size_t tmpC_full= C_full->data[thread_id];
//cuPrintf("C[%d] %d += A[%d] %d * B[%d] %d\n", thread_id, tmpC_full, index_number_A, tmpA, index_number_B, tmpB);
//~/arastir/cuda2/cudainstall/3.2/sdk/C/src/reduction/doc/reduction.pdf
// extern __shared__ int sdata[];
// // each thread loads one element from global to shared mem
// unsigned int tid = threadIdx.x;
// unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
// sdata[tid] = C->data[i];
__syncthreads();
// sdata[index_number_C] += A->data[index_number_A] * B->data[index_number_B];
//cuPrintf("C_full->data[%d] = %d ", thread_id, tmpA * tmpB);
C_full->data[thread_id] = A->data[index_number_A] * B->data[index_number_B];
// size_t tmpC_shared = C_shared[index_number_C];
// cuPrintf("C_shared[%d] %d += %d * %d\n", index_number_C, tmpC_shared, tmpA,tmpB);
// C_shared[index_number_C] += A->data[index_number_A] * B->data[index_number_B];
// __syncthreads();
// if(thread_id < 4){
// C->data[thread_id] = C_shared[thread_id];
// }
// contract on dimensions with zero cardinality
size_t cum_card=1;
bool foundone=false;
for (size_t card_index=0; card_index<ndims; card_index++){
size_t current_card=C->cardinalities[card_index];
if( current_card == 0 ) {
foundone=true;
// contract on this dimension
size_t C_ind=0;
for (size_t C_full_ind=0; C_full_ind < tot_card-1;){
size_t tmp=0;
for (size_t el=0; el<cum_card; el++){
size_t increment = el * (cum_card);
size_t tmpcf = C_full->data[C_full_ind + increment];
if(thread_id==0)
cuPrintf("C_full_ind %d: tmp %d += C_full->data[ %d + %d ] %d \n", C_full_ind, tmp , C_full_ind, increment , tmpcf);
tmp += tmpcf;
}
C->data[C_ind] = tmp;
C_ind++;
if (C_full_ind % cum_card == (cum_card-1) ){
C_full_ind += cum_card * (cum_card-1) + 1;
}else{
C_full_ind++;
}
}
}
cum_card *= current_card;
}
if (foundone == false){
C->data[thread_id] = C_full->data[thread_id];
}
// size_t tmpS= sdata[tid];
// cuPrintf("C %d\n",tmpS);
//tmpC= C->data[index_number_C];
//cuPrintf("C %d\n",tmpC);
//cuPrintf("uclu %d %d %d index_number_A %d index_number_B %d\n", uclu[0], uclu[1], uclu[2], (int)index_number_A, index_number_B);
//cuPrintf("A %d B %d C %d\n", index_number_A, index_number_B, index_number_C);
}
}
__global__ void
assignCudatensorConfig( ct_config* ctc, size_t* cards ){
ctc->cardinalities = cards;
}
__global__ void
assignCudatensor( ct* c, ct_config* ctc, size_t* cards, size_t mem_size, float* data){
c->config = ctc;
c->cardinalities = cards;
c->mem_size = mem_size;
c->data = data;
}
void print_ct_config(char* txt, ct_config* ctc){
std::cout << txt << std::endl;
std::cout << "Number of dimensions " << (int) (ctc->ndims) << std::endl;
//std::cout << "Contract dimension " << (int) (ctc->contract_dim) << std::endl;
std::cout << "Cardinalities for each dimension of this configuration " << std::endl;
size_t i=0;
for ( i=0; i< ctc->ndims; i++){
std::cout << ctc->cardinalities[i] << " ";
}
std::cout << "\nTotal cardinality: " << ctc->total_cardinality << std::endl;
std::cout << "\nElement number: " << ctc->element_number << std::endl;
std::cout << std::endl << std::endl << std::endl;
}
void print_ct(char* txt, ct* ct, bool print_config=false, bool printdata=false){
std::cout << txt << std::endl;
if (print_config) print_ct_config(txt, ct->config);
std::cout << "Mem size " << ct->mem_size << std::endl;
std::cout << "Cardinalities for each dimension of this object "<< std::endl;
for (size_t i=0; i< ct->config->ndims; i++){
std::cout << ct->cardinalities[i] << " ";
}
std::cout << std::endl;
if (printdata){
std::cout << "Data" << std::endl;
for (size_t i=0; i< ct->config->element_number; i++){
std::cout << ct->data[i] << " ";
}
}
std::cout << std::endl << std::endl << std::endl;
}
// returns a dev_ct_ptrs struct with information about the cudatensor generated on the device
dev_ct_ptrs prepareDeviceTensor(ct_config* h_ctc, ct_config* d_ctc, ct* h_ct,
const mxArray* data, const mxArray* tensor_card = NULL){
// generate h_ct
h_ct->config = h_ctc;
h_ct->cardinalities = (size_t*) malloc(sizeof(size_t)*h_ctc->ndims);
// assign cardinalities for the tensor objects
const mwSize* dims_c = mxGetDimensions(data);
for (size_t i=0; i<h_ctc->ndims; i++){
if (tensor_card==NULL){
// we are doing hadamard multiplication, all tensors have same cardinalities
// or we are doing output tensor object, which as maximum cardinalities on all dimensions
h_ct->cardinalities[i] = dims_c[i];
std::cout << "H dim "<< i << " cardinality assignment: "
<< h_ct->cardinalities[i]
<< " <- " << dims_c[i]
<< std::endl;
}else{
// we are doing tensor contraction, tensors may have different cardinalities
h_ct->cardinalities[i] = ((float *)mxGetData(tensor_card))[i];
std::cout << "TC dim "<< i << " cardinality assignment: "
<< h_ct->cardinalities[i]
<< " <- " << ((float *)mxGetData(tensor_card))[i] << std::endl;
}
}
// assign h_ct host data
size_t elnum = (size_t) mxGetNumberOfElements(data);
std::cout << " prepareDeviceTensor elnum " << elnum << std::endl;
h_ct->mem_size= sizeof(float) * elnum;
h_ct->data = (float*)malloc( h_ct->mem_size );
memcpy(h_ct->data, (float*)mxGetData(data), h_ct->mem_size);
print_ct("prepareDeviceTensor h_ct",h_ct,false,true);
// allocate d_ct
ct* d_ct;
cutilSafeCall(cudaMalloc((void**) &d_ct, sizeof(ct)));
// allocate d_ct contents
// config -> d_ctc
size_t* tmp_card;
cutilSafeCall(cudaMalloc((void**)&tmp_card, sizeof(size_t)*h_ctc->ndims));
cutilSafeCall(cudaMemcpy(tmp_card, h_ct->cardinalities, sizeof(size_t)*h_ctc->ndims ,cudaMemcpyHostToDevice));
float* tmp_data;
cutilSafeCall(cudaMalloc((void**)&tmp_data, h_ct->mem_size));
cutilSafeCall(cudaMemcpy(tmp_data, h_ct->data, h_ct->mem_size, cudaMemcpyHostToDevice));
// put contents of d_ct in their places on the device
assignCudatensor<<<1, 1>>>(d_ct, d_ctc, tmp_card, h_ct->mem_size, tmp_data);
dev_ct_ptrs dcp;
dcp.ct=d_ct;
dcp.ctc=d_ctc;
dcp.cardinalities=tmp_card;
dcp.data=tmp_data;
return dcp;
}
ct_config* ctcToDevice(ct_config* h_ctc){
// transfer to device
size_t* tmp_card;
cutilSafeCall(cudaMalloc((void**)&tmp_card, sizeof(size_t)*h_ctc->ndims));
cutilSafeCall(cudaMemcpy(tmp_card, h_ctc->cardinalities, sizeof(size_t)*h_ctc->ndims ,cudaMemcpyHostToDevice));
ct_config* d_ctc;
cutilSafeCall(cudaMalloc((void**) &d_ctc, sizeof(ct_config) ));
cutilSafeCall(cudaMemcpy( d_ctc , h_ctc, sizeof(ct_config), cudaMemcpyHostToDevice) );
assignCudatensorConfig<<<1,1>>>(d_ctc, tmp_card);
return d_ctc;
}
ct_config* prepareDeviceTensorConfig(ct_config* h_ctc, const mxArray* sampleObject){
h_ctc->ndims = mxGetNumberOfDimensions(sampleObject);
h_ctc->cardinalities = (size_t*) malloc(sizeof(size_t)*h_ctc->ndims);
const mwSize *dims = mxGetDimensions(sampleObject);
h_ctc->total_cardinality = 1;
for (size_t i=0; i<h_ctc->ndims; i++){
h_ctc->cardinalities[i] = dims[i];
if(dims[i] != 0)
h_ctc->total_cardinality *= dims[i];
}
return ctcToDevice(h_ctc);
}
ct_config* getDeviceTensorContractConfig(ct_config* h_ctc, const mxArray* tensor1, const mxArray* tensor1_card, const mxArray* tensor2, const mxArray* tensor2_card){
h_ctc->ndims = mxGetNumberOfElements(tensor1_card); // assumes both objects of same size
h_ctc->cardinalities = (size_t*) malloc(sizeof(size_t)*h_ctc->ndims);
h_ctc->element_number = 0;
h_ctc->total_cardinality = 1;
float tmpcard1[h_ctc->ndims];
float tmptotalcard1=1;
float tmpcard2[h_ctc->ndims];
float tmptotalcard2=1;
for (size_t i=0; i<h_ctc->ndims; i++){
// assumes same total cardinality for all objects
if ( ((float*)mxGetData(tensor1_card))[i] != 0 )
h_ctc->total_cardinality *= ((float*)mxGetData(tensor1_card))[i];
else if (((float*)mxGetData(tensor2_card))[i] != 0)
h_ctc->total_cardinality *= ((float*)mxGetData(tensor2_card))[i];
tmpcard1[i] = ((float*)mxGetData(tensor1_card))[i];
if ( ((float*)mxGetData(tensor1_card))[i] != 0 )
tmptotalcard1 *= ((float*)mxGetData(tensor1_card))[i];
tmpcard2[i] = ((float*)mxGetData(tensor2_card))[i];
if ( ((float*)mxGetData(tensor2_card))[i] != 0 )
tmptotalcard2 *= ((float*)mxGetData(tensor2_card))[i];
}
if (tmptotalcard1 != tmptotalcard2){
std::cout << "input arguments have different number of elements, exiting" << std::endl;
}
std::cout << "element number <- " << tmptotalcard1 << std::endl;
h_ctc->element_number = tmptotalcard1;
for (size_t i=0; i<h_ctc->ndims; i++){
h_ctc->cardinalities[i] = std::max( ((float*)mxGetData(tensor1_card))[i] ,
((float*)mxGetData(tensor2_card))[i] );
}
return ctcToDevice(h_ctc);
}
void print_device_ctc(char* txt, ct_config* d_ctc){
ct_config tmp_ctc;
cutilSafeCall(cudaMemcpy(&tmp_ctc, d_ctc, sizeof(ct_config), cudaMemcpyDeviceToHost));
//print_ct_config(txt,&tmp_ctc); // must return pointer set from config copy operation and use that
}
void print_device_ct(char* txt,dev_ct_ptrs* dcp, ct* host_ct){
ct tmp_ct;
cutilSafeCall(cudaMemcpy(&tmp_ct, dcp->ct, sizeof(ct), cudaMemcpyDeviceToHost));
tmp_ct.config = (ct_config*) malloc( sizeof(ct_config) );
tmp_ct.cardinalities = (size_t*) malloc( host_ct->config->ndims );
tmp_ct.data = (float*) malloc(host_ct->mem_size);
cutilSafeCall(cudaMemcpy(tmp_ct.data, dcp->data, host_ct->mem_size, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(tmp_ct.config, dcp->ctc, sizeof(ct_config), cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(tmp_ct.cardinalities, dcp->cardinalities, sizeof(size_t)*host_ct->config->ndims, cudaMemcpyDeviceToHost));
print_ct(txt,&tmp_ct,false,true);
}
enum tensor_operation{
hadamard,
contract
};
void operate(ct_config* h_ctc, ct_config* d_ctc, const mxArray *prhs[], mxArray *plhs[], tensor_operation operation){
// input tensor A
ct h_it_A; dev_ct_ptrs d_A;
// input tensor B
ct h_it_B; dev_ct_ptrs d_B;
if (operation==hadamard){
// we are doing hadamard multiplication, all tensors have same cardinalities
std::cout << "d_A prepareDeviceTensor " << std::endl;
d_A=prepareDeviceTensor(h_ctc, d_ctc, &h_it_A, prhs[0]);
std::cout << "d_B prepareDeviceTensor " << std::endl;
d_B=prepareDeviceTensor(h_ctc, d_ctc, &h_it_B, prhs[1]);
}else if (operation==contract){
// we are doing tensor contraction, tensors may have different cardinalities
std::cout << "d_A prepareDeviceTensor " << std::endl;
d_A=prepareDeviceTensor(h_ctc, d_ctc, &h_it_A, prhs[0], prhs[1]);
std::cout << "d_B prepareDeviceTensor " << std::endl;
d_B=prepareDeviceTensor(h_ctc, d_ctc, &h_it_B, prhs[2], prhs[3]);
}
// output tensor C
ct h_ot_C;
ct h_ot_C_full;
//ct_config h_ctc_full;
//ct_config d_ctc_full;
dev_ct_ptrs d_C;
dev_ct_ptrs d_C_full;
mxArray* full_data = mxCreateNumericArray(h_ctc->ndims,h_ctc->cardinalities,mxSINGLE_CLASS,mxREAL);
mxArray* full_cardinalities = mxCreateNumericArray(h_ctc->ndims,h_ctc->cardinalities,mxSINGLE_CLASS,mxREAL);
float* f_c_ptr = (float*)mxGetData(full_cardinalities);
for(size_t i=0; i<h_ctc->ndims; i++ ){
f_c_ptr[i]=h_ctc->cardinalities[i];
}
// prepare MATLAB storage
float* m_C;
//size_t m_C_mem_size=1;
// calculate total cardinalities for all objects
if(operation == hadamard){
mwSize argMatDims[h_ctc->ndims];
for (size_t i=0; i<h_ctc->ndims; i++){
argMatDims[i] = h_ctc->cardinalities[i];
}
plhs[0] = mxCreateNumericArray(h_ctc->ndims,argMatDims,mxSINGLE_CLASS,mxREAL);
m_C = (float*) mxGetPr(plhs[0]);
std::cout << "d_C prepareDeviceTensor " << std::endl;
d_C=prepareDeviceTensor(h_ctc, d_ctc, &h_ot_C, plhs[0]);
}
else if (operation == contract){
size_t non_zero_dim_number=0;
for (size_t i=0; i<h_ctc->ndims; i++){
//std::cout << " non_zero_dim_number loop " << i ;
float tmpdimcard = ((float*)mxGetData(prhs[4]))[i];
if(tmpdimcard != 0) {
non_zero_dim_number++;
//std::cout << " tmpdimcard " << tmpdimcard << std::endl;
//m_C_mem_size *= tmpdimcard;
}
}
mwSize argMatDims[non_zero_dim_number];
size_t argMatDims_ind=0;
//std::cout << "C tensor init argMatDims with size " << non_zero_dim_number << std::endl;
//<< " m_C_mem_size " << m_C_mem_size << std::endl;
for (size_t i=0; i<h_ctc->ndims; i++){
float val=((float*)mxGetData(prhs[4]))[i];
//std::cout << "C tensor argMatDims[" << i << "] = " << val << " ";
if ( val != 0){ // skip dimensions with 0 cardinality
//std::cout << " assign " << std::endl;
argMatDims[argMatDims_ind] = val;
argMatDims_ind++;
}else{
//std::cout << " not assign " << std::endl;
}
}
plhs[0] = mxCreateNumericArray(non_zero_dim_number,argMatDims,mxSINGLE_CLASS,mxREAL);
//std::cout << "SELAM " << (size_t) mxGetNumberOfElements(plhs[0]) << std::endl;
m_C = (float*) mxGetPr(plhs[0]);
std::cout << "d_C prepareDeviceTensor " << std::endl;
d_C=prepareDeviceTensor(h_ctc, d_ctc, &h_ot_C, plhs[0], prhs[4]);
//std::cout << "SELAAM bu " << h_ctc->element_number << std::endl;
std::cout << "d_C_full prepareDeviceTensor " << std::endl;
d_C_full=prepareDeviceTensor(h_ctc, d_ctc, &h_ot_C_full, full_data, full_cardinalities);
}
bool printdata=true;
print_ct("Host A",&h_it_A,false,printdata);
print_ct("Host B",&h_it_B,false,printdata);
print_ct("Host C",&h_ot_C,false,printdata);
print_ct("Host C_full",&h_ot_C_full,false,printdata);
print_device_ct("Device A",&d_A, &h_it_A);
print_device_ct("Device B",&d_B, &h_it_B);
print_device_ct("Device C",&d_C, &h_ot_C);
print_device_ct("Device C_full",&d_C_full, &h_ot_C_full);
//cudaPrintfInit();
// allocate device memory for result
// kernel warmup
// if (operation == hadamard){
// tensorHadamard<<< blocks, threads >>>(d_C.ct, d_A.ct, d_B.ct);
// }else if (operation == contract){
// tensorContract<<< blocks, threads >>>(d_C.ct, d_A.ct, d_B.ct);
// }
cudaThreadSynchronize();
// create and start timer
std::cout << "Run Kernels...\n\n" << std::endl;
unsigned int timer = 0;
cutilCheckError(cutCreateTimer(&timer));
cutilCheckError(cutStartTimer(timer));
// execute the kernel
int nIter = 30;
//for (int j = 0; j < nIter; j++) {
if (operation == hadamard){
tensorHadamard<<< blocks, threads >>>(d_C.ct, d_A.ct, d_B.ct);
}else if (operation == contract){
tensorContract<<< blocks, threads >>>(d_C_full.ct, d_C.ct, d_A.ct, d_B.ct);
}
//}
// check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
cudaThreadSynchronize();
// stop and destroy timer
cutilCheckError(cutStopTimer(timer));
//double dSeconds = cutGetTimerValue(timer)/((double)nIter * 1000.0);
//double dNumOps = 2.0 * total_size;
//double gflops = 1.0e-9 * dNumOps/dSeconds;
//std::cout << "tensorMul, Throughput = "<< gflops << " GFlop/s, Time = " << dSeconds << " s, Size = " << dNumOps << " Ops, NumDevsUsed = 1, Workgroup = " << threads << "\n" ;
cutilCheckError(cutDeleteTimer(timer));
// copy result from device to host
float h_C_full_data[h_ctc->total_cardinality];
if(operation==hadamard){
cutilSafeCall(cudaMemcpy(m_C, d_C.data, h_ot_C.mem_size, cudaMemcpyDeviceToHost) ); // assumes same size
}
else if(operation==contract){
cutilSafeCall(cudaMemcpy(m_C, d_C.data, h_ot_C.mem_size, cudaMemcpyDeviceToHost) ); // assumes same size
cutilSafeCall(cudaMemcpy(h_C_full_data, d_C_full.data, h_ot_C_full.mem_size, cudaMemcpyDeviceToHost) );
}
// print C_full
// for (size_t i=0; i<h_ctc->total_cardinality; i++){
// std::cout << "C_full[" << i << "] = " << h_C_full_data[i] << std::endl;
// }
// clean up memory
//free(h_A);
//free(h_B);
//free(h_C);
//free(reference);
// wrong
//cutilSafeCall(cudaFree());
//cutilSafeCall(cudaFree(d_it_B));
//cutilSafeCall(cudaFree(d_it_A)); //->C
print_device_ct("Result\nDevice C full",&d_C_full, &h_ot_C_full);
print_device_ct("Result\nDevice C",&d_C, &h_ot_C);
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
cudaThreadExit();
// required to avoid memory leak?
//delete h_ctc->cardinalities;
//delete h_it_A.cardinalities;
//delete h_it_B.cardinalities;
}
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
std::cout << "mex: found " << nrhs << " number of arguments " << std::endl;
if (nrhs == 2){
// hadamard multiplication
std::cout << "mex: applying hadamard multiplication " << std::endl;
// ASSUMES target tensors are of the same dimension
ct_config h_ctc;
ct_config* d_ctc = prepareDeviceTensorConfig(&h_ctc,prhs[0]);
print_ct_config("Host ctc",&h_ctc);
operate(&h_ctc, d_ctc, prhs, plhs, hadamard);
}else if(nrhs==5){
// tensor contraction operation
std::cout << "mex: applying tensor contraction " << std::endl;
ct_config h_ctc;
ct_config* d_ctc = getDeviceTensorContractConfig(&h_ctc,prhs[0],prhs[1],prhs[2],prhs[3]);
print_ct_config("Host ctc", &h_ctc);
print_device_ctc("Device tmp ctc",d_ctc);
operate(&h_ctc, d_ctc, prhs, plhs, contract);
}else{
std::cout << "mex: wrong number of arguments " << std::endl;
}
}
void print( const mxArray *prhs[], float* output, int total_size){
std::cout << "\ntotal_size " << total_size << std::endl;
std::cout << std::endl << std::endl << "input A:" << std::endl;
for (int i=0; i<total_size ; i++){
std::cout << i << "\t" << ((float*)mxGetPr(prhs[0]))[i] << std::endl;
}
std::cout << std::endl << std::endl << "input B:" << std::endl;
for (int i=0; i<total_size ; i++){
std::cout << i << "\t" << ((float*)mxGetPr(prhs[1]))[i] << std::endl;
}
std::cout << std::endl << std::endl << "output C:" << std::endl;
for (int i=0; i<total_size ; i++){
std::cout << i << "\t" << output[i] << std::endl;
}
}
|
7b74f3a0204fce7241f4d8ed8ae0b68b6c59e42e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include <sys/utime.h>
struct Simulation {
int *board;
int *win;
int *rows;
int *columns;
int *possible;
};
__device__
void checkWin(Simulation *sim)
{
sim->win[0] = 0;
for (int j = 0; j < (sim->columns[0]); j++)
{
if (sim->board[0*sim->columns[0] + j] == sim->board[1*sim->columns[0] + j]
&& sim->board[0*sim->columns[0] + j] == sim->board[2*sim->columns[0] + j] && sim->board[0*sim->columns[0] + j] != 0)
{
sim->win[0] = sim->board[0*sim->columns[0] + j];
}
}
for (int j = 0; j < (sim->rows[0]); j++)
{
if (sim->board[j*sim->columns[0] + 0] == sim->board[j*sim->columns[0] + 1]
&& sim->board[j*sim->columns[0] + 0] == sim->board[j*sim->columns[0] + 2] && sim->board[0 * sim->columns[0] + j] != 0)
{
sim->win[0] = sim->board[j*sim->columns[0] + 0];
}
}
if (sim->board[0] == sim->board[4] && sim->board[0] == sim->board[8] && sim->board[0] != 0)
{
sim->win[0] = sim->board[0];
}
if (sim->board[2] == sim->board[4] && sim->board[2] == sim->board[6] && sim->board[2] != 0)
{
sim->win[0] = sim->board[2];
}
}
void printBoard(Simulation sim)
{
printf("columns[0]: %i", sim.columns[0]);
printf("\nrows[0]: %i\n", sim.rows[0]);
for (int i = 0; i < sim.rows[0]; i++)
{
for (int j = 0; j < sim.columns[0]; j++)
{
printf(" %i ", sim.board[i * sim.columns[0] + j]);
}
printf("\n");
}
printf("\n");
}
void createSim(Simulation *sim)
{
hipMallocManaged((void **)&sim->rows, sizeof(int));
hipMallocManaged((void **)&sim->columns, sizeof(int));
sim->rows[0] = 6;
sim->columns[0] = 7;
hipMallocManaged((void **)&sim->board, sizeof(int) * sim->columns[0] * sim->rows[0]);
for (int i = 0; i < sim->columns[0] * sim->rows[0]; i++)
{
sim->board[i] = 0;
}
hipMallocManaged((void **)&sim->possible, sizeof(int) * 8);
hipMallocManaged((void **)&sim->win, sizeof(int));
sim->win[0] = 0;
}
__device__
void possibleMoves(Simulation *sim)
{
int moves = 0;
for (int i = 0; i < sim->columns[0]; i++)
{
for (int j = 0; j < sim->rows[0]; j++)
{
if (sim->board[j*sim->columns[0] + i] == 0)
{
moves++;
sim->possible[moves] = j*sim->columns[0] + i;
}
}
}
sim->possible[0] = moves;
}
void resetBoard(Simulation *sim)
{
//printf("A");
//printf("\nrows[0]: %i, columns[0]: %i", sim.rows[0], sim.columns[0]);
for (int i = 0; i < sim->columns[0] * sim->rows[0]; i++)
{
//printf("B");
sim->board[i] = 0;
}
//printf("C");
sim->win[0] = 0;
sim->possible[0] = 1;
//printf("D");
}
__device__
int randomMove(Simulation *sim, int player, unsigned int seed)
{
// The hipRAND host API (hiprandCreateGenerator etc.) cannot be called from
// __device__ code, so draw the random number with the device-side API instead.
hiprandState_t state;
hiprand_init(seed, blockIdx.x * blockDim.x + threadIdx.x, 0, &state);
int val = 0;
if (sim->possible[0] > 0)
{
int rInt = hiprand(&state) % sim->possible[0];
val = sim->possible[rInt + 1];
sim->board[val] = player;
}
else
{
val = 0;
}
/*hiprandState_t states;
//printf("\nSeed Setter: %i", threadIdx.x + blockIdx.x);
hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
// threadIdx.x + blockIdx.x, /* the sequence number should be different for each core (unless you want all
// cores to get the same sequence of numbers for some reason - use thread id! */
// 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
// &states);/* [blockIdx.x * threads + threadIdx.x]);*/
/*int val = 0;
if (sim->possible[0] > 0)
{
int r = hiprand(&states) % sim->possible[0];
val = sim->possible[r + 1];
sim->board[val] = player;
}
else
{
val = 0;
}
*/
//free(&states);
return val;
//printf("The board value: %i\n", val);
}
__global__
void handleComputer(Simulation **sim, int seed)
{
//printf("A");
int player = -1;
//printf("B");
possibleMoves(sim[0]);
//printf("C");
randomMove(sim[0], player, seed);
//printf("D");
}
__device__
int resultOfMove(Simulation *sim, int seed)
{
int move = -1;
int player = 1;
if (sim->possible[0] != 0)
{
move = randomMove(sim, player, seed);
if (player == 1) { player = -1; }
else { player = 1; }
possibleMoves(sim);
//printf("9");
checkWin(sim);
}
while (sim->win[0] == 0 && sim->possible[0] != 0)
{
seed++;
//printf("\n7 + INDEX: %i", blockIdx.x * blockDim.x + threadIdx.x);
randomMove(sim, player, seed);
//printf("\n8 + INDEX: %i", blockIdx.x * blockDim.x + threadIdx.x);
if (player == 1) { player = -1; }
else { player = 1; }
//printf("\n9 + INDEX: %i", blockIdx.x * blockDim.x + threadIdx.x);
possibleMoves(sim);
//printf("\n10 + INDEX: %i", blockIdx.x * blockDim.x + threadIdx.x);
checkWin(sim);
//printf("\n11 + INDEX: %i", blockIdx.x * blockDim.x + threadIdx.x);
}
//printf("\nDONE");
return move;
}
__global__
void computerMove(Simulation **sim, int runs, int blocks, int threads, unsigned int seed, int *move)
{
//printf("2");
//printf("\nsim[0] rows[0]: %i\n", sim[0]->rows[0]);
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
/*printf("\nStride: %i", stride);
printf("\nGridDim: %i", gridDim.x);
printf("\nBlockDim: %i", blockDim.x);
printf("\nIndex: %i", index);*/
//printf("3");
for (int i = index; i < runs; i += stride)
{
//seed++;
//printf("5");
possibleMoves(sim[i]);
//printf("\n6 + INDEX: %i", index);
int location = resultOfMove(sim[i], seed);
//printf("\nIndex: %i\nLOCATION: %i", index, location);
move[location] += sim[i]->win[0];
}
//printf("A");
}
__global__
void loopStarter(Simulation **sim)
{
possibleMoves(sim[0]);
checkWin(sim[0]);
}
int main(void)
{
const int runs = 4096;
int blockSize = 256;
int numBlocks = (runs + blockSize - 1) / blockSize;
// Allocate Unified Memory -- accessible from CPU or GPU
Simulation **sim;// = new Simulation*();
//createSim(sim);
hipMallocManaged(&sim, sizeof(Simulation)*runs);
for (int i = 0; i < runs; i++)
{
sim[i] = new Simulation();
hipMallocManaged(&sim[i], sizeof(Simulation));
createSim(sim[i]);
}
sim[0]->possible[0] = 1;
int wins = 0;
int ties = 0;
int losses = 0;
for (int i = 0; i < 1000; i++)
{
while (sim[0]->win[0] == 0 && sim[0]->possible[0] > 0)
{
int *board;
hipMallocManaged(&board, sizeof(int) * sim[0]->columns[0] * sim[0]->rows[0]);
for (int i = 0; i < sim[0]->columns[0] * sim[0]->rows[0]; i++)
{
board[i] = sim[0]->board[i];
}
int *move;// = new int[sim->columns[0] * sim->rows[0]];
hipMallocManaged(&move, sizeof(int) * sim[0]->columns[0] * sim[0]->rows[0]);
for (int i = 0; i < sim[0]->columns[0] * sim[0]->rows[0]; i++)
{
move[i] = 0;
}
srand(time(NULL));
//printf("Start");
computerMove << <numBlocks, blockSize >> > (sim, runs, numBlocks, blockSize, rand(), move);
hipDeviceSynchronize();
int maxValue = -1;
int max = -1;
// move has columns*rows entries (one per board cell), so only scan those
for (int i = 0; i < sim[0]->columns[0] * sim[0]->rows[0]; i++)
{
if (move[i] > maxValue)
{
maxValue = move[i];
max = i;
}
}
board[max] = 1;
sim[0]->board = board;
//printBoard(*sim[0]);
handleComputer << <1, 1 >> > (sim, rand());
hipDeviceSynchronize();
//printBoard(*sim[0]);
for (int i = 1; i < runs; i++)
{
sim[i]->board = sim[0]->board;
}
loopStarter << <1, 1 >> > (sim);
hipDeviceSynchronize();
//printBoard(*sim[0]);
// board is still referenced through sim[0]->board (and sim[i]->board), so it
// must not be freed here; only the per-move score array can be released.
hipFree(move);
//printf("\nPossible Moves: %i and Win?: %i\n", sim[0]->possible[0], sim[0]->win[0]);
}
if (sim[0]->win[0] == 1)
{
wins++;
}
else if (sim[0]->win[0] == 0)
{
ties++;
}
else
{
losses++;
}
for (int i = 0; i < runs; i++)
{
resetBoard(sim[i]);
}
printf("\nITERATION: %i, TOTAL WINS: %i Ties: %i Losses: %i", i, wins, ties, losses);
}
printf("\nWins By Player One: %i Ties: %i Losses: %i... of %i total games.", wins, ties, losses, 1000);
// Run kernel on 1M elements on the CPU
//hipFree(&sim);
return 0;
} | 7b74f3a0204fce7241f4d8ed8ae0b68b6c59e42e.cu | #include <iostream>
#include <math.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand_kernel.h>
#include <time.h>
#include <sys/utime.h>
struct Simulation {
int *board;
int *win;
int *rows;
int *columns;
int *possible;
};
__device__
void checkWin(Simulation *sim)
{
sim->win[0] = 0;
for (int j = 0; j < (sim->columns[0]); j++)
{
if (sim->board[0*sim->columns[0] + j] == sim->board[1*sim->columns[0] + j]
&& sim->board[0*sim->columns[0] + j] == sim->board[2*sim->columns[0] + j] && sim->board[0*sim->columns[0] + j] != 0)
{
sim->win[0] = sim->board[0*sim->columns[0] + j];
}
}
for (int j = 0; j < (sim->rows[0]); j++)
{
if (sim->board[j*sim->columns[0] + 0] == sim->board[j*sim->columns[0] + 1]
&& sim->board[j*sim->columns[0] + 0] == sim->board[j*sim->columns[0] + 2] && sim->board[0 * sim->columns[0] + j] != 0)
{
sim->win[0] = sim->board[j*sim->columns[0] + 0];
}
}
if (sim->board[0] == sim->board[4] && sim->board[0] == sim->board[8] && sim->board[0] != 0)
{
sim->win[0] = sim->board[0];
}
if (sim->board[2] == sim->board[4] && sim->board[2] == sim->board[6] && sim->board[2] != 0)
{
sim->win[0] = sim->board[2];
}
}
void printBoard(Simulation sim)
{
printf("columns[0]: %i", sim.columns[0]);
printf("\nrows[0]: %i\n", sim.rows[0]);
for (int i = 0; i < sim.rows[0]; i++)
{
for (int j = 0; j < sim.columns[0]; j++)
{
printf(" %i ", sim.board[i * sim.columns[0] + j]);
}
printf("\n");
}
printf("\n");
}
void createSim(Simulation *sim)
{
cudaMallocManaged((void **)&sim->rows, sizeof(int));
cudaMallocManaged((void **)&sim->columns, sizeof(int));
sim->rows[0] = 6;
sim->columns[0] = 7;
cudaMallocManaged((void **)&sim->board, sizeof(int) * sim->columns[0] * sim->rows[0]);
for (int i = 0; i < sim->columns[0] * sim->rows[0]; i++)
{
sim->board[i] = 0;
}
cudaMallocManaged((void **)&sim->possible, sizeof(int) * 8);
cudaMallocManaged((void **)&sim->win, sizeof(int));
sim->win[0] = 0;
}
__device__
void possibleMoves(Simulation *sim)
{
int moves = 0;
for (int i = 0; i < sim->columns[0]; i++)
{
for (int j = 0; j < sim->rows[0]; j++)
{
if (sim->board[j*sim->columns[0] + i] == 0)
{
moves++;
sim->possible[moves] = j*sim->columns[0] + i;
}
}
}
sim->possible[0] = moves;
}
void resetBoard(Simulation *sim)
{
//printf("A");
//printf("\nrows[0]: %i, columns[0]: %i", sim.rows[0], sim.columns[0]);
for (int i = 0; i < sim->columns[0] * sim->rows[0]; i++)
{
//printf("B");
sim->board[i] = 0;
}
//printf("C");
sim->win[0] = 0;
sim->possible[0] = 1;
//printf("D");
}
__device__
int randomMove(Simulation *sim, int player, unsigned int seed)
{
// The cuRAND host API (curandCreateGenerator etc.) cannot be called from
// __device__ code, so draw the random number with the device-side API instead.
curandState_t state;
curand_init(seed, blockIdx.x * blockDim.x + threadIdx.x, 0, &state);
int val = 0;
if (sim->possible[0] > 0)
{
int rInt = curand(&state) % sim->possible[0];
val = sim->possible[rInt + 1];
sim->board[val] = player;
}
else
{
val = 0;
}
/*curandState_t states;
//printf("\nSeed Setter: %i", threadIdx.x + blockIdx.x);
curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
// threadIdx.x + blockIdx.x, /* the sequence number should be different for each core (unless you want all
// cores to get the same sequence of numbers for some reason - use thread id! */
// 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
// &states);/* [blockIdx.x * threads + threadIdx.x]);*/
/*int val = 0;
if (sim->possible[0] > 0)
{
int r = curand(&states) % sim->possible[0];
val = sim->possible[r + 1];
sim->board[val] = player;
}
else
{
val = 0;
}
*/
//free(&states);
return val;
//printf("The board value: %i\n", val);
}
__global__
void handleComputer(Simulation **sim, int seed)
{
//printf("A");
int player = -1;
//printf("B");
possibleMoves(sim[0]);
//printf("C");
randomMove(sim[0], player, seed);
//printf("D");
}
__device__
int resultOfMove(Simulation *sim, int seed)
{
int move = -1;
int player = 1;
if (sim->possible[0] != 0)
{
move = randomMove(sim, player, seed);
if (player == 1) { player = -1; }
else { player = 1; }
possibleMoves(sim);
//printf("9");
checkWin(sim);
}
while (sim->win[0] == 0 && sim->possible[0] != 0)
{
seed++;
//printf("\n7 + INDEX: %i", blockIdx.x * blockDim.x + threadIdx.x);
randomMove(sim, player, seed);
//printf("\n8 + INDEX: %i", blockIdx.x * blockDim.x + threadIdx.x);
if (player == 1) { player = -1; }
else { player = 1; }
//printf("\n9 + INDEX: %i", blockIdx.x * blockDim.x + threadIdx.x);
possibleMoves(sim);
//printf("\n10 + INDEX: %i", blockIdx.x * blockDim.x + threadIdx.x);
checkWin(sim);
//printf("\n11 + INDEX: %i", blockIdx.x * blockDim.x + threadIdx.x);
}
//printf("\nDONE");
return move;
}
__global__
void computerMove(Simulation **sim, int runs, int blocks, int threads, unsigned int seed, int *move)
{
//printf("2");
//printf("\nsim[0] rows[0]: %i\n", sim[0]->rows[0]);
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
/*printf("\nStride: %i", stride);
printf("\nGridDim: %i", gridDim.x);
printf("\nBlockDim: %i", blockDim.x);
printf("\nIndex: %i", index);*/
//printf("3");
for (int i = index; i < runs; i += stride)
{
//seed++;
//printf("5");
possibleMoves(sim[i]);
//printf("\n6 + INDEX: %i", index);
int location = resultOfMove(sim[i], seed);
//printf("\nIndex: %i\nLOCATION: %i", index, location);
move[location] += sim[i]->win[0];
}
//printf("A");
}
__global__
void loopStarter(Simulation **sim)
{
possibleMoves(sim[0]);
checkWin(sim[0]);
}
int main(void)
{
const int runs = 4096;
int blockSize = 256;
int numBlocks = (runs + blockSize - 1) / blockSize;
// Allocate Unified Memory -- accessible from CPU or GPU
Simulation **sim;// = new Simulation*();
//createSim(sim);
cudaMallocManaged(&sim, sizeof(Simulation)*runs);
for (int i = 0; i < runs; i++)
{
sim[i] = new Simulation();
cudaMallocManaged(&sim[i], sizeof(Simulation));
createSim(sim[i]);
}
sim[0]->possible[0] = 1;
int wins = 0;
int ties = 0;
int losses = 0;
for (int i = 0; i < 1000; i++)
{
while (sim[0]->win[0] == 0 && sim[0]->possible[0] > 0)
{
int *board;
cudaMallocManaged(&board, sizeof(int) * sim[0]->columns[0] * sim[0]->rows[0]);
for (int i = 0; i < sim[0]->columns[0] * sim[0]->rows[0]; i++)
{
board[i] = sim[0]->board[i];
}
int *move;// = new int[sim->columns[0] * sim->rows[0]];
cudaMallocManaged(&move, sizeof(int) * sim[0]->columns[0] * sim[0]->rows[0]);
for (int i = 0; i < sim[0]->columns[0] * sim[0]->rows[0]; i++)
{
move[i] = 0;
}
srand(time(NULL));
//printf("Start");
computerMove << <numBlocks, blockSize >> > (sim, runs, numBlocks, blockSize, rand(), move);
cudaDeviceSynchronize();
int maxValue = -1;
int max = -1;
// move has columns*rows entries (one per board cell), so only scan those
for (int i = 0; i < sim[0]->columns[0] * sim[0]->rows[0]; i++)
{
if (move[i] > maxValue)
{
maxValue = move[i];
max = i;
}
}
board[max] = 1;
sim[0]->board = board;
//printBoard(*sim[0]);
handleComputer << <1, 1 >> > (sim, rand());
cudaDeviceSynchronize();
//printBoard(*sim[0]);
for (int i = 1; i < runs; i++)
{
sim[i]->board = sim[0]->board;
}
loopStarter << <1, 1 >> > (sim);
cudaDeviceSynchronize();
//printBoard(*sim[0]);
// board is still referenced through sim[0]->board (and sim[i]->board), so it
// must not be freed here; only the per-move score array can be released.
cudaFree(move);
//printf("\nPossible Moves: %i and Win?: %i\n", sim[0]->possible[0], sim[0]->win[0]);
}
if (sim[0]->win[0] == 1)
{
wins++;
}
else if (sim[0]->win[0] == 0)
{
ties++;
}
else
{
losses++;
}
for (int i = 0; i < runs; i++)
{
resetBoard(sim[i]);
}
printf("\nITERATION: %i, TOTAL WINS: %i Ties: %i Losses: %i", i, wins, ties, losses);
}
printf("\nWins By Player One: %i Ties: %i Losses: %i... of %i total games.", wins, ties, losses, 1000);
// Run kernel on 1M elements on the CPU
//cudaFree(&sim);
return 0;
} |
8c8b699545d882f790645c50b60dec2dcdcefef1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scan.h"
#define MAX_BLOCK_SZ 128
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
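// Worked example with LOG_NUM_BANKS = 5 (32 banks): logical index n is stored at
// physical index n + (n >> 5), so 31 -> 31, 32 -> 33 and 64 -> 66. Without this
// padding, logical indices 0, 32 and 64 would all fall into shared-memory bank 0.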
__global__
void gpu_add_block_sums(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const size_t numElems)
{
//unsigned int glbl_t_idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int d_block_sum_val = d_block_sums[blockIdx.x];
//unsigned int d_in_val_0 = 0;
//unsigned int d_in_val_1 = 0;
// Simple implementation's performance is not significantly (if at all)
// better than previous verbose implementation
unsigned int cpy_idx = 2 * blockIdx.x * blockDim.x + threadIdx.x;
if (cpy_idx < numElems)
{
d_out[cpy_idx] = d_in[cpy_idx] + d_block_sum_val;
if (cpy_idx + blockDim.x < numElems)
d_out[cpy_idx + blockDim.x] = d_in[cpy_idx + blockDim.x] + d_block_sum_val;
}
//if (2 * glbl_t_idx < numElems)
//{
// d_out[2 * glbl_t_idx] = d_in[2 * glbl_t_idx] + d_block_sum_val;
// if (2 * glbl_t_idx + 1 < numElems)
// d_out[2 * glbl_t_idx + 1] = d_in[2 * glbl_t_idx + 1] + d_block_sum_val;
//}
//if (2 * glbl_t_idx < numElems)
//{
// d_in_val_0 = d_in[2 * glbl_t_idx];
// if (2 * glbl_t_idx + 1 < numElems)
// d_in_val_1 = d_in[2 * glbl_t_idx + 1];
//}
//else
// return;
//__syncthreads();
//d_out[2 * glbl_t_idx] = d_in_val_0 + d_block_sum_val;
//if (2 * glbl_t_idx + 1 < numElems)
// d_out[2 * glbl_t_idx + 1] = d_in_val_1 + d_block_sum_val;
}
// Modified version of Mark Harris' implementation of the Blelloch scan
// according to https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
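// Worked example for one 8-element block: input [3 1 7 0 4 1 6 3] -> the upsweep
// builds partial sums in place, the block total (25) is written to d_block_sums
// and the last element is zeroed, and the downsweep leaves the exclusive scan
// [0 3 4 11 11 15 16 22] in shared memory.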
__global__
void gpu_prescan(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const unsigned int len,
const unsigned int shmem_sz,
const unsigned int max_elems_per_block)
{
// Allocated on invocation
extern __shared__ unsigned int s_out[];
int thid = threadIdx.x;
int ai = thid;
int bi = thid + blockDim.x;
// Zero out the shared memory
// Helpful especially when input size is not power of two
s_out[thid] = 0;
s_out[thid + blockDim.x] = 0;
// If CONFLICT_FREE_OFFSET is used, shared memory size
// must be 2 * blockDim.x + (2 * blockDim.x) / NUM_BANKS
s_out[thid + blockDim.x + (blockDim.x >> LOG_NUM_BANKS)] = 0;
__syncthreads();
// Copy d_in to shared memory
// Note that d_in's elements are scattered into shared memory
// in light of avoiding bank conflicts
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + threadIdx.x;
if (cpy_idx < len)
{
s_out[ai + CONFLICT_FREE_OFFSET(ai)] = d_in[cpy_idx];
if (cpy_idx + blockDim.x < len)
s_out[bi + CONFLICT_FREE_OFFSET(bi)] = d_in[cpy_idx + blockDim.x];
}
// For both upsweep and downsweep:
// Sequential indices with conflict free padding
// Amount of padding = target index / num banks
// This "shifts" the target indices by one every multiple
// of the num banks
// offset controls the stride and starting index of
// target elems at every iteration
// d just controls which threads are active
// Sweeps are pivoted on the last element of shared memory
// Upsweep/Reduce step
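// e.g. in the first iteration (offset == 1) thread 0 does s_out[1] += s_out[0],
// thread 1 does s_out[3] += s_out[2], and so on, with every logical index shifted
// by CONFLICT_FREE_OFFSET before it touches shared memory.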
int offset = 1;
for (int d = max_elems_per_block >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_out[bi] += s_out[ai];
}
offset <<= 1;
}
// Save the total sum on the global block sums array
// Then clear the last element on the shared memory
if (thid == 0)
{
d_block_sums[blockIdx.x] = s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)];
s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)] = 0;
}
// Downsweep step
for (int d = 1; d < max_elems_per_block; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned int temp = s_out[ai];
s_out[ai] = s_out[bi];
s_out[bi] += temp;
}
}
__syncthreads();
// Copy contents of shared memory to global memory
if (cpy_idx < len)
{
d_out[cpy_idx] = s_out[ai + CONFLICT_FREE_OFFSET(ai)];
if (cpy_idx + blockDim.x < len)
d_out[cpy_idx + blockDim.x] = s_out[bi + CONFLICT_FREE_OFFSET(bi)];
}
}
void sum_scan_blelloch(unsigned int* const d_out,
const unsigned int* const d_in,
const size_t numElems)
{
// Zero out d_out
checkCudaErrors(hipMemset(d_out, 0, numElems * sizeof(unsigned int)));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) ::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
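// e.g. numElems = 1000 with max_elems_per_block = 128: 1000 / 128 = 7 full blocks
// plus a 104-element remainder, so grid_sz becomes 8.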
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
checkCudaErrors(hipMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(hipMemset(d_block_sums, 0, sizeof(unsigned int) * grid_sz));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
hipLaunchKernelGGL(( gpu_prescan), dim3(grid_sz), dim3(block_sz), sizeof(unsigned int) * shmem_sz, 0, d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
checkCudaErrors(hipMalloc(&d_dummy_blocks_sums, sizeof(unsigned int)));
checkCudaErrors(hipMemset(d_dummy_blocks_sums, 0, sizeof(unsigned int)));
//gpu_sum_scan_blelloch<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz);
hipLaunchKernelGGL(( gpu_prescan), dim3(1), dim3(block_sz), sizeof(unsigned int) * shmem_sz, 0, d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
checkCudaErrors(hipFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
checkCudaErrors(hipMalloc(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(hipMemcpy(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, hipMemcpyDeviceToDevice));
sum_scan_blelloch(d_block_sums, d_in_block_sums, grid_sz);
checkCudaErrors(hipFree(d_in_block_sums));
}
//// Uncomment to examine block sums
//unsigned int* h_block_sums = new unsigned int[grid_sz];
//checkCudaErrors(hipMemcpy(h_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, hipMemcpyDeviceToHost));
//std::cout << "Block sums: ";
//for (int i = 0; i < grid_sz; ++i)
//{
// std::cout << h_block_sums[i] << ", ";
//}
//std::cout << std::endl;
//std::cout << "Block sums length: " << grid_sz << std::endl;
//delete[] h_block_sums;
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
hipLaunchKernelGGL(( gpu_add_block_sums), dim3(grid_sz), dim3(block_sz), 0, 0, d_out, d_out, d_block_sums, numElems);
checkCudaErrors(hipFree(d_block_sums));
}
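// Minimal usage sketch (assumes scan.h declares sum_scan_blelloch and
// checkCudaErrors, and that h_in holds N unsigned ints on the host):
// unsigned int *d_in, *d_out;
// checkCudaErrors(hipMalloc(&d_in, N * sizeof(unsigned int)));
// checkCudaErrors(hipMalloc(&d_out, N * sizeof(unsigned int)));
// checkCudaErrors(hipMemcpy(d_in, h_in, N * sizeof(unsigned int), hipMemcpyHostToDevice));
// sum_scan_blelloch(d_out, d_in, N); // d_out now holds the exclusive prefix sum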
| 8c8b699545d882f790645c50b60dec2dcdcefef1.cu | #include "scan.h"
#define MAX_BLOCK_SZ 128
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
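// Worked example with LOG_NUM_BANKS = 5 (32 banks): logical index n is stored at
// physical index n + (n >> 5), so 31 -> 31, 32 -> 33 and 64 -> 66. Without this
// padding, logical indices 0, 32 and 64 would all fall into shared-memory bank 0.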
__global__
void gpu_add_block_sums(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const size_t numElems)
{
//unsigned int glbl_t_idx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int d_block_sum_val = d_block_sums[blockIdx.x];
//unsigned int d_in_val_0 = 0;
//unsigned int d_in_val_1 = 0;
// Simple implementation's performance is not significantly (if at all)
// better than previous verbose implementation
unsigned int cpy_idx = 2 * blockIdx.x * blockDim.x + threadIdx.x;
if (cpy_idx < numElems)
{
d_out[cpy_idx] = d_in[cpy_idx] + d_block_sum_val;
if (cpy_idx + blockDim.x < numElems)
d_out[cpy_idx + blockDim.x] = d_in[cpy_idx + blockDim.x] + d_block_sum_val;
}
//if (2 * glbl_t_idx < numElems)
//{
// d_out[2 * glbl_t_idx] = d_in[2 * glbl_t_idx] + d_block_sum_val;
// if (2 * glbl_t_idx + 1 < numElems)
// d_out[2 * glbl_t_idx + 1] = d_in[2 * glbl_t_idx + 1] + d_block_sum_val;
//}
//if (2 * glbl_t_idx < numElems)
//{
// d_in_val_0 = d_in[2 * glbl_t_idx];
// if (2 * glbl_t_idx + 1 < numElems)
// d_in_val_1 = d_in[2 * glbl_t_idx + 1];
//}
//else
// return;
//__syncthreads();
//d_out[2 * glbl_t_idx] = d_in_val_0 + d_block_sum_val;
//if (2 * glbl_t_idx + 1 < numElems)
// d_out[2 * glbl_t_idx + 1] = d_in_val_1 + d_block_sum_val;
}
// Modified version of Mark Harris' implementation of the Blelloch scan
// according to https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf
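// Worked example for one 8-element block: input [3 1 7 0 4 1 6 3] -> the upsweep
// builds partial sums in place, the block total (25) is written to d_block_sums
// and the last element is zeroed, and the downsweep leaves the exclusive scan
// [0 3 4 11 11 15 16 22] in shared memory.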
__global__
void gpu_prescan(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const unsigned int len,
const unsigned int shmem_sz,
const unsigned int max_elems_per_block)
{
// Allocated on invocation
extern __shared__ unsigned int s_out[];
int thid = threadIdx.x;
int ai = thid;
int bi = thid + blockDim.x;
// Zero out the shared memory
// Helpful especially when input size is not power of two
s_out[thid] = 0;
s_out[thid + blockDim.x] = 0;
// If CONFLICT_FREE_OFFSET is used, shared memory size
// must be 2 * blockDim.x + (2 * blockDim.x) / NUM_BANKS
s_out[thid + blockDim.x + (blockDim.x >> LOG_NUM_BANKS)] = 0;
__syncthreads();
// Copy d_in to shared memory
// Note that d_in's elements are scattered into shared memory
// in light of avoiding bank conflicts
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + threadIdx.x;
if (cpy_idx < len)
{
s_out[ai + CONFLICT_FREE_OFFSET(ai)] = d_in[cpy_idx];
if (cpy_idx + blockDim.x < len)
s_out[bi + CONFLICT_FREE_OFFSET(bi)] = d_in[cpy_idx + blockDim.x];
}
// For both upsweep and downsweep:
// Sequential indices with conflict free padding
// Amount of padding = target index / num banks
// This "shifts" the target indices by one every multiple
// of the num banks
// offset controls the stride and starting index of
// target elems at every iteration
// d just controls which threads are active
// Sweeps are pivoted on the last element of shared memory
// Upsweep/Reduce step
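// e.g. in the first iteration (offset == 1) thread 0 does s_out[1] += s_out[0],
// thread 1 does s_out[3] += s_out[2], and so on, with every logical index shifted
// by CONFLICT_FREE_OFFSET before it touches shared memory.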
int offset = 1;
for (int d = max_elems_per_block >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_out[bi] += s_out[ai];
}
offset <<= 1;
}
// Save the total sum on the global block sums array
// Then clear the last element on the shared memory
if (thid == 0)
{
d_block_sums[blockIdx.x] = s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)];
s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)] = 0;
}
// Downsweep step
for (int d = 1; d < max_elems_per_block; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned int temp = s_out[ai];
s_out[ai] = s_out[bi];
s_out[bi] += temp;
}
}
__syncthreads();
// Copy contents of shared memory to global memory
if (cpy_idx < len)
{
d_out[cpy_idx] = s_out[ai + CONFLICT_FREE_OFFSET(ai)];
if (cpy_idx + blockDim.x < len)
d_out[cpy_idx + blockDim.x] = s_out[bi + CONFLICT_FREE_OFFSET(bi)];
}
}
void sum_scan_blelloch(unsigned int* const d_out,
const unsigned int* const d_in,
const size_t numElems)
{
// Zero out d_out
checkCudaErrors(cudaMemset(d_out, 0, numElems * sizeof(unsigned int)));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) std::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
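// e.g. numElems = 1000 with max_elems_per_block = 128: 1000 / 128 = 7 full blocks
// plus a 104-element remainder, so grid_sz becomes 8.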
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
checkCudaErrors(cudaMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(cudaMemset(d_block_sums, 0, sizeof(unsigned int) * grid_sz));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
gpu_prescan<<<grid_sz, block_sz, sizeof(unsigned int) * shmem_sz>>>(d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
checkCudaErrors(cudaMalloc(&d_dummy_blocks_sums, sizeof(unsigned int)));
checkCudaErrors(cudaMemset(d_dummy_blocks_sums, 0, sizeof(unsigned int)));
//gpu_sum_scan_blelloch<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz);
gpu_prescan<<<1, block_sz, sizeof(unsigned int) * shmem_sz>>>(d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
checkCudaErrors(cudaFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
checkCudaErrors(cudaMalloc(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
checkCudaErrors(cudaMemcpy(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, cudaMemcpyDeviceToDevice));
sum_scan_blelloch(d_block_sums, d_in_block_sums, grid_sz);
checkCudaErrors(cudaFree(d_in_block_sums));
}
//// Uncomment to examine block sums
//unsigned int* h_block_sums = new unsigned int[grid_sz];
//checkCudaErrors(cudaMemcpy(h_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, cudaMemcpyDeviceToHost));
//std::cout << "Block sums: ";
//for (int i = 0; i < grid_sz; ++i)
//{
// std::cout << h_block_sums[i] << ", ";
//}
//std::cout << std::endl;
//std::cout << "Block sums length: " << grid_sz << std::endl;
//delete[] h_block_sums;
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
gpu_add_block_sums<<<grid_sz, block_sz>>>(d_out, d_out, d_block_sums, numElems);
checkCudaErrors(cudaFree(d_block_sums));
}
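// Minimal usage sketch (assumes scan.h declares sum_scan_blelloch and
// checkCudaErrors, and that h_in holds N unsigned ints on the host):
// unsigned int *d_in, *d_out;
// checkCudaErrors(cudaMalloc(&d_in, N * sizeof(unsigned int)));
// checkCudaErrors(cudaMalloc(&d_out, N * sizeof(unsigned int)));
// checkCudaErrors(cudaMemcpy(d_in, h_in, N * sizeof(unsigned int), cudaMemcpyHostToDevice));
// sum_scan_blelloch(d_out, d_in, N); // d_out now holds the exclusive prefix sum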
|
3321cb4779cdab1073ffc16a69eafdc3ea437065.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ReduceOps.h>
#include <ATen/jit_macros.h>
#include <ATen/OpMathType.h>
namespace at::native {
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = scalar_t>
struct sum_functor {
void operator()(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(
iter, func_wrapper<out_t>([] GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a + b;
}));
}
};
// jiterated specialization for `complex<Half>`
constexpr char sum_name[] = "sum";
template <>
struct sum_functor<c10::complex<at::Half>> {
// jiterator reduction fails on windows
// Ref: https://github.com/pytorch/pytorch/issues/77305
#if AT_USE_JITERATOR() && !defined(_MSC_VER)
void operator()(TensorIterator& iter) {
using scalar_t = c10::complex<at::Half>;
std::string func = jiterator_stringify(
arg_t combine(arg_t a, arg_t b) {
return a + b;
}
);
jitted_gpu_reduce_kernel<sum_name, scalar_t, scalar_t>(
iter, func, 0.);
}
#else
void operator()(TensorIterator& iter) {
using scalar_t = c10::complex<at::Half>;
using acc_t = at::opmath_type<scalar_t>;
gpu_reduce_kernel<scalar_t, scalar_t>(
iter, func_wrapper<scalar_t>([] GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a + b;
}), acc_t{0.});
}
#endif
};
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = scalar_t>
struct nansum_functor {
void operator()(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(
iter, NanSumOps<acc_t, out_t>{});
}
};
constexpr char prod_name[] = "prod";
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = scalar_t>
struct prod_functor {
// jiterator reduction fails on windows
// Ref: https://github.com/pytorch/pytorch/issues/77305
#if AT_USE_JITERATOR() && !defined(_MSC_VER)
void operator()(TensorIterator& iter) {
std::string func = jiterator_stringify(
arg_t combine(arg_t a, arg_t b) {
return a * b;
}
);
jitted_gpu_reduce_kernel<prod_name, scalar_t, out_t>(
iter, func, 1.);
}
#else
void operator()(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(
iter, func_wrapper<out_t>([] GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a * b;
}), 1.);
}
#endif
};
// Workaround for the error: '*' in boolean context, suggest '&&' instead [-Werror=int-in-bool-context]
template <>
struct prod_functor<bool> {
void operator()(TensorIterator& iter) {
gpu_reduce_kernel<bool, bool>(
iter, func_wrapper<bool>([] GPU_LAMBDA(bool a, bool b) -> bool {
return a && b;
}), 1);
}
};
// jiterated specialization for `complex<Half>`
template <>
struct prod_functor<c10::complex<at::Half>> {
// jiterator reduction fails on windows
// Ref: https://github.com/pytorch/pytorch/issues/77305
#if AT_USE_JITERATOR() && !defined(_MSC_VER)
void operator()(TensorIterator& iter) {
using scalar_t = c10::complex<at::Half>;
std::string func =
jiterator_stringify(arg_t combine(arg_t a, arg_t b) { return a * b; });
jitted_gpu_reduce_kernel<prod_name, scalar_t, scalar_t>(iter, func, 1.);
}
#else
void operator()(TensorIterator& iter) {
using scalar_t = c10::complex<at::Half>;
using acc_t = at::opmath_type<scalar_t>;
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
func_wrapper<scalar_t>(
[] GPU_LAMBDA(acc_t a, acc_t b) -> acc_t { return a * b; }),
acc_t{1.});
}
#endif
};
// The function `reduce_dispatch` below dispatches to the kernel based
// on the type of `iter`. It takes care of the common logic
// for handling Half-Precision floating types.
// Otherwise the functor `op` is called to dispatch to the kernel
// of relevant type.
//
// Note: Functor `op` should take care of all the types to be supported
// except for `at::Half` and `at::BFloat16`.
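// For example (illustrative, based on the branches below): reducing a Half
// tensor runs OpFunctor<Half, float> so accumulation happens in float; reducing
// a Half input into a Float output takes the fused cast+reduce specialization
// OpFunctor<Half, float, float>; BFloat16 follows the same pattern, and every
// other dtype falls through to the general dispatcher `op`.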
template <
template <
typename scalar_t,
typename acc_t = scalar_t,
typename out_t = scalar_t>
typename OpFunctor,
typename GeneralDispatcher>
static void reduce_dispatch(TensorIterator& iter, GeneralDispatcher op) {
if (iter.dtype() == kHalf) {
return OpFunctor<at::Half, float>{}(iter);
} else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return OpFunctor<at::Half, float, float>{}(iter);
} else if (iter.dtype() == kBFloat16) {
return OpFunctor<at::BFloat16, float>{}(iter);
} else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return OpFunctor<at::BFloat16, float, float>{}(iter);
}
op(iter);
}
static void sum_kernel_cuda(TensorIterator& iter){
auto general_dispatcher = [](TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
kBool, kComplexHalf, iter.dtype(), "sum_cuda", [&]() {
sum_functor<scalar_t>{}(iter);
});
};
reduce_dispatch<sum_functor>(iter, general_dispatcher);
}
static void nansum_kernel_cuda(TensorIterator& iter) {
auto general_dispatcher = [](TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "nansum_cuda", [&]() {
nansum_functor<scalar_t>{}(iter);
});
};
reduce_dispatch<nansum_functor>(iter, general_dispatcher);
}
static void prod_kernel_cuda(TensorIterator& iter) {
auto general_dispatcher = [](TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kComplexHalf, kBool, iter.dtype(), "prod_cuda", [&]() {
prod_functor<scalar_t>{}(iter);
});
};
reduce_dispatch<prod_functor>(iter, general_dispatcher);
}
REGISTER_DISPATCH(sum_stub, &sum_kernel_cuda);
REGISTER_DISPATCH(nansum_stub, &nansum_kernel_cuda);
REGISTER_DISPATCH(prod_stub, &prod_kernel_cuda);
} // namespace at::native
| 3321cb4779cdab1073ffc16a69eafdc3ea437065.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ReduceOps.h>
#include <ATen/jit_macros.h>
#include <ATen/OpMathType.h>
namespace at::native {
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = scalar_t>
struct sum_functor {
void operator()(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(
iter, func_wrapper<out_t>([] GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a + b;
}));
}
};
// jiterated specialization for `complex<Half>`
constexpr char sum_name[] = "sum";
template <>
struct sum_functor<c10::complex<at::Half>> {
// jiterator reduction fails on windows
// Ref: https://github.com/pytorch/pytorch/issues/77305
#if AT_USE_JITERATOR() && !defined(_MSC_VER)
void operator()(TensorIterator& iter) {
using scalar_t = c10::complex<at::Half>;
std::string func = jiterator_stringify(
arg_t combine(arg_t a, arg_t b) {
return a + b;
}
);
jitted_gpu_reduce_kernel<sum_name, scalar_t, scalar_t>(
iter, func, 0.);
}
#else
void operator()(TensorIterator& iter) {
using scalar_t = c10::complex<at::Half>;
using acc_t = at::opmath_type<scalar_t>;
gpu_reduce_kernel<scalar_t, scalar_t>(
iter, func_wrapper<scalar_t>([] GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a + b;
}), acc_t{0.});
}
#endif
};
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = scalar_t>
struct nansum_functor {
void operator()(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(
iter, NanSumOps<acc_t, out_t>{});
}
};
constexpr char prod_name[] = "prod";
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = scalar_t>
struct prod_functor {
// jiterator reduction fails on windows
// Ref: https://github.com/pytorch/pytorch/issues/77305
#if AT_USE_JITERATOR() && !defined(_MSC_VER)
void operator()(TensorIterator& iter) {
std::string func = jiterator_stringify(
arg_t combine(arg_t a, arg_t b) {
return a * b;
}
);
jitted_gpu_reduce_kernel<prod_name, scalar_t, out_t>(
iter, func, 1.);
}
#else
void operator()(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(
iter, func_wrapper<out_t>([] GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a * b;
}), 1.);
}
#endif
};
// Workaround for the error: '*' in boolean context, suggest '&&' instead [-Werror=int-in-bool-context]
template <>
struct prod_functor<bool> {
void operator()(TensorIterator& iter) {
gpu_reduce_kernel<bool, bool>(
iter, func_wrapper<bool>([] GPU_LAMBDA(bool a, bool b) -> bool {
return a && b;
}), 1);
}
};
// jiterated specialization for `complex<Half>`
template <>
struct prod_functor<c10::complex<at::Half>> {
// jiterator reduction fails on windows
// Ref: https://github.com/pytorch/pytorch/issues/77305
#if AT_USE_JITERATOR() && !defined(_MSC_VER)
void operator()(TensorIterator& iter) {
using scalar_t = c10::complex<at::Half>;
std::string func =
jiterator_stringify(arg_t combine(arg_t a, arg_t b) { return a * b; });
jitted_gpu_reduce_kernel<prod_name, scalar_t, scalar_t>(iter, func, 1.);
}
#else
void operator()(TensorIterator& iter) {
using scalar_t = c10::complex<at::Half>;
using acc_t = at::opmath_type<scalar_t>;
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
func_wrapper<scalar_t>(
[] GPU_LAMBDA(acc_t a, acc_t b) -> acc_t { return a * b; }),
acc_t{1.});
}
#endif
};
// The function `reduce_dispatch` below dispatches to the kernel based
// on the type of `iter`. It takes care of the common logic
// for handling Half-Precision floating types.
// Otherwise the functor `op` is called to dispatch to the kernel
// of relevant type.
//
// Note: Functor `op` should take care of all the types to be supported
// except for `at::Half` and `at::BFloat16`.
template <
template <
typename scalar_t,
typename acc_t = scalar_t,
typename out_t = scalar_t>
typename OpFunctor,
typename GeneralDispatcher>
static void reduce_dispatch(TensorIterator& iter, GeneralDispatcher op) {
if (iter.dtype() == kHalf) {
return OpFunctor<at::Half, float>{}(iter);
} else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return OpFunctor<at::Half, float, float>{}(iter);
} else if (iter.dtype() == kBFloat16) {
return OpFunctor<at::BFloat16, float>{}(iter);
} else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return OpFunctor<at::BFloat16, float, float>{}(iter);
}
op(iter);
}
static void sum_kernel_cuda(TensorIterator& iter){
auto general_dispatcher = [](TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
kBool, kComplexHalf, iter.dtype(), "sum_cuda", [&]() {
sum_functor<scalar_t>{}(iter);
});
};
reduce_dispatch<sum_functor>(iter, general_dispatcher);
}
static void nansum_kernel_cuda(TensorIterator& iter) {
auto general_dispatcher = [](TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "nansum_cuda", [&]() {
nansum_functor<scalar_t>{}(iter);
});
};
reduce_dispatch<nansum_functor>(iter, general_dispatcher);
}
static void prod_kernel_cuda(TensorIterator& iter) {
auto general_dispatcher = [](TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kComplexHalf, kBool, iter.dtype(), "prod_cuda", [&]() {
prod_functor<scalar_t>{}(iter);
});
};
reduce_dispatch<prod_functor>(iter, general_dispatcher);
}
REGISTER_DISPATCH(sum_stub, &sum_kernel_cuda);
REGISTER_DISPATCH(nansum_stub, &nansum_kernel_cuda);
REGISTER_DISPATCH(prod_stub, &prod_kernel_cuda);
} // namespace at::native
|
1f1c4264cfafbfb24f8c95e7c04564e54e2f7a50.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <string>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <fstream>
#include "MyCudaToolkit.h"
#include "RandomLUT.h"
using namespace std;
__global__ void getLUT(double *lut, hiprandStateXORWOW_t* states, unsigned long long seed, size_t size) {
unsigned int bid = blockIdx.x;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= size) return; // guard: valid LUT indices are [0, size)
hiprand_init(seed, tid, 0, &states[tid]);
lut[tid] = hiprand_uniform_double(&states[tid]);
}
void PrintRndmLUT() {
//Get LookUpTable Size
const int dim = 2;
int dim_size[dim] = { 100, 50 };
int total_size = 1;
cout << "Printing random LUT with dimension: ";
for (int i = 0; i < dim; i++) {
cout << dim_size[i];
if (i < dim - 1) cout << " * " ;
total_size *= dim_size[i];
}
cout << endl;
//Define Block Size
dim3 block;
dim3 grid;
block.x = 1024;
grid.x = (total_size - 1) / block.x + 1;
//Allocate storage for LUT in host and device
double iStart, iElaps;
iStart = cpuSecond();
double* h_lut = nullptr;
double* d_lut = nullptr;
CHECK( hipMalloc((void**)&d_lut, total_size * sizeof(double)) );
h_lut = (double*)malloc(total_size * sizeof(double));
//Generate random numbers for lut
hiprandStateXORWOW_t* states;
CHECK( hipMalloc( (void**)&states, sizeof(hiprandStateXORWOW_t) * total_size) );
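	// Note: the launch below passes <<<block, grid>>>, so block.x (1024) acts as
	// the grid dimension and grid.x as the threads per block; the product still
	// covers total_size, and the bounds check inside getLUT guards the excess threads.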
getLUT << <block, grid >> > (d_lut, states, time(nullptr), total_size);
CHECK( hipMemcpy(h_lut, d_lut, total_size * sizeof(double), hipMemcpyDeviceToHost) );
CHECK(hipDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("Time elapsed %f ms\n", iElaps);
//Output The LUT into binary format
//for (int i = 0; i < total_size; i++) {
// cout << h_lut[i] <<"\t";
//}
//cout << endl;
ofstream outFile("lut.dat", ios::out | ios::binary);
for (int i = 0; i < total_size; i++) {
outFile.write((char*)&h_lut[i], sizeof(h_lut[i]));
}
outFile.close();
hipFree(d_lut);
hipFree(states);
free(h_lut);
}
void PrintUniformLUT(int prob) {
const int dim = 2;
int dim_size[dim] = { 100, 50 };
int total_size = 1;
cout << "Printing uniform LUT with dimension: ";
for (int i = 0; i < dim; i++) {
cout << dim_size[i];
if (i < dim - 1) cout << " * ";
total_size *= dim_size[i];
}
cout << endl;
double* h_lut = (double*)malloc(sizeof(double) * total_size);
ofstream outFile("lut"+to_string(prob)+".dat", ios::out | ios::binary);
for (int i = 0; i < total_size; i++) {
h_lut[i] = prob / 100.;
outFile.write((char*)&h_lut[i], sizeof(h_lut[i]));
}
outFile.close();
free(h_lut);
} | 1f1c4264cfafbfb24f8c95e7c04564e54e2f7a50.cu | #include <iostream>
#include <vector>
#include <string>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <fstream>
#include "MyCudaToolkit.h"
#include "RandomLUT.h"
using namespace std;
__global__ void getLUT(double *lut, curandStateXORWOW_t* states, unsigned long long seed, size_t size) {
unsigned int bid = blockIdx.x;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= size) return; // guard: valid LUT indices are [0, size)
curand_init(seed, tid, 0, &states[tid]);
lut[tid] = curand_uniform_double(&states[tid]);
}
void PrintRndmLUT() {
//Get LookUpTable Size
const int dim = 2;
int dim_size[dim] = { 100, 50 };
int total_size = 1;
cout << "Printing random LUT with dimension: ";
for (int i = 0; i < dim; i++) {
cout << dim_size[i];
if (i < dim - 1) cout << " * " ;
total_size *= dim_size[i];
}
cout << endl;
//Define Block Size
dim3 block;
dim3 grid;
block.x = 1024;
grid.x = (total_size - 1) / block.x + 1;
//Allocate storage for LUT in host and device
double iStart, iElaps;
iStart = cpuSecond();
double* h_lut = nullptr;
double* d_lut = nullptr;
CHECK( cudaMalloc((void**)&d_lut, total_size * sizeof(double)) );
h_lut = (double*)malloc(total_size * sizeof(double));
//Generate random numbers for lut
curandStateXORWOW_t* states;
CHECK( cudaMalloc( (void**)&states, sizeof(curandStateXORWOW_t) * total_size) );
getLUT << <block, grid >> > (d_lut, states, time(nullptr), total_size);
CHECK( cudaMemcpy(h_lut, d_lut, total_size * sizeof(double), cudaMemcpyDeviceToHost) );
CHECK(cudaDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("Time elapsed %f ms\n", iElaps);
//Output The LUT into binary format
//for (int i = 0; i < total_size; i++) {
// cout << h_lut[i] <<"\t";
//}
//cout << endl;
ofstream outFile("lut.dat", ios::out | ios::binary);
for (int i = 0; i < total_size; i++) {
outFile.write((char*)&h_lut[i], sizeof(h_lut[i]));
}
outFile.close();
cudaFree(d_lut);
cudaFree(states);
free(h_lut);
}
void PrintUniformLUT(int prob) {
const int dim = 2;
int dim_size[dim] = { 100, 50 };
int total_size = 1;
cout << "Printing uniform LUT with dimension: ";
for (int i = 0; i < dim; i++) {
cout << dim_size[i];
if (i < dim - 1) cout << " * ";
total_size *= dim_size[i];
}
cout << endl;
double* h_lut = (double*)malloc(sizeof(double) * total_size);
ofstream outFile("lut"+to_string(prob)+".dat", ios::out | ios::binary);
for (int i = 0; i < total_size; i++) {
h_lut[i] = prob / 100.;
outFile.write((char*)&h_lut[i], sizeof(h_lut[i]));
}
outFile.close();
free(h_lut);
} |
698344ce6a875c52616ac8b6a86f7ac5130ac622.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaclaw5_update_q_cuda2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
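// Each blocks_ entry is a candidate (BLOCKX, BLOCKY) thread-block shape and each
// matrices_ entry an (XSIZE, YSIZE) problem size; main() sweeps both to time the
// kernel for every combination.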
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int mbc = 1;
int mx = 1;
int my = 1;
int meqn = 1;
double dtdx = 1;
double dtdy = 1;
double *qold = NULL;
hipMalloc(&qold, XSIZE*YSIZE);
double *fm = NULL;
hipMalloc(&fm, XSIZE*YSIZE);
double *fp = NULL;
hipMalloc(&fp, XSIZE*YSIZE);
double *gm = NULL;
hipMalloc(&gm, XSIZE*YSIZE);
double *gp = NULL;
hipMalloc(&gp, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((cudaclaw5_update_q_cuda2), dim3(gridBlock), dim3(threadBlock), 0, 0, mbc, mx, my, meqn, dtdx, dtdy, qold, fm, fp, gm, gp);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((cudaclaw5_update_q_cuda2), dim3(gridBlock), dim3(threadBlock), 0, 0, mbc, mx, my, meqn, dtdx, dtdy, qold, fm, fp, gm, gp);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((cudaclaw5_update_q_cuda2), dim3(gridBlock), dim3(threadBlock), 0, 0, mbc, mx, my, meqn, dtdx, dtdy, qold, fm, fp, gm, gp);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 698344ce6a875c52616ac8b6a86f7ac5130ac622.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaclaw5_update_q_cuda2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int mbc = 1;
int mx = 1;
int my = 1;
int meqn = 1;
double dtdx = 1;
double dtdy = 1;
double *qold = NULL;
cudaMalloc(&qold, XSIZE*YSIZE);
double *fm = NULL;
cudaMalloc(&fm, XSIZE*YSIZE);
double *fp = NULL;
cudaMalloc(&fp, XSIZE*YSIZE);
double *gm = NULL;
cudaMalloc(&gm, XSIZE*YSIZE);
double *gp = NULL;
cudaMalloc(&gp, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cudaclaw5_update_q_cuda2<<<gridBlock,threadBlock>>>(mbc,mx,my,meqn,dtdx,dtdy,qold,fm,fp,gm,gp);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaclaw5_update_q_cuda2<<<gridBlock,threadBlock>>>(mbc,mx,my,meqn,dtdx,dtdy,qold,fm,fp,gm,gp);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaclaw5_update_q_cuda2<<<gridBlock,threadBlock>>>(mbc,mx,my,meqn,dtdx,dtdy,qold,fm,fp,gm,gp);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
90553e551edcbe595111a17446ce210b4a619b79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <paddle/fluid/platform/device_context.h>
#include "paddle/fluid/operators/math/detail/gru_gpu_kernel.h"
#include "paddle/fluid/operators/math/detail/gru_kernel.h"
#include "paddle/fluid/operators/math/gru_compute.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
struct GRUUnitFunctor<platform::CUDADeviceContext, T> {
static void compute(const platform::CUDADeviceContext &context,
GRUMetaValue<T> value, int frame_size, int batch_size,
const detail::ActivationType active_node,
const detail::ActivationType active_gate,
bool origin_mode) {
auto stream = context.stream();
dim3 threads;
dim3 grid;
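    // Kernel/launch selection (summary of the branches below): with batch_size == 1
    // on compute capability >= 7.0 the tiled "fast collective" gate and output
    // kernels are used (tile width 8 when frame_size < 16, otherwise 16) and the
    // function returns early; otherwise a plain launch configuration is chosen and
    // the generic reset/final-output kernels run after the GEMMs.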
if (batch_size == 1) {
if (context.GetComputeCapability() >= 70) {
if (frame_size < 16) {
constexpr int tiled_size = 8;
int frame_blocks = (frame_size * 2 + tiled_size - 1) / tiled_size;
threads = dim3(tiled_size, 1);
grid = dim3(frame_blocks, 1);
hipLaunchKernelGGL(( detail::KeFastCollectiveGruGate<
T, tiled_size>), dim3(grid), dim3(threads), 0, stream,
value.gate_value, value.prev_out_value, value.gate_weight,
value.reset_output_value, frame_size, active_gate);
frame_blocks = (frame_size + tiled_size - 1) / tiled_size;
grid = dim3(frame_blocks, 1);
hipLaunchKernelGGL(( detail::KeFastCollectiveGruOut<
T, tiled_size>), dim3(grid), dim3(threads), 0, stream,
value.state_weight, value.prev_out_value, value.output_value,
value.gate_value, value.reset_output_value, frame_size,
active_node, origin_mode);
} else {
constexpr int tiled_size = 16;
int frame_blocks = (frame_size * 2 + tiled_size - 1) / tiled_size;
threads = dim3(tiled_size, 1);
grid = dim3(frame_blocks, 1);
hipLaunchKernelGGL(( detail::KeFastCollectiveGruGate<
T, tiled_size>), dim3(grid), dim3(threads), 0, stream,
value.gate_value, value.prev_out_value, value.gate_weight,
value.reset_output_value, frame_size, active_gate);
frame_blocks = (frame_size + tiled_size - 1) / tiled_size;
grid = dim3(frame_blocks, 1);
hipLaunchKernelGGL(( detail::KeFastCollectiveGruOut<
T, tiled_size>), dim3(grid), dim3(threads), 0, stream,
value.state_weight, value.prev_out_value, value.output_value,
value.gate_value, value.reset_output_value, frame_size,
active_node, origin_mode);
}
return;
} else {
int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
int frame_blocks = (frame_size + 1024 - 1) / 1024;
threads = dim3(frame_per_block, 1);
grid = dim3(frame_blocks, 1);
}
} else {
threads = dim3(32, 32);
grid = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
}
auto blas = phi::funcs::GetBlas<platform::CUDADeviceContext, T>(context);
if (value.prev_out_value) {
blas.GEMM(false, false, batch_size, frame_size * 2, frame_size, 1,
value.prev_out_value, frame_size, value.gate_weight,
frame_size * 2, 1, value.gate_value, frame_size * 3);
}
if (batch_size == 1) {
hipLaunchKernelGGL(( detail::KeGruForwardResetOutput<detail::forward::gru_resetOutput<T>,
/* is_batch= */ false,
T>), dim3(grid), dim3(threads), 0, stream,
detail::forward::gru_resetOutput<T>(), value.gate_value,
value.reset_output_value, value.prev_out_value, frame_size,
batch_size, active_gate);
} else {
hipLaunchKernelGGL(( detail::KeGruForwardResetOutput<detail::forward::gru_resetOutput<T>,
/* is_batch= */ true,
T>), dim3(grid), dim3(threads), 0, stream,
detail::forward::gru_resetOutput<T>(), value.gate_value,
value.reset_output_value, value.prev_out_value, frame_size,
batch_size, active_gate);
}
if (value.prev_out_value) {
blas.GEMM(false, false, batch_size, frame_size, frame_size, 1,
value.reset_output_value, frame_size, value.state_weight,
frame_size, 1, value.gate_value + frame_size * 2,
frame_size * 3);
}
if (batch_size == 1) {
hipLaunchKernelGGL(( detail::KeGruForwardFinalOutput<detail::forward::gru_finalOutput<T>,
/* is_batch= */ false,
T>), dim3(grid), dim3(threads), 0, stream,
detail::forward::gru_finalOutput<T>(), value.gate_value,
value.prev_out_value, value.output_value, frame_size, batch_size,
active_node, origin_mode);
} else {
hipLaunchKernelGGL(( detail::KeGruForwardFinalOutput<detail::forward::gru_finalOutput<T>,
/* is_batch= */ true,
T>), dim3(grid), dim3(threads), 0, stream,
detail::forward::gru_finalOutput<T>(), value.gate_value,
value.prev_out_value, value.output_value, frame_size, batch_size,
active_node, origin_mode);
}
}
};
template <typename T>
struct GRUUnitGradFunctor<platform::CUDADeviceContext, T> {
static void compute(const platform::CUDADeviceContext &context,
GRUMetaValue<T> value, GRUMetaGrad<T> grad,
int frame_size, int batch_size,
const detail::ActivationType active_node,
const detail::ActivationType active_gate,
bool origin_mode) {
auto stream = context.stream();
dim3 threads;
dim3 grid;
if (batch_size == 1) {
int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
int frame_blocks = (frame_size + 1024 - 1) / 1024;
threads = dim3(frame_per_block, 1);
grid = dim3(frame_blocks, 1);
} else {
threads = dim3(32, 32);
grid = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
}
if (batch_size == 1) {
hipLaunchKernelGGL(( detail::KeGruBackwardStateGrad<
detail::backward::gru_stateGrad<T>,
/* is_batch= */ false>), dim3(grid), dim3(threads), 0, stream,
detail::backward::gru_stateGrad<T>(), value.gate_value,
grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
grad.output_grad, frame_size, batch_size, active_node, origin_mode);
} else {
hipLaunchKernelGGL(( detail::KeGruBackwardStateGrad<
detail::backward::gru_stateGrad<T>,
/* is_batch= */ true>), dim3(grid), dim3(threads), 0, stream,
detail::backward::gru_stateGrad<T>(), value.gate_value,
grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
grad.output_grad, frame_size, batch_size, active_node, origin_mode);
}
auto blas = phi::funcs::GetBlas<platform::CUDADeviceContext, T>(context);
if (value.prev_out_value && grad.prev_out_grad) {
blas.GEMM(false, true, batch_size, frame_size, frame_size, 1,
grad.gate_grad + frame_size * 2, frame_size * 3,
value.state_weight, frame_size, 0, grad.reset_output_grad,
frame_size);
if (grad.state_weight_grad) {
blas.GEMM(true, false, frame_size, frame_size, batch_size, 1,
value.reset_output_value, frame_size,
grad.gate_grad + frame_size * 2, frame_size * 3, 1,
grad.state_weight_grad, frame_size);
}
}
if (batch_size == 1) {
hipLaunchKernelGGL(( detail::KeGruBackwardResetGrad<
detail::backward::gru_resetGrad<T>,
/* is_batch= */ false>), dim3(grid), dim3(threads), 0, stream,
detail::backward::gru_resetGrad<T>(), value.gate_value,
grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
grad.reset_output_grad, frame_size, batch_size, active_gate);
} else {
hipLaunchKernelGGL(( detail::KeGruBackwardResetGrad<
detail::backward::gru_resetGrad<T>,
/* is_batch= */ true>), dim3(grid), dim3(threads), 0, stream,
detail::backward::gru_resetGrad<T>(), value.gate_value,
grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
grad.reset_output_grad, frame_size, batch_size, active_gate);
}
if (grad.prev_out_grad && value.prev_out_value) {
blas.GEMM(false, true, batch_size, frame_size, frame_size * 2, 1,
grad.gate_grad, frame_size * 3, value.gate_weight,
frame_size * 2, 1, grad.prev_out_grad, frame_size);
if (grad.gate_weight_grad) {
blas.GEMM(true, false, frame_size, frame_size * 2, batch_size, 1,
value.prev_out_value, frame_size, grad.gate_grad,
frame_size * 3, 1, grad.gate_weight_grad, frame_size * 2);
}
}
}
};
template struct GRUUnitFunctor<platform::CUDADeviceContext, float>;
template struct GRUUnitFunctor<platform::CUDADeviceContext, double>;
template struct GRUUnitGradFunctor<platform::CUDADeviceContext, float>;
template struct GRUUnitGradFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| 90553e551edcbe595111a17446ce210b4a619b79.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <paddle/fluid/platform/device_context.h>
#include "paddle/fluid/operators/math/detail/gru_gpu_kernel.h"
#include "paddle/fluid/operators/math/detail/gru_kernel.h"
#include "paddle/fluid/operators/math/gru_compute.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
struct GRUUnitFunctor<platform::CUDADeviceContext, T> {
static void compute(const platform::CUDADeviceContext &context,
GRUMetaValue<T> value, int frame_size, int batch_size,
const detail::ActivationType active_node,
const detail::ActivationType active_gate,
bool origin_mode) {
auto stream = context.stream();
dim3 threads;
dim3 grid;
if (batch_size == 1) {
if (context.GetComputeCapability() >= 70) {
if (frame_size < 16) {
constexpr int tiled_size = 8;
int frame_blocks = (frame_size * 2 + tiled_size - 1) / tiled_size;
threads = dim3(tiled_size, 1);
grid = dim3(frame_blocks, 1);
detail::KeFastCollectiveGruGate<
T, tiled_size><<<grid, threads, 0, stream>>>(
value.gate_value, value.prev_out_value, value.gate_weight,
value.reset_output_value, frame_size, active_gate);
frame_blocks = (frame_size + tiled_size - 1) / tiled_size;
grid = dim3(frame_blocks, 1);
detail::KeFastCollectiveGruOut<
T, tiled_size><<<grid, threads, 0, stream>>>(
value.state_weight, value.prev_out_value, value.output_value,
value.gate_value, value.reset_output_value, frame_size,
active_node, origin_mode);
} else {
constexpr int tiled_size = 16;
int frame_blocks = (frame_size * 2 + tiled_size - 1) / tiled_size;
threads = dim3(tiled_size, 1);
grid = dim3(frame_blocks, 1);
detail::KeFastCollectiveGruGate<
T, tiled_size><<<grid, threads, 0, stream>>>(
value.gate_value, value.prev_out_value, value.gate_weight,
value.reset_output_value, frame_size, active_gate);
frame_blocks = (frame_size + tiled_size - 1) / tiled_size;
grid = dim3(frame_blocks, 1);
detail::KeFastCollectiveGruOut<
T, tiled_size><<<grid, threads, 0, stream>>>(
value.state_weight, value.prev_out_value, value.output_value,
value.gate_value, value.reset_output_value, frame_size,
active_node, origin_mode);
}
return;
} else {
int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
int frame_blocks = (frame_size + 1024 - 1) / 1024;
threads = dim3(frame_per_block, 1);
grid = dim3(frame_blocks, 1);
}
} else {
threads = dim3(32, 32);
grid = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
}
auto blas = phi::funcs::GetBlas<platform::CUDADeviceContext, T>(context);
if (value.prev_out_value) {
blas.GEMM(false, false, batch_size, frame_size * 2, frame_size, 1,
value.prev_out_value, frame_size, value.gate_weight,
frame_size * 2, 1, value.gate_value, frame_size * 3);
}
if (batch_size == 1) {
detail::KeGruForwardResetOutput<detail::forward::gru_resetOutput<T>,
/* is_batch= */ false,
T><<<grid, threads, 0, stream>>>(
detail::forward::gru_resetOutput<T>(), value.gate_value,
value.reset_output_value, value.prev_out_value, frame_size,
batch_size, active_gate);
} else {
detail::KeGruForwardResetOutput<detail::forward::gru_resetOutput<T>,
/* is_batch= */ true,
T><<<grid, threads, 0, stream>>>(
detail::forward::gru_resetOutput<T>(), value.gate_value,
value.reset_output_value, value.prev_out_value, frame_size,
batch_size, active_gate);
}
if (value.prev_out_value) {
blas.GEMM(false, false, batch_size, frame_size, frame_size, 1,
value.reset_output_value, frame_size, value.state_weight,
frame_size, 1, value.gate_value + frame_size * 2,
frame_size * 3);
}
if (batch_size == 1) {
detail::KeGruForwardFinalOutput<detail::forward::gru_finalOutput<T>,
/* is_batch= */ false,
T><<<grid, threads, 0, stream>>>(
detail::forward::gru_finalOutput<T>(), value.gate_value,
value.prev_out_value, value.output_value, frame_size, batch_size,
active_node, origin_mode);
} else {
detail::KeGruForwardFinalOutput<detail::forward::gru_finalOutput<T>,
/* is_batch= */ true,
T><<<grid, threads, 0, stream>>>(
detail::forward::gru_finalOutput<T>(), value.gate_value,
value.prev_out_value, value.output_value, frame_size, batch_size,
active_node, origin_mode);
}
}
};
template <typename T>
struct GRUUnitGradFunctor<platform::CUDADeviceContext, T> {
static void compute(const platform::CUDADeviceContext &context,
GRUMetaValue<T> value, GRUMetaGrad<T> grad,
int frame_size, int batch_size,
const detail::ActivationType active_node,
const detail::ActivationType active_gate,
bool origin_mode) {
auto stream = context.stream();
dim3 threads;
dim3 grid;
if (batch_size == 1) {
int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
int frame_blocks = (frame_size + 1024 - 1) / 1024;
threads = dim3(frame_per_block, 1);
grid = dim3(frame_blocks, 1);
} else {
threads = dim3(32, 32);
grid = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
}
if (batch_size == 1) {
detail::KeGruBackwardStateGrad<
detail::backward::gru_stateGrad<T>,
/* is_batch= */ false><<<grid, threads, 0, stream>>>(
detail::backward::gru_stateGrad<T>(), value.gate_value,
grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
grad.output_grad, frame_size, batch_size, active_node, origin_mode);
} else {
detail::KeGruBackwardStateGrad<
detail::backward::gru_stateGrad<T>,
/* is_batch= */ true><<<grid, threads, 0, stream>>>(
detail::backward::gru_stateGrad<T>(), value.gate_value,
grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
grad.output_grad, frame_size, batch_size, active_node, origin_mode);
}
auto blas = phi::funcs::GetBlas<platform::CUDADeviceContext, T>(context);
if (value.prev_out_value && grad.prev_out_grad) {
blas.GEMM(false, true, batch_size, frame_size, frame_size, 1,
grad.gate_grad + frame_size * 2, frame_size * 3,
value.state_weight, frame_size, 0, grad.reset_output_grad,
frame_size);
if (grad.state_weight_grad) {
blas.GEMM(true, false, frame_size, frame_size, batch_size, 1,
value.reset_output_value, frame_size,
grad.gate_grad + frame_size * 2, frame_size * 3, 1,
grad.state_weight_grad, frame_size);
}
}
if (batch_size == 1) {
detail::KeGruBackwardResetGrad<
detail::backward::gru_resetGrad<T>,
/* is_batch= */ false><<<grid, threads, 0, stream>>>(
detail::backward::gru_resetGrad<T>(), value.gate_value,
grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
grad.reset_output_grad, frame_size, batch_size, active_gate);
} else {
detail::KeGruBackwardResetGrad<
detail::backward::gru_resetGrad<T>,
/* is_batch= */ true><<<grid, threads, 0, stream>>>(
detail::backward::gru_resetGrad<T>(), value.gate_value,
grad.gate_grad, value.prev_out_value, grad.prev_out_grad,
grad.reset_output_grad, frame_size, batch_size, active_gate);
}
if (grad.prev_out_grad && value.prev_out_value) {
blas.GEMM(false, true, batch_size, frame_size, frame_size * 2, 1,
grad.gate_grad, frame_size * 3, value.gate_weight,
frame_size * 2, 1, grad.prev_out_grad, frame_size);
if (grad.gate_weight_grad) {
blas.GEMM(true, false, frame_size, frame_size * 2, batch_size, 1,
value.prev_out_value, frame_size, grad.gate_grad,
frame_size * 3, 1, grad.gate_weight_grad, frame_size * 2);
}
}
}
};
template struct GRUUnitFunctor<platform::CUDADeviceContext, float>;
template struct GRUUnitFunctor<platform::CUDADeviceContext, double>;
template struct GRUUnitGradFunctor<platform::CUDADeviceContext, float>;
template struct GRUUnitGradFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
208567852e6d64145e568b124cad1b1aa970adba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/**
*
* PARAMETERS
*
*/
#define VERBOSE
//#define DRY_RUN
#define USE_CPU /*
#define PROFILE_CPU // */
//#define USE_GPU /*
#define PROFILE_GPU // */
#define CPU_OUTPUT_FILE "julia_cpu.ppm"
#define GPU_OUTPUT_FILE "julia_gpu.ppm"
#define JULIA_X -0.7817635270541083
#define JULIA_Y 0.13465681362725457
#define SCALE 1.6
#define DIM 4000
//#define PALE /*
#define WHITE // */
#define GRID_SIZE 1024 /*
#define GRID_SIZE_2D DIM,DIM // */
#define BLOCK_SIZE 129 /*
#define BLOCK_SIZE_2D 1,1 // */
/**
*
* CUDA UTILS
*
*/
#define cuda_try( ans ) { __cuda_try((ans), __FILE__, __LINE__); }
inline void __cuda_try( hipError_t code, const char * file, int line, bool abort=true ) {
if (code != hipSuccess) {
fprintf(stderr, "GPU assert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**
*
* UTILS
*
*/
#if defined(GRID_SIZE) && !defined(GRID_SIZE_2D)
#define GRID_DIM GRID_SIZE
#elif !defined(GRID_SIZE) && defined(GRID_SIZE_2D)
#define GRID_DIM GRID_SIZE_2D
#endif
#if defined(BLOCK_SIZE) && !defined(BLOCK_SIZE_2D)
#define BLOCK_DIM BLOCK_SIZE
#elif !defined(BLOCK_SIZE) && defined(BLOCK_SIZE_2D)
#define BLOCK_DIM BLOCK_SIZE_2D
#endif
#define STR_EXPAND(...) #__VA_ARGS__
#define ARG(...) STR_EXPAND(__VA_ARGS__)
struct cppComplex {
float r;
float i;
__host__ __device__ cppComplex( float a, float b ) : r(a), i(b) {}
__host__ __device__ float magnitude2( void ) {
return r * r + i * i;
}
__host__ __device__ cppComplex operator *( const cppComplex& a ) {
return cppComplex(r * a.r - i * a.i, i * a.r + r * a.i);
}
__host__ __device__ cppComplex operator +( const cppComplex& a ) {
return cppComplex(r + a.r, i + a.i);
}
};
int julia_cpu( int x, int y ) {
float jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
float jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
cppComplex c(JULIA_X, JULIA_Y);
cppComplex a(jx, jy);
int i = 0;
for(; i < 200; i ++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
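// Escape-time test: iterate z <- z*z + c up to 200 times and return 1 (point
// treated as inside the Julia set) unless |z|^2 exceeds 1000 first, in which
// case return 0.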
void cpu_draw( unsigned char * pixels ) {
#ifdef VERBOSE
printf("cpu drawing...\n");
#endif
#ifdef PROFILE_CPU
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
for (int x = 0; x < DIM; ++x) {
for (int y = 0; y < DIM; ++ y) {
pixels[x + y * DIM] = 255 * julia_cpu(x, y);
}
}
#ifdef PROFILE_CPU
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("%f ms\n", time);
#endif
#ifdef VERBOSE
printf("cpu drawing complete\n");
#endif
}
__global__ void kernel( unsigned char * ptr, int thread_size ) {
int t_id =
#if defined(GRID_SIZE) && !defined(GRID_SIZE_2D)
blockIdx.x
#elif !defined(GRID_SIZE) && defined(GRID_SIZE_2D)
(blockIdx.x + blockIdx.y * gridDim.x)
#endif
#if defined(BLOCK_SIZE) && !defined(BLOCK_SIZE_2D)
* blockDim.x
+ threadIdx.x;
#elif !defined(BLOCK_SIZE) && defined(BLOCK_SIZE_2D)
* (blockDim.x * blockDim.y)
+ (threadIdx.y * blockDim.x + threadIdx.x);
#endif
int offset = thread_size * t_id;
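    // Each thread processes thread_size consecutive pixels starting at offset,
    // updating (x, y) and the mapped (jx, jy) coordinates incrementally instead of
    // recomputing them from the flat index every iteration.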
int i = 0;
cppComplex c(JULIA_X, JULIA_Y);
int x = (i + offset) % DIM;
int y = (i + offset) / DIM;
float jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
float jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
for(; i < thread_size && offset + i < DIM * DIM; i ++) {
cppComplex a(jx, jy);
int j = 0;
for(; j < 200; j ++){
a = a * a + c;
if (a.magnitude2() > 1000)
break;
}
if (j < 200)
ptr[offset + i] = 0;
else
ptr[offset + i] = 255;
x ++;
if (x == DIM) {
x = 0;
y ++;
jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
}
jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
}
}
void gpu_draw( unsigned char * gpu_pixels ) {
int n = DIM * DIM;
dim3 grid_dim(GRID_DIM);
dim3 block_dim(BLOCK_DIM);
int grid_size = grid_dim.x * grid_dim.y * grid_dim.z;
int block_size = block_dim.x * block_dim.y * block_dim.z;
int thread_size = (n + (grid_size * block_size - 1)) / (grid_size * block_size);
#ifdef VERBOSE
printf("gpu drawing...\n");
printf("problem size %d, grid dim "ARG(GRID_DIM)"=%d, block size "ARG(BLOCK_DIM)"=%d, thread size %d\n", n, grid_size, block_size, thread_size);
#endif
unsigned char * dev_bitmap;
cuda_try(hipMalloc((void **)&dev_bitmap, n * sizeof(unsigned char)));
#ifdef PROFILE_GPU
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
hipLaunchKernelGGL(( kernel), dim3(grid_dim),dim3(block_dim), 0, 0, dev_bitmap, thread_size);
#ifdef PROFILE_GPU
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("%f ms\n", time);
#endif
cuda_try(hipPeekAtLastError());
cuda_try(hipMemcpy(gpu_pixels, dev_bitmap, n * sizeof(unsigned char), hipMemcpyDeviceToHost));
cuda_try(hipFree(dev_bitmap));
#ifdef VERBOSE
printf("gpu drawing complete\n");
#endif
}
void draw_file( char * path, unsigned char * pixels ) {
FILE * f = fopen(path, "wb");
fprintf(f, "P6\n%i %i 255\n", DIM, DIM);
for (int y = 0; y < DIM; y ++) {
for (int x = 0; x < DIM; x ++) {
#if !defined(PALE) && !defined(WHITE)
fputc(pixels[(y * DIM + x)], f);
fputc(0, f);
fputc(0, f);
#elif defined(PALE) && !defined(WHITE)
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.3, f);
fputc(pixels[(y * DIM + x)] * 0.3, f);
#elif defined(WHITE) && !defined(PALE)
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.9, f);
#else
#warning Make up your mind on the color!
#error You must choose either PALE, WHITE, or neither!
#endif
}
}
fclose(f);
}
int main( void ) {
#ifdef VERBOSE
printf("julia set of "ARG(JULIA_X)","ARG(JULIA_Y)" resolution "ARG(DIM)"*"ARG(DIM)" scale "ARG(SCALE)"\n");
#endif
#if defined(USE_CPU) || defined(PROFILE_CPU)
unsigned char * pixels = new unsigned char[DIM * DIM];
cpu_draw(pixels);
#if !defined(DRY_RUN)
draw_file(CPU_OUTPUT_FILE, pixels);
#endif
delete [] pixels;
#endif
#if defined(USE_GPU) || defined(PROFILE_GPU)
unsigned char *gpu_pixels = new unsigned char[DIM * DIM];
gpu_draw(gpu_pixels);
#if !defined(DRY_RUN)
draw_file(GPU_OUTPUT_FILE, gpu_pixels);
#endif
delete [] gpu_pixels;
#endif
}
| 208567852e6d64145e568b124cad1b1aa970adba.cu | #include <stdio.h>
/**
*
* PARAMETERS
*
*/
#define VERBOSE
//#define DRY_RUN
#define USE_CPU /*
#define PROFILE_CPU // */
//#define USE_GPU /*
#define PROFILE_GPU // */
#define CPU_OUTPUT_FILE "julia_cpu.ppm"
#define GPU_OUTPUT_FILE "julia_gpu.ppm"
#define JULIA_X -0.7817635270541083
#define JULIA_Y 0.13465681362725457
#define SCALE 1.6
#define DIM 4000
//#define PALE /*
#define WHITE // */
#define GRID_SIZE 1024 /*
#define GRID_SIZE_2D DIM,DIM // */
#define BLOCK_SIZE 129 /*
#define BLOCK_SIZE_2D 1,1 // */
/**
*
* CUDA UTILS
*
*/
#define cuda_try( ans ) { __cuda_try((ans), __FILE__, __LINE__); }
inline void __cuda_try( cudaError_t code, const char * file, int line, bool abort=true ) {
if (code != cudaSuccess) {
fprintf(stderr, "GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**
*
* UTILS
*
*/
#if defined(GRID_SIZE) && !defined(GRID_SIZE_2D)
#define GRID_DIM GRID_SIZE
#elif !defined(GRID_SIZE) && defined(GRID_SIZE_2D)
#define GRID_DIM GRID_SIZE_2D
#endif
#if defined(BLOCK_SIZE) && !defined(BLOCK_SIZE_2D)
#define BLOCK_DIM BLOCK_SIZE
#elif !defined(BLOCK_SIZE) && defined(BLOCK_SIZE_2D)
#define BLOCK_DIM BLOCK_SIZE_2D
#endif
#define STR_EXPAND(...) #__VA_ARGS__
#define ARG(...) STR_EXPAND(__VA_ARGS__)
struct cppComplex {
float r;
float i;
__host__ __device__ cppComplex( float a, float b ) : r(a), i(b) {}
__host__ __device__ float magnitude2( void ) {
return r * r + i * i;
}
__host__ __device__ cppComplex operator *( const cppComplex& a ) {
return cppComplex(r * a.r - i * a.i, i * a.r + r * a.i);
}
__host__ __device__ cppComplex operator +( const cppComplex& a ) {
return cppComplex(r + a.r, i + a.i);
}
};
int julia_cpu( int x, int y ) {
float jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
float jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
cppComplex c(JULIA_X, JULIA_Y);
cppComplex a(jx, jy);
int i = 0;
for(; i < 200; i ++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
void cpu_draw( unsigned char * pixels ) {
#ifdef VERBOSE
printf("cpu drawing...\n");
#endif
#ifdef PROFILE_CPU
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
for (int x = 0; x < DIM; ++x) {
for (int y = 0; y < DIM; ++ y) {
pixels[x + y * DIM] = 255 * julia_cpu(x, y);
}
}
#ifdef PROFILE_CPU
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("%f ms\n", time);
#endif
#ifdef VERBOSE
printf("cpu drawing complete\n");
#endif
}
__global__ void kernel( unsigned char * ptr, int thread_size ) {
int t_id =
#if defined(GRID_SIZE) && !defined(GRID_SIZE_2D)
blockIdx.x
#elif !defined(GRID_SIZE) && defined(GRID_SIZE_2D)
(blockIdx.x + blockIdx.y * gridDim.x)
#endif
#if defined(BLOCK_SIZE) && !defined(BLOCK_SIZE_2D)
* blockDim.x
+ threadIdx.x;
#elif !defined(BLOCK_SIZE) && defined(BLOCK_SIZE_2D)
* (blockDim.x * blockDim.y)
+ (threadIdx.y * blockDim.x + threadIdx.x);
#endif
int offset = thread_size * t_id;
int i = 0;
cppComplex c(JULIA_X, JULIA_Y);
int x = (i + offset) % DIM;
int y = (i + offset) / DIM;
float jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
float jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
for(; i < thread_size && offset + i < DIM * DIM; i ++) {
cppComplex a(jx, jy);
int j = 0;
for(; j < 200; j ++){
a = a * a + c;
if (a.magnitude2() > 1000)
break;
}
if (j < 200)
ptr[offset + i] = 0;
else
ptr[offset + i] = 255;
x ++;
if (x == DIM) {
x = 0;
y ++;
jy = SCALE * (float)(DIM / 2 - y) / (DIM / 2);
}
jx = SCALE * (float)(DIM / 2 - x) / (DIM / 2);
}
}
void gpu_draw( unsigned char * gpu_pixels ) {
int n = DIM * DIM;
dim3 grid_dim(GRID_DIM);
dim3 block_dim(BLOCK_DIM);
int grid_size = grid_dim.x * grid_dim.y * grid_dim.z;
int block_size = block_dim.x * block_dim.y * block_dim.z;
int thread_size = (n + (grid_size * block_size - 1)) / (grid_size * block_size);
#ifdef VERBOSE
printf("gpu drawing...\n");
printf("problem size %d, grid dim "ARG(GRID_DIM)"=%d, block size "ARG(BLOCK_DIM)"=%d, thread size %d\n", n, grid_size, block_size, thread_size);
#endif
unsigned char * dev_bitmap;
cuda_try(cudaMalloc((void **)&dev_bitmap, n * sizeof(unsigned char)));
#ifdef PROFILE_GPU
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
kernel<<<grid_dim,block_dim>>>(dev_bitmap, thread_size);
#ifdef PROFILE_GPU
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("%f ms\n", time);
#endif
cuda_try(cudaPeekAtLastError());
cuda_try(cudaMemcpy(gpu_pixels, dev_bitmap, n * sizeof(unsigned char), cudaMemcpyDeviceToHost));
cuda_try(cudaFree(dev_bitmap));
#ifdef VERBOSE
printf("gpu drawing complete\n");
#endif
}
void draw_file( char * path, unsigned char * pixels ) {
FILE * f = fopen(path, "wb");
fprintf(f, "P6\n%i %i 255\n", DIM, DIM);
for (int y = 0; y < DIM; y ++) {
for (int x = 0; x < DIM; x ++) {
#if !defined(PALE) && !defined(WHITE)
fputc(pixels[(y * DIM + x)], f);
fputc(0, f);
fputc(0, f);
#elif defined(PALE) && !defined(WHITE)
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.3, f);
fputc(pixels[(y * DIM + x)] * 0.3, f);
#elif defined(WHITE) && !defined(PALE)
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.9, f);
fputc(pixels[(y * DIM + x)] * 0.9, f);
#else
#warning Make up your mind on the color!
#error You must choose either PALE, WHITE, or neither!
#endif
}
}
fclose(f);
}
int main( void ) {
#ifdef VERBOSE
printf("julia set of "ARG(JULIA_X)","ARG(JULIA_Y)" resolution "ARG(DIM)"*"ARG(DIM)" scale "ARG(SCALE)"\n");
#endif
#if defined(USE_CPU) || defined(PROFILE_CPU)
unsigned char * pixels = new unsigned char[DIM * DIM];
cpu_draw(pixels);
#if !defined(DRY_RUN)
draw_file(CPU_OUTPUT_FILE, pixels);
#endif
delete [] pixels;
#endif
#if defined(USE_GPU) || defined(PROFILE_GPU)
unsigned char *gpu_pixels = new unsigned char[DIM * DIM];
gpu_draw(gpu_pixels);
#if !defined(DRY_RUN)
draw_file(GPU_OUTPUT_FILE, gpu_pixels);
#endif
delete [] gpu_pixels;
#endif
}
|
d74fdf22e7fe67e05788d5ca93af4bf2fdf608e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***********************************************************************
Copyright:
Author:Xue Cheng
Date:2017-12-13
Description: Implementation of the GPU gate wrappers
************************************************************************/
#include "GPUGatesWrapper.h"
#include "GPUGates.h"
using namespace std;
#define SET_BLOCKDIM BLOCKDIM = (1 << (psigpu.qnum - 1)) / THREADDIM;
static QSIZE getControllerMask(GATEGPU::Qnum& qnum, int target = 1)
{
int qnum_mask = 0;
// obtain the mask for controller qubit
for (auto iter = qnum.begin(); iter != qnum.end() - target; ++iter)
{
qnum_mask += (1 << *iter);
}
return qnum_mask;
}
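// Example: for qnum = {0, 2, 5} with target = 1, the last entry (5) is the
// target qubit, so the returned controller mask is (1 << 0) + (1 << 2) = 5.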
int GATEGPU::devicecount()
{
int count;
hipGetDeviceCount(&count);
return count;
}
bool getSynchronizeResult(hipError_t cudaStatue, char * pcGate)
{
if (hipSuccess != cudaStatue)
{
cout << "err " << pcGate << " = " << hipGetErrorString(cudaStatue) << endl;
return false;
}
return true;
}
#define GET_SYN_RES(x) hipError_t cudaStatue = hipDeviceSynchronize();\
return getSynchronizeResult(cudaStatue,(x));
bool GATEGPU::destroyState(QState& psi, QState& psigpu, size_t stQnum)
{
if ((nullptr == psi.real) ||
(nullptr == psi.imag) ||
(nullptr == psigpu.real) ||
(nullptr == psigpu.imag))
{
return false;
}
if (stQnum < 30)
{
hipError_t cuda_status = hipFree(psigpu.real);
if (hipSuccess != cuda_status)
{
cout << "psigpu.real free error" << endl;
return false;
}
cuda_status = hipFree(psigpu.imag);
if (hipSuccess != cuda_status)
{
cout << "psigpu.imag free error" << endl;
return false;
}
free(psi.real);
free(psi.imag);
psi.real = nullptr;
psi.imag = nullptr;
psigpu.real = nullptr;
psigpu.imag = nullptr;
}
else
{
hipHostFree(psigpu.real);
hipHostFree(psigpu.imag);
psigpu.real = nullptr;
psigpu.imag = nullptr;
}
return true;
}
bool GATEGPU::clearState(QState& psi, QState& psigpu, size_t stQnum)
{
if ((nullptr == psi.real) ||
(nullptr == psi.imag) ||
(nullptr == psigpu.real) ||
(nullptr == psigpu.imag))
{
return false;
}
if (stQnum < 30)
{
QSIZE BLOCKDIM;
BLOCKDIM = (1 << psigpu.qnum) / THREADDIM;
gpu::initState << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (psigpu.real, psigpu.imag, 1 << (psigpu.qnum));
}
else
{
QSIZE BLOCKDIM;
BLOCKDIM = (1 << psigpu.qnum) / THREADDIM;
gpu::initState << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (psigpu.real, psigpu.imag, 1 << (psigpu.qnum));
}
return true;
}
bool GATEGPU::initstate(QState& psi, QState& psigpu, int qnum)
{
if (qnum >= 30)
{
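        // Zero-copy path: for 30 or more qubits the state vector lives in pinned,
        // mapped host memory (hipHostMallocMapped) and the kernels access it through
        // the mapped device pointer; smaller states get a separate device allocation
        // in the else-branch below.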
if (nullptr == psi.real)
{
hipError_t cuda_status = hipHostMalloc(&psi.real, sizeof(double)*(1ll << qnum), hipHostMallocMapped);
if (cuda_status != hipSuccess)
{
printf("host alloc fail!\n");
return false;
}
hipHostGetDevicePointer(&psigpu.real, psi.real, 0);
}
if (nullptr == psi.imag)
{
hipError_t cuda_status1 = hipHostMalloc(&psi.imag, sizeof(double)*(1ll << qnum), hipHostMallocMapped);
if (cuda_status1 != hipSuccess)
{
printf("host alloc fail!\n");
hipHostFree(psi.real);
return false;
}
hipHostGetDevicePointer(&psigpu.imag, psi.imag, 0);
}
psi.qnum = qnum;
psigpu.qnum = qnum;
QSIZE BLOCKDIM;
BLOCKDIM = (1 << psigpu.qnum) / THREADDIM;
gpu::initState << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (psigpu.real, psigpu.imag, 1 << (psigpu.qnum));
}
else
{
QSIZE Dim = 1 << qnum;
hipError_t cuda_status;
if (nullptr == psi.real)
{
psi.real = (STATE_T*)malloc(sizeof(STATE_T)*Dim);
}
if (nullptr == psi.imag)
{
psi.imag = (STATE_T*)malloc(sizeof(STATE_T)*Dim);
}
if (nullptr == psigpu.real)
{
cuda_status = hipMalloc((void**)&psigpu.real, sizeof(STATE_T)*Dim);
if (hipSuccess != cuda_status)
{
                printf("psigpu.real alloc gpu memory error!\n");
free(psi.real);
free(psi.imag);
return false;
}
}
if (nullptr == psigpu.imag)
{
cuda_status = hipMalloc((void**)&psigpu.imag, sizeof(STATE_T)*Dim);
if (hipSuccess != cuda_status)
{
                printf("psigpu.imag alloc gpu memory error!\n");
free(psi.real);
free(psi.imag);
hipFree(psigpu.real);
return false;
}
}
psigpu.qnum = qnum;
psi.qnum = qnum;
QSIZE BLOCKDIM;
BLOCKDIM = (1 << psigpu.qnum) / THREADDIM;
gpu::initState << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (psigpu.real, psigpu.imag, 1 << (psigpu.qnum));
}
return true;
}
bool GATEGPU::unitarysingle(
QState& psigpu,
size_t qn,
QState& matrix,
bool isConjugate,
double error_rate)
{
if (gpu::randGenerator() > error_rate)
{
if (isConjugate)
{
STATE_T temp_real, temp_imag;
temp_real = matrix.real[1];
temp_imag = matrix.imag[1];
matrix.real[1] = matrix.real[2];
matrix.imag[1] = matrix.imag[2];
matrix.real[2] = temp_real; //convert
matrix.imag[2] = temp_imag; //convert
for (size_t i = 0; i < 4; i++)
{
matrix.real[i] = matrix.real[i];
matrix.imag[i] = -matrix.imag[i];
// matrix[i] = qcomplex_t(matrix[i].real(), -matrix[i].imag());
}//dagger
}
double real00 = matrix.real[0];
double real01 = matrix.real[1];
double real10 = matrix.real[2];
double real11 = matrix.real[3];
double imag00 = matrix.imag[0];
double imag01 = matrix.imag[1];
double imag10 = matrix.imag[2];
double imag11 = matrix.imag[3];
//test
QSIZE BLOCKDIM;
SET_BLOCKDIM
gpu::unitarysingle << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), 1 << qn, real00, real01, real10, real11, imag00, imag01, imag10, imag11);
return true;
}
return true;
}
bool GATEGPU::controlunitarysingle(
QState& psigpu,
Qnum& qnum,
QState& matrix,
bool isConjugate,
double error_rate)
{
if (gpu::randGenerator() > error_rate)
{
double costheta, sintheta;
if (isConjugate)
{
STATE_T temp_real, temp_imag;
temp_real = matrix.real[1];
temp_imag = matrix.imag[1];
matrix.real[1] = matrix.real[2];
matrix.imag[1] = matrix.imag[2];
matrix.real[2] = temp_real; //convert
matrix.imag[2] = temp_imag; //convert
for (size_t i = 0; i < 4; i++)
{
matrix.real[i] = matrix.real[i];
matrix.imag[i] = -matrix.imag[i];
//matrix[i] = qcomplex_t(matrix[i].real(), -matrix[i].imag());
}//dagger
}
QSIZE target_qubit = 1 << qnum.back();
// 1 is for the control single gate
QSIZE mask = getControllerMask(qnum, 1);
double real00 = matrix.real[0];
double real01 = matrix.real[1];
double real10 = matrix.real[2];
double real11 = matrix.real[3];
double imag00 = matrix.imag[0];
double imag01 = matrix.imag[1];
double imag10 = matrix.imag[2];
double imag11 = matrix.imag[3];
QSIZE BLOCKDIM;
SET_BLOCKDIM;
BLOCKDIM = (1 << (psigpu.qnum)) / THREADDIM;
gpu::controlunitarysingle << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(
psigpu.real,
psigpu.imag,
1 << (psigpu.qnum),
target_qubit,
mask,
real00, real01, real10, real11, imag00, imag01, imag10, imag11
);
return true;
}
return true;
}
//unitary double gate
bool GATEGPU::unitarydouble(
QState& psigpu,
size_t qn_0,
size_t qn_1,
QState& matrix,
bool isConjugate,
double error_rate)
{
if (gpu::randGenerator() > error_rate)
{
if (isConjugate)
{
STATE_T temp_real, temp_imag;
for (size_t i = 0; i < 4; i++)
{
for (size_t j = i + 1; j < 4; j++)
{
temp_real = matrix.real[4 * i + j];
temp_imag = matrix.imag[4 * i + j];
matrix.real[4 * i + j] = matrix.real[4 * j + i];
matrix.imag[4 * i + j] = matrix.imag[4 * j + i];
matrix.real[4 * j + i] = temp_real;
matrix.imag[4 * j + i] = temp_imag;
}
}
for (size_t i = 0; i < 16; i++)
{
//matrix[i].imag = -matrix[i].imag;
matrix.real[i] = matrix.real[i];
matrix.imag[i] = -matrix.imag[i];
//matrix[i] = qcomplex_t(matrix[i].real(), -matrix[i].imag());
}//dagger
}
QSIZE BLOCKDIM;
SET_BLOCKDIM
gpu::unitarydouble << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), 1 << qn_0, 1 << qn_1,
matrix.real[0], matrix.real[1], matrix.real[2], matrix.real[3],
matrix.real[4], matrix.real[5], matrix.real[6], matrix.real[7],
matrix.real[8], matrix.real[9], matrix.real[10], matrix.real[11],
matrix.real[12], matrix.real[13], matrix.real[14], matrix.real[15],
matrix.imag[0], matrix.imag[1], matrix.imag[2], matrix.imag[3],
matrix.imag[4], matrix.imag[5], matrix.imag[6], matrix.imag[7],
matrix.imag[8], matrix.imag[9], matrix.imag[10], matrix.imag[11],
matrix.imag[12], matrix.imag[13], matrix.imag[14], matrix.imag[15]);
return true;
}
return true;
}
bool GATEGPU::controlunitarydouble(
QState& psigpu,
Qnum& qnum,
QState& matrix,
bool isConjugate,
double error_rate)
{
if (gpu::randGenerator() > error_rate)
{
double costheta, sintheta;
if (isConjugate)
{
STATE_T temp_real, temp_imag;
for (size_t i = 0; i < 4; i++)
{
for (size_t j = i + 1; j < 4; j++)
{
temp_real = matrix.real[4 * i + j];
temp_imag = matrix.imag[4 * i + j];
matrix.real[4 * i + j] = matrix.real[4 * j + i];
matrix.imag[4 * i + j] = matrix.imag[4 * j + i];
matrix.real[4 * j + i] = temp_real;
matrix.imag[4 * j + i] = temp_imag;
}
}
for (size_t i = 0; i < 16; i++)
{
matrix.real[i] = matrix.real[i];
matrix.imag[i] = -matrix.imag[i];
//matrix[i] = qcomplex_t(matrix[i].real(), -matrix[i].imag());
}//dagger
}
QSIZE m = qnum.size();
QSIZE target0 = qnum[m - 2];
QSIZE target1 = qnum.back();
sort(qnum.begin(), qnum.end());
QSIZE *h_block, *d_block;
h_block = (QSIZE *)malloc(sizeof(QSIZE) * m);
if (nullptr == h_block)
{
fprintf(stderr, "malloc error\n");
return false;
}
for (QSIZE i = 0; i < m; i++)
{
h_block[i] = 1 << qnum[i];
}
hipError_t cuda_status;
cuda_status = hipMalloc((void **)&d_block, sizeof(QSIZE) * m);
if (hipSuccess != cuda_status)
{
fprintf(stderr, "hipMalloc error\n");
free(h_block);
return false;
}
cuda_status = hipMemcpy(d_block, h_block, sizeof(QSIZE) * m, hipMemcpyHostToDevice);
if (hipSuccess != cuda_status)
{
fprintf(stderr, "hipMemcpy error\n");
free(h_block);
hipFree(d_block);
return false;
}
QSIZE BLOCKDIM;
SET_BLOCKDIM
gpu::controlunitarydouble << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), d_block, 1 << target0, 1 << target1, m,
matrix.real[0], matrix.real[1], matrix.real[2], matrix.real[3],
matrix.real[4], matrix.real[5], matrix.real[6], matrix.real[7],
matrix.real[8], matrix.real[9], matrix.real[10], matrix.real[11],
matrix.real[12], matrix.real[13], matrix.real[14], matrix.real[15],
matrix.imag[0], matrix.imag[1], matrix.imag[2], matrix.imag[3],
matrix.imag[4], matrix.imag[5], matrix.imag[6], matrix.imag[7],
matrix.imag[8], matrix.imag[9], matrix.imag[10], matrix.imag[11],
matrix.imag[12], matrix.imag[13], matrix.imag[14], matrix.imag[15]);
        free(h_block);
        hipFree(d_block);
        return true;
}
return true;
}
//qbReset
bool GATEGPU::qbReset(QState& psigpu, size_t qn, double error_rate)
{
if (gpu::randGenerator() > error_rate)
{
hipError_t cuda_status;
double * resultgpu;
// hipHostMalloc((void **)&result, sizeof(STATE_T)*(psigpu.qnum-1))/THREADDIM, hipHostMallocMapped);
//hipHostGetDevicePointer(&resultgpu, result, 0);
cuda_status = hipMalloc((void **)&resultgpu, sizeof(STATE_T)*(1 << (psigpu.qnum - 1)) / THREADDIM);
if (hipSuccess != cuda_status)
{
fprintf(stderr, "hipMalloc error\n");
return false;
}
double * probgpu, *prob;
cuda_status = hipHostMalloc((void **)&prob, sizeof(STATE_T), hipHostMallocMapped);
        if (hipSuccess != cuda_status)
        {
            fprintf(stderr, "hipHostMalloc error\n");
            hipFree(resultgpu);
            return false;
        }
hipHostGetDevicePointer(&probgpu, prob, 0);
QSIZE BLOCKDIM;
SET_BLOCKDIM
gpu::qubitprob << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM, THREADDIM * sizeof(STATE_T) >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), 1 << qn, resultgpu);
cuda_status = hipDeviceSynchronize();
gpu::probsum << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (resultgpu, probgpu);
cuda_status = hipDeviceSynchronize();
*prob = 1 / sqrt(*prob);
gpu::qubitcollapse0 << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), 1 << qn, *prob);
cuda_status = hipDeviceSynchronize();
hipFree(resultgpu);
hipHostFree(prob);
return getSynchronizeResult(cuda_status, "qReset");
}
return true;
}
int GATEGPU::qubitmeasure(QState& psigpu, QSIZE Block, double* &resultgpu, double* &probgpu)
{
//double * resultgpu;
QSIZE BLOCKDIM;
SET_BLOCKDIM
int count = (0 == BLOCKDIM) ? 1 : BLOCKDIM;
double prob;
hipError_t cuda_status;
if (nullptr == resultgpu)
{
cuda_status = hipMalloc(&resultgpu, sizeof(STATE_T)* count);
if (hipSuccess != cuda_status)
{
cout << "resultgpu " << hipGetErrorString(cuda_status) << endl;
return -1;
}
}
if (nullptr == probgpu)
{
cuda_status = hipMalloc(&probgpu, sizeof(STATE_T));
if (hipSuccess != cuda_status)
{
cout << "probgpu " << hipGetErrorString(cuda_status) << endl;
hipFree(resultgpu);
resultgpu = nullptr;
return -1;
}
}
gpu::qubitprob << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM, THREADDIM * sizeof(STATE_T) >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), Block, resultgpu);
cuda_status = hipDeviceSynchronize();
if (hipSuccess != cuda_status)
{
cout << hipGetErrorString(cuda_status) << endl;
return -1;
}
gpu::probsum << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (resultgpu, probgpu);
cuda_status = hipDeviceSynchronize();
if (hipSuccess != cuda_status)
{
cout << hipGetErrorString(cuda_status) << endl;
return -1;
}
cuda_status = hipMemcpy(&prob, probgpu, sizeof(STATE_T), hipMemcpyDeviceToHost);
if (hipSuccess != cuda_status)
{
fprintf(stderr, "hipMemcpy error\n");
return -1;
}
cuda_status = hipDeviceSynchronize();
if (hipSuccess != cuda_status)
{
cout << hipGetErrorString(cuda_status) << endl;
return -1;
}
int outcome = 0;
if (gpu::randGenerator() > prob)
{
outcome = 1;
}
if (0 == outcome)
{
prob = 1 / sqrt(prob);
gpu::qubitcollapse0 << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), Block, prob);
}
else
{
prob = 1 / sqrt(1 - prob);
gpu::qubitcollapse1 << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), Block, prob);
}
cuda_status = hipDeviceSynchronize();
getSynchronizeResult(cuda_status, "qubitmeasure");
return outcome;
}//checked
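// Minimal usage sketch (illustrative, not taken from the original callers):
//   double *resultgpu = nullptr, *probgpu = nullptr;
//   int bit = GATEGPU::qubitmeasure(psigpu, 1 << qn, resultgpu, probgpu);
//   GATEGPU::gpuFree(resultgpu);
//   GATEGPU::gpuFree(probgpu);
// The two scratch buffers are allocated lazily on first use, so they can be
// reused across repeated measurements and released once at the end.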
bool probcompare(pair<size_t, double>& a, pair<size_t, double>& b)
{
return a.second > b.second;
}
bool GATEGPU::pMeasurenew(
QState& psigpu,
vector<pair<size_t, double>>& vprob,
Qnum& qnum,
int select_max)
{
return false;
}
static bool pMeasure_few_target(GATEGPU::QState&, vector<double>&, GATEGPU::Qnum&);
static bool pMeasure_many_target(GATEGPU::QState&, vector<double>&, GATEGPU::Qnum&);
bool GATEGPU::pMeasure_no_index(
QState& psigpu,
vector<double> &mResult,
Qnum& qnum)
{
    // 10 is probably a good threshold: since the threads come in units of 1024,
    // the many_target path is the better fit beyond that
if (qnum.size() < 10)
{
return pMeasure_few_target(psigpu, mResult, qnum);
}
else
{
return pMeasure_many_target(psigpu, mResult, qnum);
}
}
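// Result layout implied by result_size = 1 << qnum.size(): mResult holds one
// probability per computational-basis outcome of the measured qubits, e.g.
// measuring 3 qubits fills mResult[0..7]. How outcome bits map onto the order
// of qnum is decided inside the pmeasure kernels, which are not shown here.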
static bool pMeasure_few_target(GATEGPU::QState& psigpu, vector<double>& mResult, GATEGPU::Qnum& qnum)
{
QSIZE result_size = 1 << qnum.size();
mResult.resize(result_size);
    QSIZE Dim = 1 << psigpu.qnum; // total length of the state vector
QSIZE BLOCKDIM;
BLOCKDIM = Dim / result_size;
hipError_t cudaStatus;
    // in general BLOCKDIM cannot be 0 here, because the few-target path is only
    // taken when fewer than 10 qubits are measured (result_size < 2^10)
    // guard anyway, just to be safe
BLOCKDIM = (BLOCKDIM == 0 ? 1 : BLOCKDIM);
STATE_T* result_gpu;
cudaStatus = hipMalloc(&result_gpu, sizeof(STATE_T)*BLOCKDIM);
STATE_T* result_cpu;
result_cpu = (STATE_T*)malloc(sizeof(STATE_T)*BLOCKDIM);
QSIZE qnum_mask = 0;
// obtain the mask for pMeasure qubit
for (auto iter : qnum)
{
qnum_mask += (1 << iter);
}
QSIZE SHARED_SIZE = THREADDIM * sizeof(STATE_T);
for (int result_idx = 0; result_idx < result_size; ++result_idx)
{
gpu::pmeasure_one_target << < BLOCKDIM, THREADDIM, SHARED_SIZE >> > (
psigpu.real,
psigpu.imag,
result_gpu,
qnum_mask,
result_idx,
qnum.size(),
Dim);
cudaStatus = hipMemcpy(result_cpu, result_gpu, sizeof(STATE_T)*BLOCKDIM, hipMemcpyDeviceToHost);
STATE_T result_sum = 0;
for (int i = 0; i < BLOCKDIM; ++i)
{
result_sum += result_cpu[i];
}
mResult[result_idx] = result_sum;
}
hipFree(result_gpu);
free(result_cpu);
return true;
}
static bool pMeasure_many_target(GATEGPU::QState& psigpu, vector<double>& mResult, GATEGPU::Qnum& qnum)
{
QSIZE qnum_mask = 0;
// obtain the mask for pMeasure qubit
for (auto iter : qnum)
{
qnum_mask += (1 << iter);
}
QSIZE result_size = 1 << qnum.size();
mResult.resize(result_size);
// allocate the graphics memory for result
STATE_T* result_gpu;
hipError_t cudaStatus;
cudaStatus = hipMalloc(&result_gpu, result_size * sizeof(STATE_T));
cudaStatus = hipMemset(result_gpu, 0, result_size * sizeof(STATE_T));
QSIZE BLOCKDIM;
BLOCKDIM = result_size / THREADDIM;
hipLaunchKernelGGL(( gpu::pmeasure_many_target) , dim3((BLOCKDIM == 0 ? 1 : BLOCKDIM)), dim3(THREADDIM) , 0, 0,
psigpu.real,
psigpu.imag,
result_gpu,
qnum_mask,
result_size,
1 << (psigpu.qnum));
    // copy the probabilities straight into mResult; no intermediate host buffer is needed
cudaStatus = hipMemcpy(&(mResult[0]), result_gpu, result_size * sizeof(STATE_T), hipMemcpyDeviceToHost);
hipFree(result_gpu);
return true;
}
bool GATEGPU::getState(QState &psi, QState &psigpu, int qnum)
{
hipError_t cuda_status;
if (qnum < 30)
{
QSIZE Dim = 1 << qnum;
cuda_status = hipMemcpy(psi.real, psigpu.real, sizeof(STATE_T)*Dim, hipMemcpyDeviceToHost);
if (hipSuccess != cuda_status)
{
fprintf(stderr, "hipMemcpy error\n");
return false;
}
cuda_status = hipMemcpy(psi.imag, psigpu.imag, sizeof(STATE_T)*Dim, hipMemcpyDeviceToHost);
if (hipSuccess != cuda_status)
{
fprintf(stderr, "hipMemcpy error\n");
return false;
}
}
return true;
}
void GATEGPU::gpuFree(double* memory)
{
if (memory != nullptr)
{
hipFree(memory);
}
}
| d74fdf22e7fe67e05788d5ca93af4bf2fdf608e1.cu |
/***********************************************************************
Copyright:
Author:Xue Cheng
Date:2017-12-13
Description: Definition of Encapsulation of GPU gates
************************************************************************/
#include "GPUGatesWrapper.h"
#include "GPUGates.h"
using namespace std;
#define SET_BLOCKDIM BLOCKDIM = (1 << (psigpu.qnum - 1)) / THREADDIM;
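// Default grid size: half of the state vector divided by THREADDIM (the
// per-block thread count defined elsewhere in this project), presumably because
// the single-qubit kernels process amplitudes in pairs.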
static QSIZE getControllerMask(GATEGPU::Qnum& qnum, int target = 1)
{
int qnum_mask = 0;
// obtain the mask for controller qubit
for (auto iter = qnum.begin(); iter != qnum.end() - target; ++iter)
{
qnum_mask += (1 << *iter);
}
return qnum_mask;
}
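// Worked example (illustrative): with qnum = {1, 3, 4} and target = 1 the loop
// stops before the last entry, so the returned mask is (1 << 1) + (1 << 3) = 10,
// i.e. the bit pattern of the two control qubits.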
int GATEGPU::devicecount()
{
int count;
cudaGetDeviceCount(&count);
return count;
}
bool getSynchronizeResult(cudaError_t cudaStatue, char * pcGate)
{
if (cudaSuccess != cudaStatue)
{
cout << "err " << pcGate << " = " << cudaGetErrorString(cudaStatue) << endl;
return false;
}
return true;
}
#define GET_SYN_RES(x) cudaError_t cudaStatue = cudaDeviceSynchronize();\
return getSynchronizeResult(cudaStatue,(x));
bool GATEGPU::destroyState(QState& psi, QState& psigpu, size_t stQnum)
{
if ((nullptr == psi.real) ||
(nullptr == psi.imag) ||
(nullptr == psigpu.real) ||
(nullptr == psigpu.imag))
{
return false;
}
if (stQnum < 30)
{
cudaError_t cuda_status = cudaFree(psigpu.real);
if (cudaSuccess != cuda_status)
{
cout << "psigpu.real free error" << endl;
return false;
}
cuda_status = cudaFree(psigpu.imag);
if (cudaSuccess != cuda_status)
{
cout << "psigpu.imag free error" << endl;
return false;
}
free(psi.real);
free(psi.imag);
psi.real = nullptr;
psi.imag = nullptr;
psigpu.real = nullptr;
psigpu.imag = nullptr;
}
else
{
cudaFreeHost(psigpu.real);
cudaFreeHost(psigpu.imag);
psigpu.real = nullptr;
psigpu.imag = nullptr;
}
return true;
}
bool GATEGPU::clearState(QState& psi, QState& psigpu, size_t stQnum)
{
if ((nullptr == psi.real) ||
(nullptr == psi.imag) ||
(nullptr == psigpu.real) ||
(nullptr == psigpu.imag))
{
return false;
}
if (stQnum < 30)
{
QSIZE BLOCKDIM;
BLOCKDIM = (1 << psigpu.qnum) / THREADDIM;
gpu::initState << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (psigpu.real, psigpu.imag, 1 << (psigpu.qnum));
}
else
{
QSIZE BLOCKDIM;
BLOCKDIM = (1 << psigpu.qnum) / THREADDIM;
gpu::initState << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (psigpu.real, psigpu.imag, 1 << (psigpu.qnum));
}
return true;
}
bool GATEGPU::initstate(QState& psi, QState& psigpu, int qnum)
{
if (qnum >= 30)
{
if (nullptr == psi.real)
{
cudaError_t cuda_status = cudaHostAlloc(&psi.real, sizeof(double)*(1ll << qnum), cudaHostAllocMapped);
if (cuda_status != cudaSuccess)
{
printf("host alloc fail!\n");
return false;
}
cudaHostGetDevicePointer(&psigpu.real, psi.real, 0);
}
if (nullptr == psi.imag)
{
cudaError_t cuda_status1 = cudaHostAlloc(&psi.imag, sizeof(double)*(1ll << qnum), cudaHostAllocMapped);
if (cuda_status1 != cudaSuccess)
{
printf("host alloc fail!\n");
cudaFreeHost(psi.real);
return false;
}
cudaHostGetDevicePointer(&psigpu.imag, psi.imag, 0);
}
psi.qnum = qnum;
psigpu.qnum = qnum;
QSIZE BLOCKDIM;
BLOCKDIM = (1 << psigpu.qnum) / THREADDIM;
gpu::initState << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (psigpu.real, psigpu.imag, 1 << (psigpu.qnum));
}
else
{
QSIZE Dim = 1 << qnum;
cudaError_t cuda_status;
if (nullptr == psi.real)
{
psi.real = (STATE_T*)malloc(sizeof(STATE_T)*Dim);
}
if (nullptr == psi.imag)
{
psi.imag = (STATE_T*)malloc(sizeof(STATE_T)*Dim);
}
if (nullptr == psigpu.real)
{
cuda_status = cudaMalloc((void**)&psigpu.real, sizeof(STATE_T)*Dim);
if (cudaSuccess != cuda_status)
{
printf("psigpu.real alloc gpu memoery error!\n");
free(psi.real);
free(psi.imag);
return false;
}
}
if (nullptr == psigpu.imag)
{
cuda_status = cudaMalloc((void**)&psigpu.imag, sizeof(STATE_T)*Dim);
if (cudaSuccess != cuda_status)
{
printf("psigpu.imag alloc gpu memoery error!\n");
free(psi.real);
free(psi.imag);
cudaFree(psigpu.real);
return false;
}
}
psigpu.qnum = qnum;
psi.qnum = qnum;
QSIZE BLOCKDIM;
BLOCKDIM = (1 << psigpu.qnum) / THREADDIM;
gpu::initState << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (psigpu.real, psigpu.imag, 1 << (psigpu.qnum));
}
return true;
}
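// Typical lifecycle, as a hedged sketch (QState is assumed to be an aggregate
// whose real/imag pointers start out as nullptr, matching the checks above):
//   GATEGPU::QState psi{}, psigpu{};
//   GATEGPU::initstate(psi, psigpu, 10);        // |0...0> on host and device
//   // ... apply gates to psigpu ...
//   GATEGPU::getState(psi, psigpu, 10);         // copy amplitudes back to the host
//   GATEGPU::destroyState(psi, psigpu, 10);     // release both copies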
bool GATEGPU::unitarysingle(
QState& psigpu,
size_t qn,
QState& matrix,
bool isConjugate,
double error_rate)
{
if (gpu::randGenerator() > error_rate)
{
if (isConjugate)
{
STATE_T temp_real, temp_imag;
temp_real = matrix.real[1];
temp_imag = matrix.imag[1];
matrix.real[1] = matrix.real[2];
matrix.imag[1] = matrix.imag[2];
matrix.real[2] = temp_real; //convert
matrix.imag[2] = temp_imag; //convert
for (size_t i = 0; i < 4; i++)
{
matrix.real[i] = matrix.real[i];
matrix.imag[i] = -matrix.imag[i];
// matrix[i] = qcomplex_t(matrix[i].real(), -matrix[i].imag());
}//dagger
}
double real00 = matrix.real[0];
double real01 = matrix.real[1];
double real10 = matrix.real[2];
double real11 = matrix.real[3];
double imag00 = matrix.imag[0];
double imag01 = matrix.imag[1];
double imag10 = matrix.imag[2];
double imag11 = matrix.imag[3];
//test
QSIZE BLOCKDIM;
SET_BLOCKDIM
gpu::unitarysingle << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), 1 << qn, real00, real01, real10, real11, imag00, imag01, imag10, imag11);
return true;
}
return true;
}
bool GATEGPU::controlunitarysingle(
QState& psigpu,
Qnum& qnum,
QState& matrix,
bool isConjugate,
double error_rate)
{
if (gpu::randGenerator() > error_rate)
{
if (isConjugate)
{
STATE_T temp_real, temp_imag;
temp_real = matrix.real[1];
temp_imag = matrix.imag[1];
matrix.real[1] = matrix.real[2];
matrix.imag[1] = matrix.imag[2];
matrix.real[2] = temp_real; //convert
matrix.imag[2] = temp_imag; //convert
for (size_t i = 0; i < 4; i++)
{
matrix.real[i] = matrix.real[i];
matrix.imag[i] = -matrix.imag[i];
//matrix[i] = qcomplex_t(matrix[i].real(), -matrix[i].imag());
}//dagger
}
QSIZE target_qubit = 1 << qnum.back();
// 1 is for the control single gate
QSIZE mask = getControllerMask(qnum, 1);
double real00 = matrix.real[0];
double real01 = matrix.real[1];
double real10 = matrix.real[2];
double real11 = matrix.real[3];
double imag00 = matrix.imag[0];
double imag01 = matrix.imag[1];
double imag10 = matrix.imag[2];
double imag11 = matrix.imag[3];
QSIZE BLOCKDIM;
SET_BLOCKDIM;
BLOCKDIM = (1 << (psigpu.qnum)) / THREADDIM;
gpu::controlunitarysingle << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(
psigpu.real,
psigpu.imag,
1 << (psigpu.qnum),
target_qubit,
mask,
real00, real01, real10, real11, imag00, imag01, imag10, imag11
);
return true;
}
return true;
}
//unitary double gate
bool GATEGPU::unitarydouble(
QState& psigpu,
size_t qn_0,
size_t qn_1,
QState& matrix,
bool isConjugate,
double error_rate)
{
if (gpu::randGenerator() > error_rate)
{
if (isConjugate)
{
STATE_T temp_real, temp_imag;
for (size_t i = 0; i < 4; i++)
{
for (size_t j = i + 1; j < 4; j++)
{
temp_real = matrix.real[4 * i + j];
temp_imag = matrix.imag[4 * i + j];
matrix.real[4 * i + j] = matrix.real[4 * j + i];
matrix.imag[4 * i + j] = matrix.imag[4 * j + i];
matrix.real[4 * j + i] = temp_real;
matrix.imag[4 * j + i] = temp_imag;
}
}
for (size_t i = 0; i < 16; i++)
{
//matrix[i].imag = -matrix[i].imag;
matrix.real[i] = matrix.real[i];
matrix.imag[i] = -matrix.imag[i];
//matrix[i] = qcomplex_t(matrix[i].real(), -matrix[i].imag());
}//dagger
}
QSIZE BLOCKDIM;
SET_BLOCKDIM
gpu::unitarydouble << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), 1 << qn_0, 1 << qn_1,
matrix.real[0], matrix.real[1], matrix.real[2], matrix.real[3],
matrix.real[4], matrix.real[5], matrix.real[6], matrix.real[7],
matrix.real[8], matrix.real[9], matrix.real[10], matrix.real[11],
matrix.real[12], matrix.real[13], matrix.real[14], matrix.real[15],
matrix.imag[0], matrix.imag[1], matrix.imag[2], matrix.imag[3],
matrix.imag[4], matrix.imag[5], matrix.imag[6], matrix.imag[7],
matrix.imag[8], matrix.imag[9], matrix.imag[10], matrix.imag[11],
matrix.imag[12], matrix.imag[13], matrix.imag[14], matrix.imag[15]);
return true;
}
return true;
}
bool GATEGPU::controlunitarydouble(
QState& psigpu,
Qnum& qnum,
QState& matrix,
bool isConjugate,
double error_rate)
{
if (gpu::randGenerator() > error_rate)
{
if (isConjugate)
{
STATE_T temp_real, temp_imag;
for (size_t i = 0; i < 4; i++)
{
for (size_t j = i + 1; j < 4; j++)
{
temp_real = matrix.real[4 * i + j];
temp_imag = matrix.imag[4 * i + j];
matrix.real[4 * i + j] = matrix.real[4 * j + i];
matrix.imag[4 * i + j] = matrix.imag[4 * j + i];
matrix.real[4 * j + i] = temp_real;
matrix.imag[4 * j + i] = temp_imag;
}
}
for (size_t i = 0; i < 16; i++)
{
matrix.real[i] = matrix.real[i];
matrix.imag[i] = -matrix.imag[i];
//matrix[i] = qcomplex_t(matrix[i].real(), -matrix[i].imag());
}//dagger
}
QSIZE m = qnum.size();
QSIZE target0 = qnum[m - 2];
QSIZE target1 = qnum.back();
sort(qnum.begin(), qnum.end());
QSIZE *h_block, *d_block;
h_block = (QSIZE *)malloc(sizeof(QSIZE) * m);
if (nullptr == h_block)
{
fprintf(stderr, "malloc error\n");
return false;
}
for (QSIZE i = 0; i < m; i++)
{
h_block[i] = 1 << qnum[i];
}
cudaError_t cuda_status;
cuda_status = cudaMalloc((void **)&d_block, sizeof(QSIZE) * m);
if (cudaSuccess != cuda_status)
{
fprintf(stderr, "cudaMalloc error\n");
free(h_block);
return false;
}
cuda_status = cudaMemcpy(d_block, h_block, sizeof(QSIZE) * m, cudaMemcpyHostToDevice);
if (cudaSuccess != cuda_status)
{
fprintf(stderr, "cudaMemcpy error\n");
free(h_block);
cudaFree(d_block);
return false;
}
QSIZE BLOCKDIM;
SET_BLOCKDIM
gpu::controlunitarydouble << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), d_block, 1 << target0, 1 << target1, m,
matrix.real[0], matrix.real[1], matrix.real[2], matrix.real[3],
matrix.real[4], matrix.real[5], matrix.real[6], matrix.real[7],
matrix.real[8], matrix.real[9], matrix.real[10], matrix.real[11],
matrix.real[12], matrix.real[13], matrix.real[14], matrix.real[15],
matrix.imag[0], matrix.imag[1], matrix.imag[2], matrix.imag[3],
matrix.imag[4], matrix.imag[5], matrix.imag[6], matrix.imag[7],
matrix.imag[8], matrix.imag[9], matrix.imag[10], matrix.imag[11],
matrix.imag[12], matrix.imag[13], matrix.imag[14], matrix.imag[15]);
    free(h_block);
    cudaFree(d_block);
    return true;
}
return true;
}
//qbReset
bool GATEGPU::qbReset(QState& psigpu, size_t qn, double error_rate)
{
if (gpu::randGenerator() > error_rate)
{
cudaError_t cuda_status;
double * resultgpu;
// cudaHostAlloc((void **)&result, sizeof(STATE_T)*(psigpu.qnum-1))/THREADDIM, cudaHostAllocMapped);
//cudaHostGetDevicePointer(&resultgpu, result, 0);
cuda_status = cudaMalloc((void **)&resultgpu, sizeof(STATE_T)*(1 << (psigpu.qnum - 1)) / THREADDIM);
if (cudaSuccess != cuda_status)
{
fprintf(stderr, "cudaMalloc error\n");
return false;
}
double * probgpu, *prob;
cuda_status = cudaHostAlloc((void **)&prob, sizeof(STATE_T), cudaHostAllocMapped);
    if (cudaSuccess != cuda_status)
    {
      fprintf(stderr, "cudaHostAlloc error\n");
      cudaFree(resultgpu);
      return false;
    }
cudaHostGetDevicePointer(&probgpu, prob, 0);
QSIZE BLOCKDIM;
SET_BLOCKDIM
gpu::qubitprob << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM, THREADDIM * sizeof(STATE_T) >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), 1 << qn, resultgpu);
cuda_status = cudaDeviceSynchronize();
gpu::probsum << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (resultgpu, probgpu);
cuda_status = cudaDeviceSynchronize();
*prob = 1 / sqrt(*prob);
gpu::qubitcollapse0 << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), 1 << qn, *prob);
cuda_status = cudaDeviceSynchronize();
cudaFree(resultgpu);
cudaFreeHost(prob);
return getSynchronizeResult(cuda_status, "qReset");
}
return true;
}
int GATEGPU::qubitmeasure(QState& psigpu, QSIZE Block, double* &resultgpu, double* &probgpu)
{
//double * resultgpu;
QSIZE BLOCKDIM;
SET_BLOCKDIM
int count = (0 == BLOCKDIM) ? 1 : BLOCKDIM;
double prob;
cudaError_t cuda_status;
if (nullptr == resultgpu)
{
cuda_status = cudaMalloc(&resultgpu, sizeof(STATE_T)* count);
if (cudaSuccess != cuda_status)
{
cout << "resultgpu " << cudaGetErrorString(cuda_status) << endl;
return -1;
}
}
if (nullptr == probgpu)
{
cuda_status = cudaMalloc(&probgpu, sizeof(STATE_T));
if (cudaSuccess != cuda_status)
{
cout << "probgpu " << cudaGetErrorString(cuda_status) << endl;
cudaFree(resultgpu);
resultgpu = nullptr;
return -1;
}
}
gpu::qubitprob << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM, THREADDIM * sizeof(STATE_T) >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), Block, resultgpu);
cuda_status = cudaDeviceSynchronize();
if (cudaSuccess != cuda_status)
{
cout << cudaGetErrorString(cuda_status) << endl;
return -1;
}
gpu::probsum << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> > (resultgpu, probgpu);
cuda_status = cudaDeviceSynchronize();
if (cudaSuccess != cuda_status)
{
cout << cudaGetErrorString(cuda_status) << endl;
return -1;
}
cuda_status = cudaMemcpy(&prob, probgpu, sizeof(STATE_T), cudaMemcpyDeviceToHost);
if (cudaSuccess != cuda_status)
{
fprintf(stderr, "cudaMemcpy error\n");
return -1;
}
cuda_status = cudaDeviceSynchronize();
if (cudaSuccess != cuda_status)
{
cout << cudaGetErrorString(cuda_status) << endl;
return -1;
}
int outcome = 0;
if (gpu::randGenerator() > prob)
{
outcome = 1;
}
if (0 == outcome)
{
prob = 1 / sqrt(prob);
gpu::qubitcollapse0 << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), Block, prob);
}
else
{
prob = 1 / sqrt(1 - prob);
gpu::qubitcollapse1 << < (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >> >
(psigpu.real, psigpu.imag, 1 << (psigpu.qnum), Block, prob);
}
cuda_status = cudaDeviceSynchronize();
getSynchronizeResult(cuda_status, "qubitmeasure");
return outcome;
}//checked
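// Note on the 1/sqrt factors above: if p is the probability of reading 0 on the
// measured qubit, collapsing to outcome 0 keeps only those amplitudes and
// rescales them by 1/sqrt(p) so the state stays normalized; outcome 1 keeps the
// complementary amplitudes and rescales them by 1/sqrt(1 - p).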
bool probcompare(pair<size_t, double>& a, pair<size_t, double>& b)
{
return a.second > b.second;
}
bool GATEGPU::pMeasurenew(
QState& psigpu,
vector<pair<size_t, double>>& vprob,
Qnum& qnum,
int select_max)
{
return false;
}
static bool pMeasure_few_target(GATEGPU::QState&, vector<double>&, GATEGPU::Qnum&);
static bool pMeasure_many_target(GATEGPU::QState&, vector<double>&, GATEGPU::Qnum&);
bool GATEGPU::pMeasure_no_index(
QState& psigpu,
vector<double> &mResult,
Qnum& qnum)
{
  // 10 is probably a good threshold: since the threads come in units of 1024,
  // the many_target path is the better fit beyond that
if (qnum.size() < 10)
{
return pMeasure_few_target(psigpu, mResult, qnum);
}
else
{
return pMeasure_many_target(psigpu, mResult, qnum);
}
}
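// Rough cost picture behind the threshold (a reading of the comment above, not
// a measured result): with k measured qubits the few-target path launches 2^k
// small reduction kernels, one per outcome, while the many-target path covers
// all 2^k outcomes in a single launch, so the single launch wins once 2^k is on
// the order of the 1024-thread block size.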
static bool pMeasure_few_target(GATEGPU::QState& psigpu, vector<double>& mResult, GATEGPU::Qnum& qnum)
{
QSIZE result_size = 1 << qnum.size();
mResult.resize(result_size);
  QSIZE Dim = 1 << psigpu.qnum; // total length of the state vector
QSIZE BLOCKDIM;
BLOCKDIM = Dim / result_size;
cudaError_t cudaStatus;
  // in general BLOCKDIM cannot be 0 here, because the few-target path is only
  // taken when fewer than 10 qubits are measured (result_size < 2^10)
  // guard anyway, just to be safe
BLOCKDIM = (BLOCKDIM == 0 ? 1 : BLOCKDIM);
STATE_T* result_gpu;
cudaStatus = cudaMalloc(&result_gpu, sizeof(STATE_T)*BLOCKDIM);
STATE_T* result_cpu;
result_cpu = (STATE_T*)malloc(sizeof(STATE_T)*BLOCKDIM);
QSIZE qnum_mask = 0;
// obtain the mask for pMeasure qubit
for (auto iter : qnum)
{
qnum_mask += (1 << iter);
}
QSIZE SHARED_SIZE = THREADDIM * sizeof(STATE_T);
for (int result_idx = 0; result_idx < result_size; ++result_idx)
{
gpu::pmeasure_one_target << < BLOCKDIM, THREADDIM, SHARED_SIZE >> > (
psigpu.real,
psigpu.imag,
result_gpu,
qnum_mask,
result_idx,
qnum.size(),
Dim);
cudaStatus = cudaMemcpy(result_cpu, result_gpu, sizeof(STATE_T)*BLOCKDIM, cudaMemcpyDeviceToHost);
STATE_T result_sum = 0;
for (int i = 0; i < BLOCKDIM; ++i)
{
result_sum += result_cpu[i];
}
mResult[result_idx] = result_sum;
}
cudaFree(result_gpu);
free(result_cpu);
return true;
}
static bool pMeasure_many_target(GATEGPU::QState& psigpu, vector<double>& mResult, GATEGPU::Qnum& qnum)
{
QSIZE qnum_mask = 0;
// obtain the mask for pMeasure qubit
for (auto iter : qnum)
{
qnum_mask += (1 << iter);
}
QSIZE result_size = 1 << qnum.size();
mResult.resize(result_size);
// allocate the graphics memory for result
STATE_T* result_gpu;
cudaError_t cudaStatus;
cudaStatus = cudaMalloc(&result_gpu, result_size * sizeof(STATE_T));
cudaStatus = cudaMemset(result_gpu, 0, result_size * sizeof(STATE_T));
QSIZE BLOCKDIM;
BLOCKDIM = result_size / THREADDIM;
gpu::pmeasure_many_target <<< (BLOCKDIM == 0 ? 1 : BLOCKDIM), THREADDIM >>> (
psigpu.real,
psigpu.imag,
result_gpu,
qnum_mask,
result_size,
1 << (psigpu.qnum));
  // copy the probabilities straight into mResult; no intermediate host buffer is needed
cudaStatus = cudaMemcpy(&(mResult[0]), result_gpu, result_size * sizeof(STATE_T), cudaMemcpyDeviceToHost);
cudaFree(result_gpu);
return true;
}
bool GATEGPU::getState(QState &psi, QState &psigpu, int qnum)
{
cudaError_t cuda_status;
if (qnum < 30)
{
QSIZE Dim = 1 << qnum;
cuda_status = cudaMemcpy(psi.real, psigpu.real, sizeof(STATE_T)*Dim, cudaMemcpyDeviceToHost);
if (cudaSuccess != cuda_status)
{
fprintf(stderr, "cudaMemcpy error\n");
return false;
}
cuda_status = cudaMemcpy(psi.imag, psigpu.imag, sizeof(STATE_T)*Dim, cudaMemcpyDeviceToHost);
if (cudaSuccess != cuda_status)
{
fprintf(stderr, "cudaMemcpy error\n");
return false;
}
}
return true;
}
void GATEGPU::gpuFree(double* memory)
{
if (memory != nullptr)
{
cudaFree(memory);
}
}
|
b2495857dfa66cfee003d63133070cf1098e7489.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include "cudnnUtils.h"
namespace sd {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
static void batchnormCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* mean,
const NDArray* variance, const NDArray* gamma, const NDArray* beta, NDArray* output,
const double epsilon, const bool isSpatialMode) {
// input, output -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const sd::LongType xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle());
CHECK_CUDNN_FAILURE(cudnnSetStream(*handle, *context->getCudaStream()));
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if (isSpatialMode) { // 1xCx1x1
const int iC = static_cast<int>(mean->lengthOf());
const int stride0 = static_cast<int>(mean->strideAt(0));
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC * stride0, stride0, 1, 1})
: std::vector<int>({iC * stride0, stride0, 1, 1, 1});
} else {
paramsShape = std::vector<int>(mean->getShapeAsVector().begin(), mean->getShapeAsVector().end());
paramsStrides = xRank == 4
? std::vector<int>({static_cast<int>(mean->strideAt(0)), static_cast<int>(mean->strideAt(1)), static_cast<int>(mean->strideAt(2)),
static_cast<int>(mean->strideAt(3))})
: std::vector<int>({static_cast<int>(mean->strideAt(0)), static_cast<int>(mean->strideAt(1)), static_cast<int>(mean->strideAt(2)),
static_cast<int>(mean->strideAt(3)), static_cast<int>(mean->strideAt(4))});
}
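  // Illustrative shapes (numbers not from the original source): for a 4-D NCHW
  // input of shape {8, 32, 28, 28} in spatial mode, the 32-element mean /
  // variance / gamma / beta arrays are described to cuDNN as {1, 32, 1, 1};
  // in per-activation mode their own shape and strides are passed through.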
std::vector<int> xStrides = {static_cast<int>(input->strideAt(0)), static_cast<int>(input->strideAt(1)), static_cast<int>(input->strideAt(2)),
static_cast<int>(input->strideAt(3))};
std::vector<int> zStrides = {static_cast<int>(output->strideAt(0)), static_cast<int>(output->strideAt(1)), static_cast<int>(output->strideAt(2)),
static_cast<int>(output->strideAt(3))};
if (xRank > 4) { // 5D
    xStrides.push_back(static_cast<int>(input->strideAt(4)));
    zStrides.push_back(static_cast<int>(output->strideAt(4)));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
CudnnTensor x;
if (input->ews() == 1) {
x.setEx(format, dataType, xRank, xShape.data());
} else {
x.set(dataType, xRank, xShape.data(), xStrides.data());
}
// output descriptor
CudnnTensor z;
if (output->ews() == 1) {
z.setEx(format, dataType, xRank, xShape.data());
} else {
z.set(dataType, xRank, xShape.data(), zStrides.data());
}
// mean, variance, gamma and beta descriptor, the same descriptor for all of them
CudnnTensor params;
if (mean->ews() == 1) {
params.setEx(format, dataType, xRank, paramsShape.data());
} else {
params.set(dataType, xRank, paramsShape.data(), paramsStrides.data());
}
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* ptrAlpha =
output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta =
output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
// calculations
CHECK_CUDNN_FAILURE_MSG(
STRINGIZE(cudnnBatchNormalizationForwardInference),
cudnnBatchNormalizationForwardInference(
*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION, ptrAlpha, ptrBeta, x,
input->specialBuffer(), z, output->specialBuffer(), params, gamma->specialBuffer(), beta->specialBuffer(),
mean->specialBuffer(), variance->specialBuffer(), epsilon));
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0) throw cuda_exception::build("batchnormCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
}
//////////////////////////////////////////////////////////////////////////
static void batchnormBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* mean,
const NDArray* variance, const NDArray* gamma, const NDArray* gradO, NDArray* gradI,
NDArray* gradG, NDArray* gradB, const double epsilon, const bool isSpatialMode) {
// input, gradO, gradI -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta, gradM, gradV, gradG, gradB -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for
// BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for
// BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle());
  CHECK_CUDNN_FAILURE(cudnnSetStream(*handle, *context->getCudaStream()));
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if (isSpatialMode) { // 1xCx1x1
const int iC = static_cast<int>(mean->lengthOf());
const int stride0 = static_cast<int>(mean->strideAt(0));
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC * stride0, stride0, 1, 1})
: std::vector<int>({iC * stride0, stride0, 1, 1, 1});
} else {
paramsShape = std::vector<int>(mean->getShapeAsVector().begin(), mean->getShapeAsVector().end());
paramsStrides = xRank == 4
? std::vector<int>({static_cast<int>(mean->strideAt(0)), static_cast<int>(mean->strideAt(1)), static_cast<int>(mean->strideAt(2)),
static_cast<int>(mean->strideAt(3))})
: std::vector<int>({static_cast<int>(mean->strideAt(0)), static_cast<int>(mean->strideAt(1)), static_cast<int>(mean->strideAt(2)),
static_cast<int>(mean->strideAt(3)), static_cast<int>(mean->strideAt(4))});
}
std::vector<int> xStrides = {static_cast<int>(input->strideAt(0)), static_cast<int>(input->strideAt(1)), static_cast<int>(input->strideAt(2)),
static_cast<int>(input->strideAt(3))};
std::vector<int> dxStrides = {static_cast<int>(gradI->strideAt(0)), static_cast<int>(gradI->strideAt(1)), static_cast<int>(gradI->strideAt(2)),
static_cast<int>(gradI->strideAt(3))};
std::vector<int> dzStrides = {static_cast<int>(gradO->strideAt(0)), static_cast<int>(gradO->strideAt(1)), static_cast<int>(gradO->strideAt(2)),
static_cast<int>(gradO->strideAt(3))};
if (xRank > 4) { // 5D
xStrides.push_back(static_cast<int>(input->strideAt(4)));
dxStrides.push_back(static_cast<int>(gradI->strideAt(4)));
dzStrides.push_back(static_cast<int>(gradO->strideAt(4)));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
CudnnTensor x;
if (input->ews() == 1)
x.setEx(format, dataType, xRank, xShape.data());
else
x.set(dataType, xRank, xShape.data(), xStrides.data());
// gradO descriptor
CudnnTensor dz;
if (gradO->ews() == 1)
dz.setEx(format, dataType, xRank, xShape.data());
else
dz.set(dataType, xRank, xShape.data(), dzStrides.data());
// gradI descriptor
CudnnTensor dx;
if (input->ews() == 1)
dx.setEx(format, dataType, xRank, xShape.data());
else
dx.set(dataType, xRank, xShape.data(), dxStrides.data());
// mean, variance, gamma, gradG and gradB descriptor, the same descriptor for all of them
CudnnTensor params;
if (mean->ews() == 1) {
params.setEx(format, dataType, xRank, paramsShape.data());
} else {
params.set(dataType, xRank, paramsShape.data(), paramsStrides.data());
}
// provide scaling parameters
const float alpha32(1), beta32(0);
double alpha64(1), beta64(0);
const void* ptrAlpha =
input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta =
input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
// calculations
// TODO: we can use cache here
CHECK_CUDNN_FAILURE_MSG(
STRINGIZE(cudnnBatchNormalizationBackward),
cudnnBatchNormalizationBackward(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta, ptrAlpha, ptrBeta, x, input->specialBuffer(), dz,
gradO->specialBuffer(), dx, gradI->specialBuffer(), params,
gamma->specialBuffer(), gradG->specialBuffer(), gradB->specialBuffer(), epsilon,
nullptr /*mean->specialBuffer()*/, nullptr /*variance->specialBuffer()*/));
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0) throw cuda_exception::build("batchnormBpCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm, ENGINE_CUDA) {
auto input = INPUT_VARIABLE(0);
auto mean = INPUT_VARIABLE(1);
auto variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
auto output = OUTPUT_VARIABLE(0);
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const double epsilon = T_ARG(0);
if (applyScale) gamma = INPUT_VARIABLE(3);
if (applyOffset) beta = INPUT_VARIABLE(3 + (int)applyScale);
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if (numOfIntArgs > 2)
for (int i = 2; i < numOfIntArgs; ++i) axes.push_back(INT_ARG(i));
else
axes.push_back(inRank - 1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0,
"BATCHNORM CUDNN op: too big number of input axes to normalize over, expected number should be less or "
"equal to rank of input array, but got %i and %i correspondingly !",
numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes =
// {3}, then expected shape would be {5}
std::vector<sd::LongType> expShape;
if (numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<sd::LongType>(inRank, 1);
for (sd::LongType i = 0; i < numOfAxes; ++i) expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape), 0,
"BATCHNORM CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0,
"BATCHNORM CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if (gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0,
"BATCHNORM CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if (beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0,
"BATCHNORM CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
// types of all input arrays should be the same
for (int i = 1; i < block.width(); ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0,
"BATCHNORM CUDNN op: types of all input arrays should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() == input->sizeAt(-1);
std::unique_ptr<NDArray> tmpGamma = {}, tmpBeta = {}, tmpInput = {}, tmpOutput = {};
if (needPermut) { // if NHWC
std::vector<sd::LongType> perm =
inRank == 4 ? std::vector<sd::LongType>({0, 3, 1, 2}) : std::vector<sd::LongType>({0, 4, 1, 2, 3}); // NHWC -> NCHW
tmpInput.reset(new NDArray(input->permute(perm)));
tmpOutput.reset(new NDArray(output->permute(perm)));
input = tmpInput.get();
output = tmpOutput.get();
}
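  // For example (illustrative), an NHWC input of shape {8, 28, 28, 32} is viewed
  // as {8, 32, 28, 28} through the {0, 3, 1, 2} permutation; the output view is
  // permuted the same way, so what cuDNN writes lands back in the caller's NHWC
  // buffer.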
// cudnn requires gamma and beta to be non-nullptr
if (!applyScale) {
tmpGamma.reset(new NDArray(mean));
gamma = tmpGamma.get();
*gamma = 1;
}
if (!applyOffset) {
tmpBeta.reset(new NDArray(mean));
beta = tmpBeta.get();
*beta = 0;
}
// calculations
batchnormCUDNN(block.launchContext(), input, mean, variance, gamma, beta, output, epsilon, axes.size() == 1);
return sd::Status::OK;
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_CHECK(batchnorm, ENGINE_CUDA) {
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = applyScale ? INPUT_VARIABLE(3) : nullptr;
NDArray* beta = applyOffset ? INPUT_VARIABLE(3 + (int)applyScale) : nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if (numOfIntArgs > 2)
for (int i = 2; i < numOfIntArgs; ++i) axes.push_back(INT_ARG(i));
else
axes.push_back(xRank - 1); // default dimension to reduce along is last dimension
Requirements req("CUDNN BATCHNORM OP");
req.expectIn(makeInfoVariable(xRank, RANK_MSG_INPUT0), {4, 5}) &&
req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0),
{DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) &&
req.expectIn(makeInfoVariable(axes.size(), "axes.size()"), {1, 3, 4}) &&
req.expect(
makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1), makeShapeInfoVariable(variance, SHAPE_MSG_INPUT2),
[](const decltype(mean)& l, const decltype(variance)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
if (gamma) {
req.expect(
makeShapeInfoVariable(gamma, SHAPE_MSG_INPUT_ "#gamma"), makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
[](const decltype(gamma)& l, const decltype(mean)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
}
if (beta) {
req.expect(
makeShapeInfoVariable(beta, SHAPE_MSG_INPUT_ "#beta"), makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
[](const decltype(beta)& l, const decltype(mean)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
}
if (axes.size() == 1) {
req.expectIn(makeInfoVariable(mean->lengthOf(), LENGTH_MSG_INPUT1), {-1, 1});
} else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
// mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
req.expect(
makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
makeShapeInfoVariable(inputShapeModif, SHAPE_MSG_INPUT_ "#expect"),
[](const decltype(mean)& l, const decltype(inputShapeModif)& r) { return l->isSameShape(r); }, EXPECTED_EQ_MSG);
}
req.logTheSuccess();
return req;
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const float epsilon = T_ARG(0);
if (applyScale) {
gamma = INPUT_VARIABLE(3);
gradG = OUTPUT_VARIABLE(3);
}
if (applyOffset) {
beta = INPUT_VARIABLE(3 + (int)applyScale);
gradB = OUTPUT_VARIABLE(3 + (int)applyScale);
}
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if (numOfIntArgs > 2)
for (int i = 2; i < numOfIntArgs; ++i) axes.push_back(INT_ARG(i));
else
axes.push_back(inRank - 1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0,
"BATCHNORM_BP CUDNN op: too big number of input axes to normalize over, expected number should be less "
"or equal to rank of input array, but got %i and %i correspondingly !",
numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes =
// {3}, then expected shape would be {5}
std::vector<sd::LongType> expShape;
if (numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<sd::LongType>(inRank, 1);
for (sd::LongType i = 0; i < numOfAxes; ++i) expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape), 0,
"BATCHNORM_BP CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0,
"BATCHNORM_BP CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if (gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0,
"BATCHNORM_BP CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if (beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0,
"BATCHNORM_BP CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
REQUIRE_TRUE(input->isSameShape(gradO), 0,
"BATCHNORM_BP CUDNN op: wrong shape of output gradients array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(input).c_str(), ShapeUtils::shapeAsString(gradO).c_str());
// types of all input arrays should be the same (except gradO)
for (int i = 1; i < block.width() - 2; ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0,
"BATCHNORM_BP CUDNN op: types of arrays (input, mean, variance, gamma, beta) should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() != input->sizeAt(1);
std::unique_ptr<NDArray> tmpGamma = {}, tmpGradG = {}, tmpGradB = {}, tmpInput = {}, tmpGradI = {}, tmpGradO = {};
if (needPermut) { // if NHWC
std::vector<sd::LongType> perm =
inRank == 4 ? std::vector<sd::LongType>({0, 3, 1, 2}) : std::vector<sd::LongType>({0, 4, 1, 2, 3}); // NHWC -> NCHW
tmpInput.reset(new NDArray(input->permute(perm)));
tmpGradO.reset(new NDArray(gradO->permute(perm)));
tmpGradI.reset(new NDArray(gradI->permute(perm)));
input = tmpInput.get();
gradO = tmpGradO.get();
gradI = tmpGradI.get();
}
// cudnn requires gamma, gradG, gradB to be non-nullptr
if (!applyScale) {
tmpGamma.reset(new NDArray(mean));
tmpGradG.reset(new NDArray(mean));
gamma = tmpGamma.get();
gradG = tmpGradG.get();
*gamma = 1;
}
if (!applyOffset) {
tmpGradB.reset(new NDArray(mean));
gradB = tmpGradB.get();
}
// calculations
batchnormBpCUDNN(block.launchContext(), input, mean, variance, gamma, gradO, gradI, gradG, gradB, epsilon,
axes.size() == 1);
*gradM = 0; // put zeros so far
*gradV = 0; // put zeros so far
return sd::Status::OK;
}
PLATFORM_CHECK(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if (numOfIntArgs > 2)
for (int i = 2; i < numOfIntArgs; ++i) axes.push_back(INT_ARG(i));
else
axes.push_back(xRank - 1); // default dimension to reduce along is last dimension
Requirements req("CUDNN BATCHNORM_BP OP");
req.expectIn(makeInfoVariable(xRank, RANK_MSG_INPUT0), {4, 5}) &&
req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0),
{DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) &&
req.expectIn(makeInfoVariable(axes.size(), "axes.size()"), {1, 3, 4}) &&
req.expect(
makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1), makeShapeInfoVariable(variance, SHAPE_MSG_INPUT2),
[](const decltype(mean)& l, const decltype(variance)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
if (gamma) {
req.expect(
makeShapeInfoVariable(gamma, SHAPE_MSG_INPUT_ "#gamma"), makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
[](const decltype(gamma)& l, const decltype(mean)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
}
if (gradG) {
req.expect(
makeShapeInfoVariable(gradG, SHAPE_MSG_INPUT_ "#gradG"), makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
[](const decltype(gradG)& l, const decltype(mean)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
}
if (gradB) {
req.expect(
makeShapeInfoVariable(gradB, SHAPE_MSG_INPUT_ "#gradB"), makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
[](const decltype(gradB)& l, const decltype(mean)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
}
if (axes.size() == 1) {
// isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
req.expectIn(makeInfoVariable(mean->lengthOf(), LENGTH_MSG_INPUT1), {-1, 1});
} else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
// isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or
// [1,dim1,dim2,dim3,dim4]
req.expect(
makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
makeShapeInfoVariable(inputShapeModif, SHAPE_MSG_INPUT_ "#expect"),
[](const decltype(mean)& l, const decltype(inputShapeModif)& r) { return l->isSameShape(r); }, EXPECTED_EQ_MSG);
}
req.logTheSuccess();
return req;
}
} // namespace platforms
} // namespace ops
} // namespace sd
| b2495857dfa66cfee003d63133070cf1098e7489.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include "cudnnUtils.h"
namespace sd {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
static void batchnormCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* mean,
const NDArray* variance, const NDArray* gamma, const NDArray* beta, NDArray* output,
const double epsilon, const bool isSpatialMode) {
// input, output -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const sd::LongType xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle());
CHECK_CUDNN_FAILURE(cudnnSetStream(*handle, *context->getCudaStream()));
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if (isSpatialMode) { // 1xCx1x1
const int iC = static_cast<int>(mean->lengthOf());
const int stride0 = static_cast<int>(mean->strideAt(0));
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC * stride0, stride0, 1, 1})
: std::vector<int>({iC * stride0, stride0, 1, 1, 1});
} else {
paramsShape = std::vector<int>(mean->getShapeAsVector().begin(), mean->getShapeAsVector().end());
paramsStrides = xRank == 4
? std::vector<int>({static_cast<int>(mean->strideAt(0)), static_cast<int>(mean->strideAt(1)), static_cast<int>(mean->strideAt(2)),
static_cast<int>(mean->strideAt(3))})
: std::vector<int>({static_cast<int>(mean->strideAt(0)), static_cast<int>(mean->strideAt(1)), static_cast<int>(mean->strideAt(2)),
static_cast<int>(mean->strideAt(3)), static_cast<int>(mean->strideAt(4))});
}
std::vector<int> xStrides = {static_cast<int>(input->strideAt(0)), static_cast<int>(input->strideAt(1)), static_cast<int>(input->strideAt(2)),
static_cast<int>(input->strideAt(3))};
std::vector<int> zStrides = {static_cast<int>(output->strideAt(0)), static_cast<int>(output->strideAt(1)), static_cast<int>(output->strideAt(2)),
static_cast<int>(output->strideAt(3))};
if (xRank > 4) { // 5D
    xStrides.push_back(static_cast<int>(input->strideAt(4)));
    zStrides.push_back(static_cast<int>(output->strideAt(4)));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
CudnnTensor x;
if (input->ews() == 1) {
x.setEx(format, dataType, xRank, xShape.data());
} else {
x.set(dataType, xRank, xShape.data(), xStrides.data());
}
// output descriptor
CudnnTensor z;
if (output->ews() == 1) {
z.setEx(format, dataType, xRank, xShape.data());
} else {
z.set(dataType, xRank, xShape.data(), zStrides.data());
}
// mean, variance, gamma and beta descriptor, the same descriptor for all of them
CudnnTensor params;
if (mean->ews() == 1) {
params.setEx(format, dataType, xRank, paramsShape.data());
} else {
params.set(dataType, xRank, paramsShape.data(), paramsStrides.data());
}
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* ptrAlpha =
output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta =
output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
// calculations
CHECK_CUDNN_FAILURE_MSG(
STRINGIZE(cudnnBatchNormalizationForwardInference),
cudnnBatchNormalizationForwardInference(
*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION, ptrAlpha, ptrBeta, x,
input->specialBuffer(), z, output->specialBuffer(), params, gamma->specialBuffer(), beta->specialBuffer(),
mean->specialBuffer(), variance->specialBuffer(), epsilon));
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0) throw cuda_exception::build("batchnormCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
}
//////////////////////////////////////////////////////////////////////////
static void batchnormBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* mean,
const NDArray* variance, const NDArray* gamma, const NDArray* gradO, NDArray* gradI,
NDArray* gradG, NDArray* gradB, const double epsilon, const bool isSpatialMode) {
// input, gradO, gradI -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta, gradM, gradV, gradG, gradB -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for
// BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for
// BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle());
  CHECK_CUDNN_FAILURE(cudnnSetStream(*handle, *context->getCudaStream()));
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if (isSpatialMode) { // 1xCx1x1
const int iC = static_cast<int>(mean->lengthOf());
const int stride0 = static_cast<int>(mean->strideAt(0));
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC * stride0, stride0, 1, 1})
: std::vector<int>({iC * stride0, stride0, 1, 1, 1});
} else {
paramsShape = std::vector<int>(mean->getShapeAsVector().begin(), mean->getShapeAsVector().end());
paramsStrides = xRank == 4
? std::vector<int>({static_cast<int>(mean->strideAt(0)), static_cast<int>(mean->strideAt(1)), static_cast<int>(mean->strideAt(2)),
static_cast<int>(mean->strideAt(3))})
: std::vector<int>({static_cast<int>(mean->strideAt(0)), static_cast<int>(mean->strideAt(1)), static_cast<int>(mean->strideAt(2)),
static_cast<int>(mean->strideAt(3)), static_cast<int>(mean->strideAt(4))});
}
std::vector<int> xStrides = {static_cast<int>(input->strideAt(0)), static_cast<int>(input->strideAt(1)), static_cast<int>(input->strideAt(2)),
static_cast<int>(input->strideAt(3))};
std::vector<int> dxStrides = {static_cast<int>(gradI->strideAt(0)), static_cast<int>(gradI->strideAt(1)), static_cast<int>(gradI->strideAt(2)),
static_cast<int>(gradI->strideAt(3))};
std::vector<int> dzStrides = {static_cast<int>(gradO->strideAt(0)), static_cast<int>(gradO->strideAt(1)), static_cast<int>(gradO->strideAt(2)),
static_cast<int>(gradO->strideAt(3))};
if (xRank > 4) { // 5D
xStrides.push_back(static_cast<int>(input->strideAt(4)));
dxStrides.push_back(static_cast<int>(gradI->strideAt(4)));
dzStrides.push_back(static_cast<int>(gradO->strideAt(4)));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
CudnnTensor x;
if (input->ews() == 1)
x.setEx(format, dataType, xRank, xShape.data());
else
x.set(dataType, xRank, xShape.data(), xStrides.data());
// gradO descriptor
CudnnTensor dz;
if (gradO->ews() == 1)
dz.setEx(format, dataType, xRank, xShape.data());
else
dz.set(dataType, xRank, xShape.data(), dzStrides.data());
// gradI descriptor
CudnnTensor dx;
if (input->ews() == 1)
dx.setEx(format, dataType, xRank, xShape.data());
else
dx.set(dataType, xRank, xShape.data(), dxStrides.data());
// mean, variance, gamma, gradG and gradB descriptor, the same descriptor for all of them
CudnnTensor params;
if (mean->ews() == 1) {
params.setEx(format, dataType, xRank, paramsShape.data());
} else {
params.set(dataType, xRank, paramsShape.data(), paramsStrides.data());
}
// provide scaling parameters
const float alpha32(1), beta32(0);
  const double alpha64(1), beta64(0);
const void* ptrAlpha =
input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta =
input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
// calculations
// TODO: we can use cache here
CHECK_CUDNN_FAILURE_MSG(
STRINGIZE(cudnnBatchNormalizationBackward),
cudnnBatchNormalizationBackward(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta, ptrAlpha, ptrBeta, x, input->specialBuffer(), dz,
gradO->specialBuffer(), dx, gradI->specialBuffer(), params,
gamma->specialBuffer(), gradG->specialBuffer(), gradB->specialBuffer(), epsilon,
nullptr /*mean->specialBuffer()*/, nullptr /*variance->specialBuffer()*/));
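                                  // note: savedMean/savedInvVariance are passed as nullptr above, so cudnn recomputes the batch mean/variance internally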
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0) throw cuda_exception::build("batchnormBpCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm, ENGINE_CUDA) {
auto input = INPUT_VARIABLE(0);
auto mean = INPUT_VARIABLE(1);
auto variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
auto output = OUTPUT_VARIABLE(0);
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const double epsilon = T_ARG(0);
if (applyScale) gamma = INPUT_VARIABLE(3);
if (applyOffset) beta = INPUT_VARIABLE(3 + (int)applyScale);
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if (numOfIntArgs > 2)
for (int i = 2; i < numOfIntArgs; ++i) axes.push_back(INT_ARG(i));
else
axes.push_back(inRank - 1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0,
"BATCHNORM CUDNN op: too big number of input axes to normalize over, expected number should be less or "
"equal to rank of input array, but got %i and %i correspondingly !",
numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes =
// {3}, then expected shape would be {5}
std::vector<sd::LongType> expShape;
if (numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<sd::LongType>(inRank, 1);
for (sd::LongType i = 0; i < numOfAxes; ++i) expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape), 0,
"BATCHNORM CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0,
"BATCHNORM CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if (gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0,
"BATCHNORM CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if (beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0,
"BATCHNORM CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
// types of all input arrays should be the same
for (int i = 1; i < block.width(); ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0,
"BATCHNORM CUDNN op: types of all input arrays should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() == input->sizeAt(-1);
std::unique_ptr<NDArray> tmpGamma = {}, tmpBeta = {}, tmpInput = {}, tmpOutput = {};
if (needPermut) { // if NHWC
std::vector<sd::LongType> perm =
inRank == 4 ? std::vector<sd::LongType>({0, 3, 1, 2}) : std::vector<sd::LongType>({0, 4, 1, 2, 3}); // NHWC -> NCHW
tmpInput.reset(new NDArray(input->permute(perm)));
tmpOutput.reset(new NDArray(output->permute(perm)));
input = tmpInput.get();
output = tmpOutput.get();
}
// cudnn requires gamma and beta to be non-nullptr
if (!applyScale) {
tmpGamma.reset(new NDArray(mean));
gamma = tmpGamma.get();
*gamma = 1;
}
if (!applyOffset) {
tmpBeta.reset(new NDArray(mean));
beta = tmpBeta.get();
*beta = 0;
}
// calculations
batchnormCUDNN(block.launchContext(), input, mean, variance, gamma, beta, output, epsilon, axes.size() == 1);
return sd::Status::OK;
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_CHECK(batchnorm, ENGINE_CUDA) {
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = applyScale ? INPUT_VARIABLE(3) : nullptr;
NDArray* beta = applyOffset ? INPUT_VARIABLE(3 + (int)applyScale) : nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if (numOfIntArgs > 2)
for (int i = 2; i < numOfIntArgs; ++i) axes.push_back(INT_ARG(i));
else
axes.push_back(xRank - 1); // default dimension to reduce along is last dimension
Requirements req("CUDNN BATCHNORM OP");
req.expectIn(makeInfoVariable(xRank, RANK_MSG_INPUT0), {4, 5}) &&
req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0),
{DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) &&
req.expectIn(makeInfoVariable(axes.size(), "axes.size()"), {1, 3, 4}) &&
req.expect(
makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1), makeShapeInfoVariable(variance, SHAPE_MSG_INPUT2),
[](const decltype(mean)& l, const decltype(variance)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
if (gamma) {
req.expect(
makeShapeInfoVariable(gamma, SHAPE_MSG_INPUT_ "#gamma"), makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
[](const decltype(gamma)& l, const decltype(mean)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
}
if (beta) {
req.expect(
makeShapeInfoVariable(beta, SHAPE_MSG_INPUT_ "#beta"), makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
[](const decltype(beta)& l, const decltype(mean)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
}
if (axes.size() == 1) {
req.expectIn(makeInfoVariable(mean->lengthOf(), LENGTH_MSG_INPUT1), {-1, 1});
} else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
// mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
req.expect(
makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
makeShapeInfoVariable(inputShapeModif, SHAPE_MSG_INPUT_ "#expect"),
[](const decltype(mean)& l, const decltype(inputShapeModif)& r) { return l->isSameShape(r); }, EXPECTED_EQ_MSG);
}
req.logTheSuccess();
return req;
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const float epsilon = T_ARG(0);
if (applyScale) {
gamma = INPUT_VARIABLE(3);
gradG = OUTPUT_VARIABLE(3);
}
if (applyOffset) {
beta = INPUT_VARIABLE(3 + (int)applyScale);
gradB = OUTPUT_VARIABLE(3 + (int)applyScale);
}
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if (numOfIntArgs > 2)
for (int i = 2; i < numOfIntArgs; ++i) axes.push_back(INT_ARG(i));
else
axes.push_back(inRank - 1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0,
"BATCHNORM_BP CUDNN op: too big number of input axes to normalize over, expected number should be less "
"or equal to rank of input array, but got %i and %i correspondingly !",
numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes =
// {3}, then expected shape would be {5}
std::vector<sd::LongType> expShape;
if (numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<sd::LongType>(inRank, 1);
for (sd::LongType i = 0; i < numOfAxes; ++i) expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape), 0,
"BATCHNORM_BP CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0,
"BATCHNORM_BP CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if (gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0,
"BATCHNORM_BP CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if (beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0,
"BATCHNORM_BP CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
REQUIRE_TRUE(input->isSameShape(gradO), 0,
"BATCHNORM_BP CUDNN op: wrong shape of output gradients array, expected is %s, but got %s instead !",
ShapeUtils::shapeAsString(input).c_str(), ShapeUtils::shapeAsString(gradO).c_str());
// types of all input arrays should be the same (except gradO)
for (int i = 1; i < block.width() - 2; ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0,
"BATCHNORM_BP CUDNN op: types of arrays (input, mean, variance, gamma, beta) should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() != input->sizeAt(1);
std::unique_ptr<NDArray> tmpGamma = {}, tmpGradG = {}, tmpGradB = {}, tmpInput = {}, tmpGradI = {}, tmpGradO = {};
if (needPermut) { // if NHWC
std::vector<sd::LongType> perm =
inRank == 4 ? std::vector<sd::LongType>({0, 3, 1, 2}) : std::vector<sd::LongType>({0, 4, 1, 2, 3}); // NHWC -> NCHW
tmpInput.reset(new NDArray(input->permute(perm)));
tmpGradO.reset(new NDArray(gradO->permute(perm)));
tmpGradI.reset(new NDArray(gradI->permute(perm)));
input = tmpInput.get();
gradO = tmpGradO.get();
gradI = tmpGradI.get();
}
// cudnn requires gamma, gradG, gradB to be non-nullptr
if (!applyScale) {
tmpGamma.reset(new NDArray(mean));
tmpGradG.reset(new NDArray(mean));
gamma = tmpGamma.get();
gradG = tmpGradG.get();
*gamma = 1;
}
if (!applyOffset) {
tmpGradB.reset(new NDArray(mean));
gradB = tmpGradB.get();
}
// calculations
batchnormBpCUDNN(block.launchContext(), input, mean, variance, gamma, gradO, gradI, gradG, gradB, epsilon,
axes.size() == 1);
*gradM = 0; // put zeros so far
*gradV = 0; // put zeros so far
return sd::Status::OK;
}
PLATFORM_CHECK(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if (numOfIntArgs > 2)
for (int i = 2; i < numOfIntArgs; ++i) axes.push_back(INT_ARG(i));
else
axes.push_back(xRank - 1); // default dimension to reduce along is last dimension
Requirements req("CUDNN BATCHNORM_BP OP");
req.expectIn(makeInfoVariable(xRank, RANK_MSG_INPUT0), {4, 5}) &&
req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0),
{DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) &&
req.expectIn(makeInfoVariable(axes.size(), "axes.size()"), {1, 3, 4}) &&
req.expect(
makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1), makeShapeInfoVariable(variance, SHAPE_MSG_INPUT2),
[](const decltype(mean)& l, const decltype(variance)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
if (gamma) {
req.expect(
makeShapeInfoVariable(gamma, SHAPE_MSG_INPUT_ "#gamma"), makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
[](const decltype(gamma)& l, const decltype(mean)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
}
if (gradG) {
req.expect(
makeShapeInfoVariable(gradG, SHAPE_MSG_INPUT_ "#gradG"), makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
[](const decltype(gradG)& l, const decltype(mean)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
}
if (gradB) {
req.expect(
makeShapeInfoVariable(gradB, SHAPE_MSG_INPUT_ "#gradB"), makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
[](const decltype(gradB)& l, const decltype(mean)& r) {
return shape::haveSameShapeAndStrides(l->shapeInfo(), r->shapeInfo());
},
EXPECTED_EQ_MSG);
}
if (axes.size() == 1) {
// isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
req.expectIn(makeInfoVariable(mean->lengthOf(), LENGTH_MSG_INPUT1), {-1, 1});
} else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
// isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or
// [1,dim1,dim2,dim3,dim4]
req.expect(
makeShapeInfoVariable(mean, SHAPE_MSG_INPUT1),
makeShapeInfoVariable(inputShapeModif, SHAPE_MSG_INPUT_ "#expect"),
[](const decltype(mean)& l, const decltype(inputShapeModif)& r) { return l->isSameShape(r); }, EXPECTED_EQ_MSG);
}
req.logTheSuccess();
return req;
}
} // namespace platforms
} // namespace ops
} // namespace sd
|
32af7092dccd2b00ef82021a9457e9b56c1566a8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "leaf_pointer_jumping.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *parent = NULL;
hipMalloc(&parent, XSIZE*YSIZE*sizeof(int));
int *vertex_state = NULL;
hipMalloc(&vertex_state, XSIZE*YSIZE*sizeof(int));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((leaf_pointer_jumping), dim3(gridBlock), dim3(threadBlock), 0, 0, parent, vertex_state, n);
hipDeviceSynchronize();
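// warm-up: run the kernel a few times before timing so one-off initialisation costs are excluded from the measurement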
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
  hipLaunchKernelGGL((leaf_pointer_jumping), dim3(gridBlock), dim3(threadBlock), 0, 0, parent, vertex_state, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
  hipLaunchKernelGGL((leaf_pointer_jumping), dim3(gridBlock), dim3(threadBlock), 0, 0, parent, vertex_state, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 32af7092dccd2b00ef82021a9457e9b56c1566a8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "leaf_pointer_jumping.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *parent = NULL;
cudaMalloc(&parent, XSIZE*YSIZE*sizeof(int));
int *vertex_state = NULL;
cudaMalloc(&vertex_state, XSIZE*YSIZE*sizeof(int));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
leaf_pointer_jumping<<<gridBlock,threadBlock>>>(parent,vertex_state,n);
cudaDeviceSynchronize();
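// warm-up: run the kernel a few times before timing so one-off initialisation costs are excluded from the measurement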
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
leaf_pointer_jumping<<<gridBlock,threadBlock>>>(parent,vertex_state,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
leaf_pointer_jumping<<<gridBlock,threadBlock>>>(parent,vertex_state,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0b7d889d03bc24823df154d8010e914a4af3d914.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/system/hip/execution_policy.h>
#include "caffe2/operators/summarize_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
// structure used to accumulate the moments and other statistical properties
// encountered so far.
template <typename T>
struct SummaryStatsData {
T n;
T min;
T max;
T mean;
T M2;
// initialize to the identity element
void initialize() {
n = mean = M2 = 0;
min = std::numeric_limits<T>::max();
    max = std::numeric_limits<T>::lowest(); // lowest(), not min(): for floating-point T, min() is the smallest positive value
}
T variance() { return (n == 1 ? 0 : M2 / (n - 1)); }
};
// summary_stats_unary_op is a functor that takes in a value x and
// returns a SummaryStatsData whose mean value is initialized to x.
template <typename T>
struct summary_stats_unary_op {
__host__ __device__ SummaryStatsData<T> operator()(const T& x) const {
SummaryStatsData<T> result;
result.n = 1;
result.min = x;
result.max = x;
result.mean = x;
result.M2 = 0;
return result;
}
};
// summary_stats_binary_op is a functor that accepts two SummaryStatsData
// structs and returns a new SummaryStatsData that is an
// approximation to the summary statistics for
// all values that have been aggregated so far
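// (pairwise combination in the style of Chan et al.: the merged mean shifts by delta*n_y/n and M2 gains delta^2*n_x*n_y/n)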
template <typename T>
struct summary_stats_binary_op
: public thrust::binary_function<const SummaryStatsData<T>&,
const SummaryStatsData<T>&,
SummaryStatsData<T> > {
__host__ __device__ SummaryStatsData<T> operator()(
const SummaryStatsData<T>& x, const SummaryStatsData <T>& y) const {
SummaryStatsData<T> result;
T n = x.n + y.n;
T delta = y.mean - x.mean;
T delta2 = delta * delta;
result.n = n;
result.min = thrust::min(x.min, y.min);
result.max = thrust::max(x.max, y.max);
result.mean = x.mean + delta * y.n / n;
result.M2 = x.M2 + y.M2;
result.M2 += delta2 * x.n * y.n / n;
return result;
}
};
} // namespace
template<>
bool SummarizeOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
const int N = X.size();
DCHECK_GT(N, 0);
// TODO(Yangqing): Any better way to avoid having to const cast?
thrust::device_ptr<float> Xdata(const_cast<float*>(X.data()));
summary_stats_unary_op<float> unary_op;
summary_stats_binary_op<float> binary_op;
SummaryStatsData<float> init;
init.initialize();
// compute summary statistics
SummaryStatsData<float> result = thrust::transform_reduce(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(device_context_.cuda_stream()),
#endif // TORCH_HIP_VERSION
Xdata, Xdata + N, unary_op, init, binary_op);
float standard_deviation = std::sqrt(result.variance());
if (to_file_) {
(*log_file_) << result.min << " " << result.max << " " << result.mean << " "
<< standard_deviation << std::endl;
}
if (OutputSize()) {
auto* Y = OperatorBase::Output<Tensor<float, CUDAContext> >(0);
Y->Reshape(std::vector<int>{4});
float output_buffer[NUM_STATS] = {result.min, result.max, result.mean,
standard_deviation};
device_context_.Copy<float, CPUContext, CUDAContext>(
NUM_STATS, output_buffer, Y->mutable_data());
}
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(Summarize, SummarizeOp<float, CUDAContext>)
} // namespace
} // namespace caffe2
| 0b7d889d03bc24823df154d8010e914a4af3d914.cu | #include <cuda.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/system/cuda/execution_policy.h>
#include "caffe2/operators/summarize_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
// structure used to accumulate the moments and other statistical properties
// encountered so far.
template <typename T>
struct SummaryStatsData {
T n;
T min;
T max;
T mean;
T M2;
// initialize to the identity element
void initialize() {
n = mean = M2 = 0;
min = std::numeric_limits<T>::max();
    max = std::numeric_limits<T>::lowest(); // lowest(), not min(): for floating-point T, min() is the smallest positive value
}
T variance() { return (n == 1 ? 0 : M2 / (n - 1)); }
};
// summary_stats_unary_op is a functor that takes in a value x and
// returns a SummaryStatsData whose mean value is initialized to x.
template <typename T>
struct summary_stats_unary_op {
__host__ __device__ SummaryStatsData<T> operator()(const T& x) const {
SummaryStatsData<T> result;
result.n = 1;
result.min = x;
result.max = x;
result.mean = x;
result.M2 = 0;
return result;
}
};
// summary_stats_binary_op is a functor that accepts two SummaryStatsData
// structs and returns a new SummaryStatsData that is an
// approximation to the summary statistics for
// all values that have been aggregated so far
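// (pairwise combination in the style of Chan et al.: the merged mean shifts by delta*n_y/n and M2 gains delta^2*n_x*n_y/n)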
template <typename T>
struct summary_stats_binary_op
: public thrust::binary_function<const SummaryStatsData<T>&,
const SummaryStatsData<T>&,
SummaryStatsData<T> > {
__host__ __device__ SummaryStatsData<T> operator()(
const SummaryStatsData<T>& x, const SummaryStatsData <T>& y) const {
SummaryStatsData<T> result;
T n = x.n + y.n;
T delta = y.mean - x.mean;
T delta2 = delta * delta;
result.n = n;
result.min = thrust::min(x.min, y.min);
result.max = thrust::max(x.max, y.max);
result.mean = x.mean + delta * y.n / n;
result.M2 = x.M2 + y.M2;
result.M2 += delta2 * x.n * y.n / n;
return result;
}
};
} // namespace
template<>
bool SummarizeOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
const int N = X.size();
DCHECK_GT(N, 0);
// TODO(Yangqing): Any better way to avoid having to const cast?
thrust::device_ptr<float> Xdata(const_cast<float*>(X.data()));
summary_stats_unary_op<float> unary_op;
summary_stats_binary_op<float> binary_op;
SummaryStatsData<float> init;
init.initialize();
// compute summary statistics
SummaryStatsData<float> result = thrust::transform_reduce(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(device_context_.cuda_stream()),
#endif // CUDA_VERSION
Xdata, Xdata + N, unary_op, init, binary_op);
float standard_deviation = std::sqrt(result.variance());
if (to_file_) {
(*log_file_) << result.min << " " << result.max << " " << result.mean << " "
<< standard_deviation << std::endl;
}
if (OutputSize()) {
auto* Y = OperatorBase::Output<Tensor<float, CUDAContext> >(0);
Y->Reshape(std::vector<int>{4});
float output_buffer[NUM_STATS] = {result.min, result.max, result.mean,
standard_deviation};
device_context_.Copy<float, CPUContext, CUDAContext>(
NUM_STATS, output_buffer, Y->mutable_data());
}
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(Summarize, SummarizeOp<float, CUDAContext>)
} // namespace
} // namespace caffe2
|
cd9e03882cd9d099764435260e6202fdf0ac3324.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <random>
#include <chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
void randomize_vector_float(thrust::host_vector<float> &h_vec, float start, float stop)
{
// randomize the seed, create distribution
auto seed = std::chrono::system_clock::now().time_since_epoch().count();
std::mt19937 gen(seed);
// fill array with random values
std::uniform_real_distribution<float> dist(start, stop);
for (thrust::host_vector<float>::iterator i = h_vec.begin(); i != h_vec.end(); i++)
{
*i = dist(gen);
}
}
int main(int argc, char *argv[])
{
int n = atoi(argv[1]);
thrust::host_vector<float> h_vec(n);
randomize_vector_float(h_vec, -1.0, 1.0);
thrust::device_vector<float> d_vec = h_vec;
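// constructing the device_vector from the host_vector allocates GPU memory and copies the data across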
// timing variables
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
float result = thrust::reduce(d_vec.begin(), d_vec.end());
hipEventRecord(stop);
hipEventSynchronize(stop);
// calc time in ms
float ms;
hipEventElapsedTime(&ms, start, stop);
printf("%f\n", result);
printf("%f\n", ms);
}
| cd9e03882cd9d099764435260e6202fdf0ac3324.cu | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <random>
#include <chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
void randomize_vector_float(thrust::host_vector<float> &h_vec, float start, float stop)
{
// randomize the seed, create distribution
auto seed = std::chrono::system_clock::now().time_since_epoch().count();
std::mt19937 gen(seed);
// fill array with random values
std::uniform_real_distribution<float> dist(start, stop);
for (thrust::host_vector<float>::iterator i = h_vec.begin(); i != h_vec.end(); i++)
{
*i = dist(gen);
}
}
int main(int argc, char *argv[])
{
int n = atoi(argv[1]);
thrust::host_vector<float> h_vec(n);
randomize_vector_float(h_vec, -1.0, 1.0);
thrust::device_vector<float> d_vec = h_vec;
// timing variables
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
float result = thrust::reduce(d_vec.begin(), d_vec.end());
cudaEventRecord(stop);
cudaEventSynchronize(stop);
// calc time in ms
float ms;
cudaEventElapsedTime(&ms, start, stop);
printf("%f\n", result);
printf("%f\n", ms);
}
|
1ee49034efababa579d23e090d77981ad17ea353.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include "Error.h"
__global__ void cubeKernel(float * d_out, float * d_in){
    // one possible completion of the exercise: each thread cubes one element
    int idx = threadIdx.x;
    d_out[idx] = d_in[idx] * d_in[idx] * d_in[idx];
}
void onDevice(float *h_in, float *h_out, int ARRAY_SIZE, int ARRAY_BYTES){
    // (exercise placeholders filled in with one possible solution)
    // declare GPU memory pointers
    float *d_in = NULL;
    float *d_out = NULL;
    // allocate GPU memory
    hipMalloc((void**)&d_in, ARRAY_BYTES);
    hipMalloc((void**)&d_out, ARRAY_BYTES);
    // transfer the array to the GPU
    hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
    // launch the kernel
    hipLaunchKernelGGL(( cubeKernel), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
    // copy back the result array to the CPU
    hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
    // free GPU memory pointers
    hipFree(d_in);
    hipFree(d_out);
}
void test(float *h_in, float *h_out, int ARRAY_SIZE, int ARRAY_BYTES){
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
assert( h_out[i] == (h_in[i] * h_in[i] * h_in[i]) );
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
printf("-: successful execution :-\n");
}
void onHost(){
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    // declare CPU memory pointers
    float *h_in = NULL;
    float *h_out = NULL;
    // Allocate CPU memory pointers
    h_in = (float*)malloc(ARRAY_BYTES);
    h_out = (float*)malloc(ARRAY_BYTES);
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }
    // call the kernel
    onDevice(h_in, h_out, ARRAY_SIZE, ARRAY_BYTES);
    test(h_in, h_out, ARRAY_SIZE, ARRAY_BYTES);
    // free CPU memory pointers
    free(h_in);
    free(h_out);
}
int main(int argc, char ** argv) {
onHost();
return 0;
} | 1ee49034efababa579d23e090d77981ad17ea353.cu | #include <stdio.h>
#include <assert.h>
#include "Error.h"
__global__ void cubeKernel(float * d_out, float * d_in){
    // one possible completion of the exercise: each thread cubes one element
    int idx = threadIdx.x;
    d_out[idx] = d_in[idx] * d_in[idx] * d_in[idx];
}
void onDevice(float *h_in, float *h_out, int ARRAY_SIZE, int ARRAY_BYTES){
    // (exercise placeholders filled in with one possible solution)
    // declare GPU memory pointers
    float *d_in = NULL;
    float *d_out = NULL;
    // allocate GPU memory
    cudaMalloc((void**)&d_in, ARRAY_BYTES);
    cudaMalloc((void**)&d_out, ARRAY_BYTES);
    // transfer the array to the GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    // launch the kernel
    cubeKernel<<<1, ARRAY_SIZE>>>(d_out, d_in);
    // copy back the result array to the CPU
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    // free GPU memory pointers
    cudaFree(d_in);
    cudaFree(d_out);
}
void test(float *h_in, float *h_out, int ARRAY_SIZE, int ARRAY_BYTES){
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
assert( h_out[i] == (h_in[i] * h_in[i] * h_in[i]) );
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
printf("-: successful execution :-\n");
}
void onHost(){
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    // declare CPU memory pointers
    float *h_in = NULL;
    float *h_out = NULL;
    // Allocate CPU memory pointers
    h_in = (float*)malloc(ARRAY_BYTES);
    h_out = (float*)malloc(ARRAY_BYTES);
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }
    // call the kernel
    onDevice(h_in, h_out, ARRAY_SIZE, ARRAY_BYTES);
    test(h_in, h_out, ARRAY_SIZE, ARRAY_BYTES);
    // free CPU memory pointers
    free(h_in);
    free(h_out);
}
int main(int argc, char ** argv) {
onHost();
return 0;
} |
d299647a686991185143571108cd28d1396e82c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
#define TILE_SIZE 300
/**
* Clamps the value val in the interval [lo, high].
* Equivalent to max(lo, min(val, high)).
*
* @param val: value to clamp.
* @param lo: lower bound for the clamping.
* @param high: higher bound for the clamping.
* @return val clamped between lo and high.
*/
template< typename T > __device__ T clamp(T val, T lo, T high) {
return max(lo, min(val, high));
}
/**
 * Returns the value of a Gaussian function with a standard
* deviation of sigma and an input value of x.
*
* @param x: input value of the Gaussian function.
* @param sigma: standard deviation of Gaussian function.
* @return result of the Gaussian function.
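 * Note: the 1/(sqrt(2*pi)*sigma) normalisation factor is omitted here; process() divides by the sum of the sampled weights instead.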
*/
__device__ float gauss_func(int x, float sigma) {
return exp(-(pow(x, 2.0) / (2.0 * pow(sigma, 2.0))));
}
__global__ void process(const cv::cuda::PtrStep<uchar3> src,
cv::cuda::PtrStep<uchar3> dst, int rows, int cols,
int kernel_size, int sigma, bool first_pass) {
int dst_x;
int dst_y;
if(first_pass) {
dst_x = TILE_SIZE * blockIdx.x + threadIdx.x-kernel_size;
dst_y = blockDim.y * blockIdx.y + threadIdx.y;
} else {
dst_x = blockDim.x * blockIdx.x + threadIdx.x;
dst_y = TILE_SIZE * blockIdx.y + threadIdx.y-kernel_size;
}
// Filter radius
const int kernel_div2 = kernel_size / 2;
// Create shared memory using externally passed size
extern __shared__ uchar3 tile[];
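  // the tile caches TILE_SIZE + KERNEL_SIZE pixels of the current row (first pass) or column (second pass), including the halo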
int px = clamp<float>(dst_x, 0, cols-1);
int py = clamp<float>(dst_y, 0, rows-1);
// Cache pixels in shared memory
if(first_pass) {
tile[threadIdx.x] = src(py, px);
} else {
tile[threadIdx.y] = src(py, px);
}
// Wait until all thread cache their pixes values
__syncthreads();
bool is_inside_tile;
if(first_pass) {
is_inside_tile =kernel_div2 <= threadIdx.x && threadIdx.x < TILE_SIZE + kernel_div2;
if (dst_x < cols && dst_y < rows && is_inside_tile) {
float3 val = make_float3(0, 0, 0);
float gauss_sum = 0;
for (int m = -kernel_div2; m <= kernel_div2; m++) {
float gauss_val = gauss_func(m, sigma);
gauss_sum += gauss_val;
int tx = threadIdx.x+m;
uchar3 pix = tile[tx];
val.x += (float)pix.x*gauss_val;
val.y += (float)pix.y*gauss_val;
val.z += (float)pix.z*gauss_val;
}
val.x = val.x/gauss_sum;
val.y = val.y/gauss_sum;
val.z = val.z/gauss_sum;
dst(dst_y, dst_x).x = val.x;
dst(dst_y, dst_x).y = val.y;
dst(dst_y, dst_x).z = val.z;
}
} else {
is_inside_tile = kernel_div2 <= threadIdx.y && threadIdx.y < TILE_SIZE + kernel_div2;
if (dst_x < cols && dst_y < rows && is_inside_tile) {
float3 val = make_float3(0, 0, 0);
float gauss_sum = 0;
for (int n = -kernel_div2; n <= kernel_div2; n++) {
float gauss_val = gauss_func(n, sigma);
gauss_sum += gauss_val;
int ty = threadIdx.y+n;
uchar3 pix = tile[ty];
val.x += (float)pix.x*gauss_val;
val.y += (float)pix.y*gauss_val;
val.z += (float)pix.z*gauss_val;
}
val.x = val.x/gauss_sum;
val.y = val.y/gauss_sum;
val.z = val.z/gauss_sum;
dst(dst_y, dst_x).x = val.x;
dst(dst_y, dst_x).y = val.y;
dst(dst_y, dst_x).z = val.z;
}
}
}
int divUp(int a, int b) {
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
void startCUDA (cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, int KERNEL_SIZE,
float SIGMA, bool first_pass) {
const dim3 blockX(TILE_SIZE+KERNEL_SIZE);
const dim3 gridX(divUp(dst.cols, TILE_SIZE)+1, divUp(dst.rows, blockX.y));
const dim3 blockY(1, TILE_SIZE+KERNEL_SIZE);
const dim3 gridY(divUp(dst.cols, blockY.x), divUp(dst.rows, TILE_SIZE)+1);
// Create a tile to process pixels within a block's shared memory
int shmem_size = sizeof(uchar3)*(TILE_SIZE+KERNEL_SIZE);
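  // the extra KERNEL_SIZE entries hold the halo pixels needed by threads near the tile borders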
if(first_pass) {
hipLaunchKernelGGL(( process), dim3(gridX), dim3(blockX), shmem_size, 0, src, dst, dst.rows, dst.cols,
KERNEL_SIZE, SIGMA, first_pass);
} else {
hipLaunchKernelGGL(( process), dim3(gridY), dim3(blockY), shmem_size, 0, src, dst, dst.rows, dst.cols,
KERNEL_SIZE, SIGMA, first_pass);
}
}
| d299647a686991185143571108cd28d1396e82c0.cu | #include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
#define TILE_SIZE 300
/**
* Clamps the value val in the interval [lo, high].
* Equivalent to max(lo, min(val, high)).
*
* @param val: value to clamp.
* @param lo: lower bound for the clamping.
* @param high: higher bound for the clamping.
* @return val clamped between lo and high.
*/
template< typename T > __device__ T clamp(T val, T lo, T high) {
return max(lo, min(val, high));
}
/**
 * Returns the value of a Gaussian function with a standard
* deviation of sigma and an input value of x.
*
* @param x: input value of the Gaussian function.
* @param sigma: standard deviation of Gaussian function.
* @return result of the Gaussian function.
*/
__device__ float gauss_func(int x, float sigma) {
return exp(-(pow(x, 2.0) / (2.0 * pow(sigma, 2.0))));
}
__global__ void process(const cv::cuda::PtrStep<uchar3> src,
cv::cuda::PtrStep<uchar3> dst, int rows, int cols,
int kernel_size, int sigma, bool first_pass) {
int dst_x;
int dst_y;
if(first_pass) {
dst_x = TILE_SIZE * blockIdx.x + threadIdx.x-kernel_size;
dst_y = blockDim.y * blockIdx.y + threadIdx.y;
} else {
dst_x = blockDim.x * blockIdx.x + threadIdx.x;
dst_y = TILE_SIZE * blockIdx.y + threadIdx.y-kernel_size;
}
// Filter radius
const int kernel_div2 = kernel_size / 2;
// Create shared memory using externally passed size
extern __shared__ uchar3 tile[];
int px = clamp<float>(dst_x, 0, cols-1);
int py = clamp<float>(dst_y, 0, rows-1);
// Cache pixels in shared memory
if(first_pass) {
tile[threadIdx.x] = src(py, px);
} else {
tile[threadIdx.y] = src(py, px);
}
// Wait until all thread cache their pixes values
__syncthreads();
bool is_inside_tile;
if(first_pass) {
is_inside_tile =kernel_div2 <= threadIdx.x && threadIdx.x < TILE_SIZE + kernel_div2;
if (dst_x < cols && dst_y < rows && is_inside_tile) {
float3 val = make_float3(0, 0, 0);
float gauss_sum = 0;
for (int m = -kernel_div2; m <= kernel_div2; m++) {
float gauss_val = gauss_func(m, sigma);
gauss_sum += gauss_val;
int tx = threadIdx.x+m;
uchar3 pix = tile[tx];
val.x += (float)pix.x*gauss_val;
val.y += (float)pix.y*gauss_val;
val.z += (float)pix.z*gauss_val;
}
val.x = val.x/gauss_sum;
val.y = val.y/gauss_sum;
val.z = val.z/gauss_sum;
dst(dst_y, dst_x).x = val.x;
dst(dst_y, dst_x).y = val.y;
dst(dst_y, dst_x).z = val.z;
}
} else {
is_inside_tile = kernel_div2 <= threadIdx.y && threadIdx.y < TILE_SIZE + kernel_div2;
if (dst_x < cols && dst_y < rows && is_inside_tile) {
float3 val = make_float3(0, 0, 0);
float gauss_sum = 0;
for (int n = -kernel_div2; n <= kernel_div2; n++) {
float gauss_val = gauss_func(n, sigma);
gauss_sum += gauss_val;
int ty = threadIdx.y+n;
uchar3 pix = tile[ty];
val.x += (float)pix.x*gauss_val;
val.y += (float)pix.y*gauss_val;
val.z += (float)pix.z*gauss_val;
}
val.x = val.x/gauss_sum;
val.y = val.y/gauss_sum;
val.z = val.z/gauss_sum;
dst(dst_y, dst_x).x = val.x;
dst(dst_y, dst_x).y = val.y;
dst(dst_y, dst_x).z = val.z;
}
}
}
int divUp(int a, int b) {
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
void startCUDA (cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, int KERNEL_SIZE,
float SIGMA, bool first_pass) {
const dim3 blockX(TILE_SIZE+KERNEL_SIZE);
const dim3 gridX(divUp(dst.cols, TILE_SIZE)+1, divUp(dst.rows, blockX.y));
const dim3 blockY(1, TILE_SIZE+KERNEL_SIZE);
const dim3 gridY(divUp(dst.cols, blockY.x), divUp(dst.rows, TILE_SIZE)+1);
// Create a tile to process pixels within a block's shared memory
int shmem_size = sizeof(uchar3)*(TILE_SIZE+KERNEL_SIZE);
if(first_pass) {
process<<<gridX, blockX, shmem_size>>>(src, dst, dst.rows, dst.cols,
KERNEL_SIZE, SIGMA, first_pass);
} else {
process<<<gridY, blockY, shmem_size>>>(src, dst, dst.rows, dst.cols,
KERNEL_SIZE, SIGMA, first_pass);
}
}
|
2c13ac7f1ffe2935382c0a8bb3f8862137a3dcdd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @File nbody.cu
*
* Implementation of the N-Body problem
*
* Parallel Computations on GPU (PCG 2020)
* Assignment no. 1 (cuda)
* Login: xstupi00
*/
#include <cmath>
#include <cfloat>
#include "nbody.h"
/**
* CUDA kernel to calculate gravitation velocity
* @param p - particles
* @param tmp_vel - temp array for velocities
* @param N - Number of particles
* @param dt - Size of the time step
*/
__global__ void calculate_gravitation_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
// Computes the global index of thread within the grid
int i = blockDim.x * blockIdx.x + threadIdx.x;
// Checks whether the thread is not outside the particles borders
if (i < N) {
// Loads the particle data covered by the individual thread
float pos_x = p.pos_x[i];
float pos_y = p.pos_y[i];
float pos_z = p.pos_z[i];
// Initialise of auxiliary accumulators of velocity
float tmp_vel_x = 0.0f;
float tmp_vel_y = 0.0f;
float tmp_vel_z = 0.0f;
// The iterations over all particles to compute the gravitation velocity to them
for (int j = 0; j < N; j++) {
// Instruction Level Parallelism
float s = -G * dt * p.weight[j];
// Computes the distance between the relevant particles
float r_x = pos_x - p.pos_x[j];
float r_y = pos_y - p.pos_y[j];
float r_z = pos_z - p.pos_z[j];
      // Computes the Euclidean distance between the two particles (despite its name, inv_dist holds the distance itself)
float inv_dist = sqrtf(r_x * r_x + r_y * r_y + r_z * r_z);
// Computes the gravitation velocity (Fg_dt_m2_r)
s /= (inv_dist * inv_dist * inv_dist + FLT_MIN);
// The speed that a particle body receives due to the strength of the relevant particle
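      // i.e. dv += -G * m_j * dt * r / |r|^3, zeroed when the pair is closer than COLLISION_DISTANCE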
tmp_vel_x += (inv_dist > COLLISION_DISTANCE) ? r_x * s : 0.0f;
tmp_vel_y += (inv_dist > COLLISION_DISTANCE) ? r_y * s : 0.0f;
tmp_vel_z += (inv_dist > COLLISION_DISTANCE) ? r_z * s : 0.0f;
}
// Stores the final computed velocities of each particle to the auxiliary velocity vector
tmp_vel.x[i] = tmp_vel_x;
tmp_vel.y[i] = tmp_vel_y;
tmp_vel.z[i] = tmp_vel_z;
}
}// end of calculate_gravitation_velocity
//----------------------------------------------------------------------------------------------------------------------
/**
* CUDA kernel to calculate collision velocity
* @param p - particles
* @param tmp_vel - temp array for velocities
* @param N - Number of particles
* @param dt - Size of the time step
*/
__global__ void calculate_collision_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
// Computes the global index of thread within the grid
int i = blockDim.x * blockIdx.x + threadIdx.x;
// Checks whether the thread is not outside the particles borders
if (i < N) {
// Loads the particle data covered by the individual thread
float pos_x = p.pos_x[i];
float pos_y = p.pos_y[i];
float pos_z = p.pos_z[i];
float vel_x = p.vel_x[i];
float vel_y = p.vel_y[i];
float vel_z = p.vel_z[i];
float weight_i = p.weight[i];
// Initialise of auxiliary accumulators of velocity
float tmp_vel_x = 0.0f;
float tmp_vel_y = 0.0f;
float tmp_vel_z = 0.0f;
// The iterations over all particles to compute the collision velocity to them
for (int j = 0; j < N; j++) {
// Computes the distance between the relevant particles
float r_x = pos_x - p.pos_x[j];
float r_y = pos_y - p.pos_y[j];
float r_z = pos_z - p.pos_z[j];
// Loads the weight of the processing particle
float weight_j = p.weight[j];
      // Computes the Euclidean distance between the two particles (despite its name, inv_dist holds the distance itself)
float inv_dist = sqrtf(r_x * r_x + r_y * r_y + r_z * r_z);
// Checks whether the particles are in the sufficient near distance for collision
if (inv_dist > 0.0f && inv_dist < COLLISION_DISTANCE) {
// Computes the temporary partial results to eliminate recalculation
float weight_diff = weight_i - weight_j;
float weight_sum = weight_i + weight_j;
float weight_j_x_2 = 2 * weight_j;
// Computes the collision velocities between the relevant particles and accumulate the results
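        // 1D perfectly elastic collision per component: v1' = ((m1 - m2)*v1 + 2*m2*v2) / (m1 + m2); the accumulator stores v1' - v1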
tmp_vel_x += inv_dist > 0.0f ?
((weight_diff * vel_x + weight_j_x_2 * p.vel_x[j]) / weight_sum) - vel_x : 0.0f;
tmp_vel_y += inv_dist > 0.0f ?
((weight_diff * vel_y + weight_j_x_2 * p.vel_y[j]) / weight_sum) - vel_y : 0.0f;
tmp_vel_z += inv_dist > 0.0f ?
((weight_diff * vel_z + weight_j_x_2 * p.vel_z[j]) / weight_sum) - vel_z : 0.0f;
}
}
// Stores the final computed velocities of each particle to the auxiliary velocity vector
tmp_vel.x[i] += tmp_vel_x;
tmp_vel.y[i] += tmp_vel_y;
tmp_vel.z[i] += tmp_vel_z;
}
}// end of calculate_collision_velocity
//----------------------------------------------------------------------------------------------------------------------
/**
* CUDA kernel to update particles
* @param p - particles
* @param tmp_vel - temp array for velocities
* @param N - Number of particles
* @param dt - Size of the time step
*/
__global__ void update_particle(t_particles p, t_velocities tmp_vel, int N, float dt)
{
// Computes the global index of thread within the grid
int i = blockDim.x * blockIdx.x + threadIdx.x;
// Checks whether the thread is not outside the particles borders
if (i < N) {
// Updates the velocity of particles with respect to the computed gravitation and collision velocity
p.vel_x[i] += tmp_vel.x[i];
p.vel_y[i] += tmp_vel.y[i];
p.vel_z[i] += tmp_vel.z[i];
// Updates the positions of particles with respect to the updated velocity
p.pos_x[i] += p.vel_x[i] * dt;
p.pos_y[i] += p.vel_y[i] * dt;
p.pos_z[i] += p.vel_z[i] * dt;
}
}// end of update_particle
//----------------------------------------------------------------------------------------------------------------------
/**
* CUDA kernel to update particles
* @param p - particles
* @param comX - pointer to a center of mass position in X
* @param comY - pointer to a center of mass position in Y
* @param comZ - pointer to a center of mass position in Z
* @param comW - pointer to a center of mass weight
* @param lock - pointer to a user-implemented lock
* @param N - Number of particles
*/
__global__ void centerOfMass(t_particles p, float* comX, float* comY, float* comZ, float* comW, int* lock, const int N)
{
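  // kernel body not implemented in this version; the host-side reference centerOfMassCPU below computes the same quantity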
}// end of centerOfMass
//----------------------------------------------------------------------------------------------------------------------
/**
* CPU implementation of the Center of Mass calculation
* @param particles - All particles in the system
* @param N - Number of particles
*/
__host__ float4 centerOfMassCPU(MemDesc& memDesc)
{
float4 com = {0 ,0, 0, 0};
for(int i = 0; i < memDesc.getDataSize(); i++)
{
// Calculate the vector on the line connecting points and most recent position of center-of-mass
const float dx = memDesc.getPosX(i) - com.x;
const float dy = memDesc.getPosY(i) - com.y;
const float dz = memDesc.getPosZ(i) - com.z;
// Calculate weight ratio only if at least one particle isn't massless
const float dw = ((memDesc.getWeight(i) + com.w) > 0.0f)
? ( memDesc.getWeight(i) / (memDesc.getWeight(i) + com.w)) : 0.0f;
    // Update position and weight of the center-of-mass according to the weight ratio and vector
com.x += dx * dw;
com.y += dy * dw;
com.z += dz * dw;
com.w += memDesc.getWeight(i);
}
return com;
}// end of centerOfMassCPU
//----------------------------------------------------------------------------------------------------------------------
| 2c13ac7f1ffe2935382c0a8bb3f8862137a3dcdd.cu | /**
* @File nbody.cu
*
* Implementation of the N-Body problem
*
* Parallel Computations on GPU (PCG 2020)
* Assignment no. 1 (cuda)
* Login: xstupi00
*/
#include <cmath>
#include <cfloat>
#include "nbody.h"
/**
* CUDA kernel to calculate gravitation velocity
* @param p - particles
* @param tmp_vel - temp array for velocities
* @param N - Number of particles
* @param dt - Size of the time step
*/
__global__ void calculate_gravitation_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
// Computes the global index of thread within the grid
int i = blockDim.x * blockIdx.x + threadIdx.x;
// Checks whether the thread is not outside the particles borders
if (i < N) {
// Loads the particle data covered by the individual thread
float pos_x = p.pos_x[i];
float pos_y = p.pos_y[i];
float pos_z = p.pos_z[i];
// Initialise of auxiliary accumulators of velocity
float tmp_vel_x = 0.0f;
float tmp_vel_y = 0.0f;
float tmp_vel_z = 0.0f;
// The iterations over all particles to compute the gravitation velocity to them
for (int j = 0; j < N; j++) {
// Instruction Level Parallelism
float s = -G * dt * p.weight[j];
// Computes the distance between the relevant particles
float r_x = pos_x - p.pos_x[j];
float r_y = pos_y - p.pos_y[j];
float r_z = pos_z - p.pos_z[j];
      // Computes the Euclidean distance between the two particles (despite its name, inv_dist holds the distance itself)
float inv_dist = sqrtf(r_x * r_x + r_y * r_y + r_z * r_z);
// Computes the gravitation velocity (Fg_dt_m2_r)
s /= (inv_dist * inv_dist * inv_dist + FLT_MIN);
// The speed that a particle body receives due to the strength of the relevant particle
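      // i.e. dv += -G * m_j * dt * r / |r|^3, zeroed when the pair is closer than COLLISION_DISTANCE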
tmp_vel_x += (inv_dist > COLLISION_DISTANCE) ? r_x * s : 0.0f;
tmp_vel_y += (inv_dist > COLLISION_DISTANCE) ? r_y * s : 0.0f;
tmp_vel_z += (inv_dist > COLLISION_DISTANCE) ? r_z * s : 0.0f;
}
// Stores the final computed velocities of each particle to the auxiliary velocity vector
tmp_vel.x[i] = tmp_vel_x;
tmp_vel.y[i] = tmp_vel_y;
tmp_vel.z[i] = tmp_vel_z;
}
}// end of calculate_gravitation_velocity
//----------------------------------------------------------------------------------------------------------------------
/**
* CUDA kernel to calculate collision velocity
* @param p - particles
* @param tmp_vel - temp array for velocities
* @param N - Number of particles
* @param dt - Size of the time step
*/
__global__ void calculate_collision_velocity(t_particles p, t_velocities tmp_vel, int N, float dt)
{
// Computes the global index of thread within the grid
int i = blockDim.x * blockIdx.x + threadIdx.x;
// Checks whether the thread is not outside the particles borders
if (i < N) {
// Loads the particle data covered by the individual thread
float pos_x = p.pos_x[i];
float pos_y = p.pos_y[i];
float pos_z = p.pos_z[i];
float vel_x = p.vel_x[i];
float vel_y = p.vel_y[i];
float vel_z = p.vel_z[i];
float weight_i = p.weight[i];
// Initialise of auxiliary accumulators of velocity
float tmp_vel_x = 0.0f;
float tmp_vel_y = 0.0f;
float tmp_vel_z = 0.0f;
// The iterations over all particles to compute the collision velocity to them
for (int j = 0; j < N; j++) {
// Computes the distance between the relevant particles
float r_x = pos_x - p.pos_x[j];
float r_y = pos_y - p.pos_y[j];
float r_z = pos_z - p.pos_z[j];
// Loads the weight of the processing particle
float weight_j = p.weight[j];
      // Computes the Euclidean distance between the two particles (despite its name, inv_dist holds the distance itself)
float inv_dist = sqrtf(r_x * r_x + r_y * r_y + r_z * r_z);
// Checks whether the particles are in the sufficient near distance for collision
if (inv_dist > 0.0f && inv_dist < COLLISION_DISTANCE) {
// Computes the temporary partial results to eliminate recalculation
float weight_diff = weight_i - weight_j;
float weight_sum = weight_i + weight_j;
float weight_j_x_2 = 2 * weight_j;
// Computes the collision velocities between the relevant particles and accumulate the results
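        // 1D perfectly elastic collision per component: v1' = ((m1 - m2)*v1 + 2*m2*v2) / (m1 + m2); the accumulator stores v1' - v1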
tmp_vel_x += inv_dist > 0.0f ?
((weight_diff * vel_x + weight_j_x_2 * p.vel_x[j]) / weight_sum) - vel_x : 0.0f;
tmp_vel_y += inv_dist > 0.0f ?
((weight_diff * vel_y + weight_j_x_2 * p.vel_y[j]) / weight_sum) - vel_y : 0.0f;
tmp_vel_z += inv_dist > 0.0f ?
((weight_diff * vel_z + weight_j_x_2 * p.vel_z[j]) / weight_sum) - vel_z : 0.0f;
}
}
// Stores the final computed velocities of each particle to the auxiliary velocity vector
tmp_vel.x[i] += tmp_vel_x;
tmp_vel.y[i] += tmp_vel_y;
tmp_vel.z[i] += tmp_vel_z;
}
}// end of calculate_collision_velocity
//----------------------------------------------------------------------------------------------------------------------
/**
* CUDA kernel to update particles
* @param p - particles
* @param tmp_vel - temp array for velocities
* @param N - Number of particles
* @param dt - Size of the time step
*/
__global__ void update_particle(t_particles p, t_velocities tmp_vel, int N, float dt)
{
// Computes the global index of thread within the grid
int i = blockDim.x * blockIdx.x + threadIdx.x;
// Checks whether the thread is not outside the particles borders
if (i < N) {
// Updates the velocity of particles with respect to the computed gravitation and collision velocity
p.vel_x[i] += tmp_vel.x[i];
p.vel_y[i] += tmp_vel.y[i];
p.vel_z[i] += tmp_vel.z[i];
// Updates the positions of particles with respect to the updated velocity
p.pos_x[i] += p.vel_x[i] * dt;
p.pos_y[i] += p.vel_y[i] * dt;
p.pos_z[i] += p.vel_z[i] * dt;
}
}// end of update_particle
//----------------------------------------------------------------------------------------------------------------------
/**
* CUDA kernel to update particles
* @param p - particles
* @param comX - pointer to a center of mass position in X
* @param comY - pointer to a center of mass position in Y
* @param comZ - pointer to a center of mass position in Z
* @param comW - pointer to a center of mass weight
* @param lock - pointer to a user-implemented lock
* @param N - Number of particles
*/
__global__ void centerOfMass(t_particles p, float* comX, float* comY, float* comZ, float* comW, int* lock, const int N)
{
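  // kernel body not implemented in this version; the host-side reference centerOfMassCPU below computes the same quantity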
}// end of centerOfMass
//----------------------------------------------------------------------------------------------------------------------
/**
* CPU implementation of the Center of Mass calculation
* @param particles - All particles in the system
* @param N - Number of particles
*/
__host__ float4 centerOfMassCPU(MemDesc& memDesc)
{
float4 com = {0 ,0, 0, 0};
for(int i = 0; i < memDesc.getDataSize(); i++)
{
// Calculate the vector on the line connecting points and most recent position of center-of-mass
const float dx = memDesc.getPosX(i) - com.x;
const float dy = memDesc.getPosY(i) - com.y;
const float dz = memDesc.getPosZ(i) - com.z;
// Calculate weight ratio only if at least one particle isn't massless
const float dw = ((memDesc.getWeight(i) + com.w) > 0.0f)
? ( memDesc.getWeight(i) / (memDesc.getWeight(i) + com.w)) : 0.0f;
    // Update position and weight of the center-of-mass according to the weight ratio and vector
com.x += dx * dw;
com.y += dy * dw;
com.z += dz * dw;
com.w += memDesc.getWeight(i);
}
return com;
}// end of centerOfMassCPU
//----------------------------------------------------------------------------------------------------------------------
|
4453fc276b5fa70bb07e98e64289d28bcab70207.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorRandom.cu"
#else
#define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_uniform), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, a, b);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_normal), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(normal_means)(THCState *state, THCTensor *self, THCTensor *means, double stddev) {
THCTensor_(resizeAs)(state, self, means);
THCTensor_(normal)(state, self, 0, stddev);
THCTensor_(cadd)(state, self, self, ScalarConvert<int, real>::to(1), means);
}
THC_API void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double mean, THCTensor *stddevs)
{
THCTensor_(resizeAs)(state, self, stddevs);
THCTensor_(normal)(state, self, 0, 1);
THCTensor_(cmul)(state, self, self, stddevs);
THCTensor_(add)(state, self, self, ScalarConvert<double, real>::to(mean));
}
THC_API void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor *means, THCTensor *stddevs)
{
THCTensor_(resizeAs)(state, self, means);
THCTensor_(normal)(state, self, 0, 1);
THCTensor_(cmul)(state, self, self, stddevs);
THCTensor_(cadd)(state, self, self, ScalarConvert<int, real>::to(1), means);
}
THC_API void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generateLogNormal<real>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_exponential), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, lambda);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_cauchy), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, median, sigma);
THCTensor_(freeCopyTo)(state, self, self_);
};
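// Normalizes each row of a 2-D tensor in place so that it sums to 1 (per-row L1 renormalization).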
void THCTensor_(renormRows)(struct THCState* state,
THCTensor* t) {
THAssert(THCTensor_(nDimension)(state, t) == 2);
int64_t rows = THCTensor_(size)(state, t, 0);
int64_t cols = THCTensor_(size)(state, t, 1);
hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(cols < maxThreads ? cols : maxThreads);
hipLaunchKernelGGL(( renormRowsL1<real>)
, dim3(grid), dim3(block), block.x * sizeof(real),
THCState_getCurrentStream(state), THCTensor_(data)(state, t),
rows, cols);
}
THC_API void THCTensor_(multinomial)(struct THCState *state,
THCudaLongTensor *self,
THCTensor *prob_dist,
int n_sample,
int with_replacement)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist));
Generator* gen = THCRandom_getGenerator(state);
int inputSize = THCTensor_(nDimension)(state, prob_dist);
THArgCheck(inputSize > 0 && inputSize <= 2, 2,
"prob_dist must be 1 or 2 dim");
// Categories are in the innermost dimension
int64_t numDist =
inputSize == 1 ? 1 : THCTensor_(size)(state, prob_dist, 0);
int64_t numCategoriesLong =
inputSize == 1 ? THCTensor_(size)(state, prob_dist, 0) :
THCTensor_(size)(state, prob_dist, 1);
// Since the index tensor is float, numCategories cannot exceed max
// float integer precision
THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2,
"number of categories cannot exceed 2^24");
int numCategories = (int) numCategoriesLong;
THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples");
if (!with_replacement) {
THArgCheck(n_sample <= numCategories, 2,
"cannot sample n_sample > prob_dist:size(1) samples without "
"replacement");
}
// It is possible that prob_dist is non-contiguous
THCTensor* probDistContig =
THCTensor_(newContiguous)(state, prob_dist);
// Restructure data for 2d
if (inputSize == 1) {
THCTensor_(resize2d)(state, probDistContig, 1, numCategories);
}
THCudaLongTensor_resize2d(state, self, numDist, n_sample);
// get current device properties
hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads)
* (sizeof(real) * sizeof(accreal));
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample);
THCTensor_(uniform)(state, sampled, 0.0, 1.0);
dim3 block(numCategories < maxThreads ? numCategories : maxThreads);
dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4);
hipLaunchKernelGGL(( sampleMultinomialOnce<real, accreal>)
, dim3(grid), dim3(block),
requiredShared,
THCState_getCurrentStream(state),
THCudaLongTensor_data(state, self),
numDist,
numCategories,
THCTensor_(data)(state, sampled),
THCTensor_(data)(state, probDistContig));
THCTensor_(free)(state, sampled);
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
THCTensor* origDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, origDist, probDistContig);
THCTensor_(copy)(state, origDist, probDistContig);
THCTensor* normDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, normDist, probDistContig);
THCTensor* prefixSum = THCTensor_(new)(state);
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
if (with_replacement) {
// Sample with replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from one
// distribution concurrently.
dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS);
hipLaunchKernelGGL(( sampleMultinomialWithReplacement)
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
gen->gen_states,
n_sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, prefixSum));
} else {
// Sample without replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from a different
// distribution concurrently.
ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4);
dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS);
for (int sample = 0; sample < n_sample; ++sample) {
if (sample > 0) {
// Update probabilities
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
}
// The kernel can only draw one sample before we have to
// recalculate our distribution
hipLaunchKernelGGL(( sampleMultinomialWithoutReplacement)
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
gen->gen_states,
n_sample,
sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, origDist),
THCTensor_(data)(state, prefixSum));
}
}
THCTensor_(free)(state, prefixSum);
THCTensor_(free)(state, normDist);
THCTensor_(free)(state, origDist);
}
// Revert data restructuring based on input sizes
if (inputSize == 1) {
THCudaLongTensor_resize1d(state, self, n_sample);
// Unfortunately, if prob_dist is contiguous already,
// newContiguous is not a private copy, so we have to restructure
// this too, so as to not affect prob_dist
THCTensor_(resize1d)(state, probDistContig, numCategories);
}
THCTensor_(free)(state, probDistContig);
}
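// Builds the alias table for Walker's alias method: _q receives the per-bucket acceptance
// probabilities and _J the alias indices, so multinomialAliasDraw needs only one uniform
// draw and one Bernoulli test per sample.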
THC_API void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
THAssert(THCTensor_(isContiguous)(state, _probs));
int64_t inputsize = THCTensor_(nElement)(state, _probs);
THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor_resize1d(state, _J, inputsize);
THCTensor_(resize1d)(state, _q, inputsize);
real one = ScalarConvert<int64_t, real>::to(1);
int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE);
hipLaunchKernelGGL(( aliasMultinomialFilter)
, dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state) ,
THCTensor_(data)(state, _q),
THCTensor_(data)(state, _probs),
THCudaLongTensor_data(state, smaller),
THCudaLongTensor_data(state, larger),
THCudaLongTensor_data(state, _J),
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
one, inputsize
);
THCudaLongTensor_nonzero(state, smaller_short, smaller);
THCudaLongTensor_nonzero(state, larger_short, larger);
int h_large_c = THCudaLongTensor_nElement(state, larger_short);
THCudaLongTensor_resize1d(state, smaller_short, inputsize);
THCudaLongTensor_resize1d(state, larger_short, inputsize);
hipLaunchKernelGGL(( aliasMultinomialSetup)
, dim3(1), dim3(1), 0, THCState_getCurrentStream(state),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
inputsize,
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
inputsize - h_large_c, h_large_c
);
real q_max = THCTensor_(maxall)(state, _q);
hipLaunchKernelGGL(( condDiv),
dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, _q),
THCudaLongTensor_data(state, _J),
inputsize, q_max
);
THCudaLongTensor_free(state, smaller);
THCudaLongTensor_free(state, larger);
THCudaLongTensor_free(state, smaller_short);
THCudaLongTensor_free(state, larger_short);
}
THC_API void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCudaLongTensor *_J, THCTensor *_q){
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
Generator* gen = THCRandom_getGenerator(state);
int64_t K = THCudaLongTensor_nElement(state, _J);
int64_t output_nelem = THCudaLongTensor_nElement(state, self);
ptrdiff_t size = THCudaLongTensor_nElement(state, self);
THCTensor *uniform = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor_(uniform)(state, uniform, 0, K);
THCTensor_(uniform)(state, bernoulli, 0, 1);
hipLaunchKernelGGL(( multinomialAliasDrawKernel)
, dim3(THCCeilDiv((int)output_nelem+BLOCK_SIZE-1, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
size,
THCudaLongTensor_data(state, self),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
K,
THCTensor_(data)(state, uniform),
THCTensor_(data)(state, bernoulli)
);
}
THC_API void THCTensor_(rand)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(uniform)(state, r_, 0, 1);
}
void THCTensor_(randn)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(normal)(state, r_, 0, 1);
}
#endif
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_bernoulli, double, double p, double, hiprand_uniform_double, x <= p)
#else
GENERATE_KERNEL1(generate_bernoulli, real, double p, float, hiprand_uniform, (ScalarConvert<bool, real>::to(x <= p)))
#endif
THC_API void THCTensor_(bernoulli)(THCState* state, THCTensor *self_, double p)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_bernoulli), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(bernoulli_Tensor)(THCState *state, THCTensor *self, THCTensor* p)
{
#if defined(THC_REAL_IS_FLOAT)
THCTensor_(bernoulli_FloatTensor)(state, self, p);
#elif defined(THC_REAL_IS_DOUBLE)
THCTensor_(bernoulli_DoubleTensor)(state, self, p);
#endif
}
#define DEFINE_BERNOULLI_TENSOR(NAME, PROB_TYPE, PROB_DATA_TYPE) \
THC_API void THCTensor_(NAME)(THCState* state, \
THCTensor *self_, PROB_TYPE *probs_) \
{ \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, probs_)); \
ptrdiff_t size = THCTensor_(nElement)(state, self_); \
if (size == 0) return; \
Generator* gen = THCRandom_getGenerator(state); \
THCTensor *self = THCTensor_(newContiguous)(state, self_); \
PROB_TYPE *probs = PROB_TYPE##_newContiguous(state, probs_); \
ptrdiff_t prob_size = PROB_TYPE##_nElement(state, probs); \
real *result_data = THCTensor_(data)(state, self); \
PROB_DATA_TYPE *probs_data = PROB_TYPE##_data(state, probs); \
\
THArgCheck(size == prob_size, 3, "inconsistent tensor size"); \
\
hipLaunchKernelGGL(( generate_bernoulli_tensor), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), \
gen->gen_states, size, result_data, probs_data); \
\
PROB_TYPE##_free(state, probs); \
THCTensor_(freeCopyTo)(state, self, self_); \
}
DEFINE_BERNOULLI_TENSOR(bernoulli_FloatTensor, THCudaTensor, float)
DEFINE_BERNOULLI_TENSOR(bernoulli_DoubleTensor, THCudaDoubleTensor, double)
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_geometric, double, double p, double, hiprand_uniform_double, ceil(log(x) / log(1-p)))
#else
GENERATE_KERNEL1(generate_geometric, real, double p, float, hiprand_uniform, (ScalarConvert<float, real>::to(ceilf(logf(x) / log(1-p)))))
#endif
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
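// CURAND64 concatenates two 32-bit hiprand draws into one 64-bit value so that ranges wider than 2^32 can be sampled.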
#define CURAND64(STATE) (((uint64_t)hiprand(&state[blockIdx.x])) << 32) | (uint64_t)hiprand(&state[blockIdx.x])
GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, hiprand, (real)(x % range + base))
GENERATE_KERNEL2(generate_random_64, real, int64_t base, uint64_t range, uint64_t, CURAND64, (real)(x % range + base))
#elif defined(THC_REAL_IS_HALF)
GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, hiprand, (ScalarConvert<uint32_t, real>::to(x % range + base)))
#else
GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, hiprand, (real)(x % range + base))
#endif
THC_API void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_geometric), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(clampedRandom)(THCState* state, THCTensor *self_, int64_t min_val, int64_t max_val)
{
THArgCheck(min_val < max_val, 2,
"max must be greater than min, but got: min = %lld, max = %lld", min_val, max_val);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
uint64_t range = max_val - min_val;
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
if (range > 1ULL << 32) {
hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, min_val, range);
} else {
#endif
hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, min_val, range);
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
}
#endif
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(cappedRandom)(THCState* state, THCTensor *self_, int64_t max_val)
{
THCTensor_(clampedRandom)(state, self_, 0LL, max_val);
};
#define HLF_MANT_DIG 11
THC_API void THCTensor_(random)(THCState* state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
#if defined(THC_REAL_IS_HALF)
hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, 0UL, (1UL << HLF_MANT_DIG) + 1);
#elif defined(THC_REAL_IS_FLOAT)
hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, 0UL, (1UL << FLT_MANT_DIG) + 1);
#elif defined(THC_REAL_IS_DOUBLE)
hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, 0ULL, (1ULL << DBL_MANT_DIG) + 1);
#elif defined(THC_REAL_IS_LONG)
hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, 0ULL, static_cast<uint64_t>(std::numeric_limits<real>::max()) + 1);
#else
hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->gen_states, size, data, 0UL, static_cast<uint32_t>(std::numeric_limits<real>::max()) + 1);
#endif
THCTensor_(freeCopyTo)(state, self, self_);
};
#undef HLF_MANT_DIG
#undef CURAND64
#undef NUM_BLOCKS
#endif
| 4453fc276b5fa70bb07e98e64289d28bcab70207.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorRandom.cu"
#else
#define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
generate_uniform<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, a, b);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
generate_normal<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(normal_means)(THCState *state, THCTensor *self, THCTensor *means, double stddev) {
THCTensor_(resizeAs)(state, self, means);
THCTensor_(normal)(state, self, 0, stddev);
THCTensor_(cadd)(state, self, self, ScalarConvert<int, real>::to(1), means);
}
THC_API void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double mean, THCTensor *stddevs)
{
THCTensor_(resizeAs)(state, self, stddevs);
THCTensor_(normal)(state, self, 0, 1);
THCTensor_(cmul)(state, self, self, stddevs);
THCTensor_(add)(state, self, self, ScalarConvert<double, real>::to(mean));
}
THC_API void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor *means, THCTensor *stddevs)
{
THCTensor_(resizeAs)(state, self, means);
THCTensor_(normal)(state, self, 0, 1);
THCTensor_(cmul)(state, self, self, stddevs);
THCTensor_(cadd)(state, self, self, ScalarConvert<int, real>::to(1), means);
}
THC_API void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
generateLogNormal<real><<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
generate_exponential<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, lambda);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
generate_cauchy<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, median, sigma);
THCTensor_(freeCopyTo)(state, self, self_);
};
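// Normalizes each row of a 2-D tensor in place so that it sums to 1 (per-row L1 renormalization).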
void THCTensor_(renormRows)(struct THCState* state,
THCTensor* t) {
THAssert(THCTensor_(nDimension)(state, t) == 2);
int64_t rows = THCTensor_(size)(state, t, 0);
int64_t cols = THCTensor_(size)(state, t, 1);
cudaDeviceProp* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(cols < maxThreads ? cols : maxThreads);
renormRowsL1<real>
<<<grid, block, block.x * sizeof(real),
THCState_getCurrentStream(state)>>>(THCTensor_(data)(state, t),
rows, cols);
}
THC_API void THCTensor_(multinomial)(struct THCState *state,
THCudaLongTensor *self,
THCTensor *prob_dist,
int n_sample,
int with_replacement)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist));
Generator* gen = THCRandom_getGenerator(state);
int inputSize = THCTensor_(nDimension)(state, prob_dist);
THArgCheck(inputSize > 0 && inputSize <= 2, 2,
"prob_dist must be 1 or 2 dim");
// Categories are in the innermost dimension
int64_t numDist =
inputSize == 1 ? 1 : THCTensor_(size)(state, prob_dist, 0);
int64_t numCategoriesLong =
inputSize == 1 ? THCTensor_(size)(state, prob_dist, 0) :
THCTensor_(size)(state, prob_dist, 1);
// Since the index tensor is float, numCategories cannot exceed max
// float integer precision
THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2,
"number of categories cannot exceed 2^24");
int numCategories = (int) numCategoriesLong;
THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples");
if (!with_replacement) {
THArgCheck(n_sample <= numCategories, 2,
"cannot sample n_sample > prob_dist:size(1) samples without "
"replacement");
}
// It is possible that prob_dist is non-contiguous
THCTensor* probDistContig =
THCTensor_(newContiguous)(state, prob_dist);
// Restructure data for 2d
if (inputSize == 1) {
THCTensor_(resize2d)(state, probDistContig, 1, numCategories);
}
THCudaLongTensor_resize2d(state, self, numDist, n_sample);
// get current device properties
cudaDeviceProp* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads)
* (sizeof(real) * sizeof(accreal));
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample);
THCTensor_(uniform)(state, sampled, 0.0, 1.0);
dim3 block(numCategories < maxThreads ? numCategories : maxThreads);
dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4);
sampleMultinomialOnce<real, accreal>
<<<grid, block,
requiredShared,
THCState_getCurrentStream(state)>>>(
THCudaLongTensor_data(state, self),
numDist,
numCategories,
THCTensor_(data)(state, sampled),
THCTensor_(data)(state, probDistContig));
THCTensor_(free)(state, sampled);
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
THCTensor* origDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, origDist, probDistContig);
THCTensor_(copy)(state, origDist, probDistContig);
THCTensor* normDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, normDist, probDistContig);
THCTensor* prefixSum = THCTensor_(new)(state);
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
if (with_replacement) {
// Sample with replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from one
// distribution concurrently.
dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS);
sampleMultinomialWithReplacement
<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states,
n_sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, prefixSum));
} else {
// Sample without replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from a different
// distribution concurrently.
ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4);
dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS);
for (int sample = 0; sample < n_sample; ++sample) {
if (sample > 0) {
// Update probabilities
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
}
// The kernel can only draw one sample before we have to
// recalculate our distribution
sampleMultinomialWithoutReplacement
<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states,
n_sample,
sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, origDist),
THCTensor_(data)(state, prefixSum));
}
}
THCTensor_(free)(state, prefixSum);
THCTensor_(free)(state, normDist);
THCTensor_(free)(state, origDist);
}
// Revert data restructuring based on input sizes
if (inputSize == 1) {
THCudaLongTensor_resize1d(state, self, n_sample);
// Unfortunately, if prob_dist is contiguous already,
// newContiguous is not a private copy, so we have to restructure
// this too, so as to not affect prob_dist
THCTensor_(resize1d)(state, probDistContig, numCategories);
}
THCTensor_(free)(state, probDistContig);
}
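// Builds the alias table for Walker's alias method: _q receives the per-bucket acceptance
// probabilities and _J the alias indices, so multinomialAliasDraw needs only one uniform
// draw and one Bernoulli test per sample.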
THC_API void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
THAssert(THCTensor_(isContiguous)(state, _probs));
int64_t inputsize = THCTensor_(nElement)(state, _probs);
THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor_resize1d(state, _J, inputsize);
THCTensor_(resize1d)(state, _q, inputsize);
real one = ScalarConvert<int64_t, real>::to(1);
int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE);
aliasMultinomialFilter
<<<inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state) >>>(
THCTensor_(data)(state, _q),
THCTensor_(data)(state, _probs),
THCudaLongTensor_data(state, smaller),
THCudaLongTensor_data(state, larger),
THCudaLongTensor_data(state, _J),
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
one, inputsize
);
THCudaLongTensor_nonzero(state, smaller_short, smaller);
THCudaLongTensor_nonzero(state, larger_short, larger);
int h_large_c = THCudaLongTensor_nElement(state, larger_short);
THCudaLongTensor_resize1d(state, smaller_short, inputsize);
THCudaLongTensor_resize1d(state, larger_short, inputsize);
aliasMultinomialSetup
<<<1, 1, 0, THCState_getCurrentStream(state)>>>(
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
inputsize,
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
inputsize - h_large_c, h_large_c
);
real q_max = THCTensor_(maxall)(state, _q);
condDiv<<<
inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, _q),
THCudaLongTensor_data(state, _J),
inputsize, q_max
);
THCudaLongTensor_free(state, smaller);
THCudaLongTensor_free(state, larger);
THCudaLongTensor_free(state, smaller_short);
THCudaLongTensor_free(state, larger_short);
}
THC_API void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCudaLongTensor *_J, THCTensor *_q){
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
Generator* gen = THCRandom_getGenerator(state);
int64_t K = THCudaLongTensor_nElement(state, _J);
int64_t output_nelem = THCudaLongTensor_nElement(state, self);
ptrdiff_t size = THCudaLongTensor_nElement(state, self);
THCTensor *uniform = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor_(uniform)(state, uniform, 0, K);
THCTensor_(uniform)(state, bernoulli, 0, 1);
multinomialAliasDrawKernel
<<<THCCeilDiv((int)output_nelem+BLOCK_SIZE-1, BLOCK_SIZE), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
size,
THCudaLongTensor_data(state, self),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
K,
THCTensor_(data)(state, uniform),
THCTensor_(data)(state, bernoulli)
);
}
THC_API void THCTensor_(rand)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(uniform)(state, r_, 0, 1);
}
void THCTensor_(randn)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(normal)(state, r_, 0, 1);
}
#endif
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_bernoulli, double, double p, double, curand_uniform_double, x <= p)
#else
GENERATE_KERNEL1(generate_bernoulli, real, double p, float, curand_uniform, (ScalarConvert<bool, real>::to(x <= p)))
#endif
THC_API void THCTensor_(bernoulli)(THCState* state, THCTensor *self_, double p)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
generate_bernoulli<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(bernoulli_Tensor)(THCState *state, THCTensor *self, THCTensor* p)
{
#if defined(THC_REAL_IS_FLOAT)
THCTensor_(bernoulli_FloatTensor)(state, self, p);
#elif defined(THC_REAL_IS_DOUBLE)
THCTensor_(bernoulli_DoubleTensor)(state, self, p);
#endif
}
#define DEFINE_BERNOULLI_TENSOR(NAME, PROB_TYPE, PROB_DATA_TYPE) \
THC_API void THCTensor_(NAME)(THCState* state, \
THCTensor *self_, PROB_TYPE *probs_) \
{ \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, probs_)); \
ptrdiff_t size = THCTensor_(nElement)(state, self_); \
if (size == 0) return; \
Generator* gen = THCRandom_getGenerator(state); \
THCTensor *self = THCTensor_(newContiguous)(state, self_); \
PROB_TYPE *probs = PROB_TYPE##_newContiguous(state, probs_); \
ptrdiff_t prob_size = PROB_TYPE##_nElement(state, probs); \
real *result_data = THCTensor_(data)(state, self); \
PROB_DATA_TYPE *probs_data = PROB_TYPE##_data(state, probs); \
\
THArgCheck(size == prob_size, 3, "inconsistent tensor size"); \
\
generate_bernoulli_tensor<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( \
gen->gen_states, size, result_data, probs_data); \
\
PROB_TYPE##_free(state, probs); \
THCTensor_(freeCopyTo)(state, self, self_); \
}
DEFINE_BERNOULLI_TENSOR(bernoulli_FloatTensor, THCudaTensor, float)
DEFINE_BERNOULLI_TENSOR(bernoulli_DoubleTensor, THCudaDoubleTensor, double)
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_geometric, double, double p, double, curand_uniform_double, ceil(log(x) / log(1-p)))
#else
GENERATE_KERNEL1(generate_geometric, real, double p, float, curand_uniform, (ScalarConvert<float, real>::to(ceilf(logf(x) / log(1-p)))))
#endif
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
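// CURAND64 concatenates two 32-bit curand draws into one 64-bit value so that ranges wider than 2^32 can be sampled.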
#define CURAND64(STATE) (((uint64_t)curand(&state[blockIdx.x])) << 32) | (uint64_t)curand(&state[blockIdx.x])
GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, curand, (real)(x % range + base))
GENERATE_KERNEL2(generate_random_64, real, int64_t base, uint64_t range, uint64_t, CURAND64, (real)(x % range + base))
#elif defined(THC_REAL_IS_HALF)
GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, curand, (ScalarConvert<uint32_t, real>::to(x % range + base)))
#else
GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, curand, (real)(x % range + base))
#endif
THC_API void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
generate_geometric<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(clampedRandom)(THCState* state, THCTensor *self_, int64_t min_val, int64_t max_val)
{
THArgCheck(min_val < max_val, 2,
"max must be greater than min, but got: min = %lld, max = %lld", min_val, max_val);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
uint64_t range = max_val - min_val;
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
if (range > 1ULL << 32) {
generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, min_val, range);
} else {
#endif
generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, min_val, range);
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
}
#endif
THCTensor_(freeCopyTo)(state, self, self_);
};
THC_API void THCTensor_(cappedRandom)(THCState* state, THCTensor *self_, int64_t max_val)
{
THCTensor_(clampedRandom)(state, self_, 0LL, max_val);
};
#define HLF_MANT_DIG 11
THC_API void THCTensor_(random)(THCState* state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
Generator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
real *data = THCTensor_(data)(state, self);
#if defined(THC_REAL_IS_HALF)
generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, 0UL, (1UL << HLF_MANT_DIG) + 1);
#elif defined(THC_REAL_IS_FLOAT)
generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, 0UL, (1UL << FLT_MANT_DIG) + 1);
#elif defined(THC_REAL_IS_DOUBLE)
generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, 0ULL, (1ULL << DBL_MANT_DIG) + 1);
#elif defined(THC_REAL_IS_LONG)
generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, 0ULL, static_cast<uint64_t>(std::numeric_limits<real>::max()) + 1);
#else
generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, size, data, 0UL, static_cast<uint32_t>(std::numeric_limits<real>::max()) + 1);
#endif
THCTensor_(freeCopyTo)(state, self, self_);
};
#undef HLF_MANT_DIG
#undef CURAND64
#undef NUM_BLOCKS
#endif
|
694010f18acb544451b8d5312af953ea8f0ba628.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
#include <lib.cuh>
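// Element-wise vector addition: each thread adds one element of b into a in place.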
__global__ void kernel(int* a, int* b) {
a[threadIdx.x] += b[threadIdx.x];
}
int kernelWrapper(int* a, int* b, const unsigned int length) {
checkCudaErrors(hipSetDevice(0));
int* ad;
int* bd;
const unsigned int intSize = length * sizeof(int);
checkCudaErrors(hipMalloc((void**)&ad, intSize));
checkCudaErrors(hipMalloc((void**)&bd, intSize));
checkCudaErrors(hipMemcpy(ad, a, intSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(bd, b, intSize, hipMemcpyHostToDevice));
dim3 dimBlock(length, 1);
dim3 dimGrid(1, 1);
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(a, ad, intSize, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(ad));
checkCudaErrors(hipFree(bd));
printf("%s\n", a);
return 0;
} | 694010f18acb544451b8d5312af953ea8f0ba628.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
#include <lib.cuh>
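// Element-wise vector addition: each thread adds one element of b into a in place.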
__global__ void kernel(int* a, int* b) {
a[threadIdx.x] += b[threadIdx.x];
}
int kernelWrapper(int* a, int* b, const unsigned int length) {
checkCudaErrors(cudaSetDevice(0));
int* ad;
int* bd;
const unsigned int intSize = length * sizeof(int);
checkCudaErrors(cudaMalloc((void**)&ad, intSize));
checkCudaErrors(cudaMalloc((void**)&bd, intSize));
checkCudaErrors(cudaMemcpy(ad, a, intSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(bd, b, intSize, cudaMemcpyHostToDevice));
dim3 dimBlock(length, 1);
dim3 dimGrid(1, 1);
kernel<<<dimGrid, dimBlock>>>(ad, bd);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(a, ad, intSize, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(ad));
checkCudaErrors(cudaFree(bd));
printf("%s\n", a);
return 0;
} |
865e9613d6dac6e7704f69da48303b73493bcbf0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<math.h>
#include "parse.cu"
#include <sys/time.h>
#define TILE_WIDTH 16
/* copied from mpbench */
#define TIMER_CLEAR (tv1.tv_sec = tv1.tv_usec = tv2.tv_sec = tv2.tv_usec = 0)
#define TIMER_START gettimeofday(&tv1, (struct timezone*)0)
#define TIMER_ELAPSED ((tv2.tv_usec-tv1.tv_usec)+((tv2.tv_sec-tv1.tv_sec)*1000000))
#define TIMER_STOP gettimeofday(&tv2, (struct timezone*)0)
struct timeval tv1,tv2;
__constant__ short int Gx[][3]={{-1,0,1},{-2,0,+2},{-1,0,+1}};
__constant__ short int Gy[][3]={{1,2,1},{0,0,0},{-1,-2,-1}};
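// One thread per output pixel: applies the 3x3 Sobel Gx and Gy convolutions (fully unrolled),
// sums their absolute responses, clamps to 255, and stores the inverted magnitude (255 - sum).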
__global__ void sobel_edge_detection(unsigned char *device_p, unsigned char *device_edge,int rows, int columns)
{
int ty=threadIdx.y;
int tx=threadIdx.x;
int by=blockIdx.y;
int bx=blockIdx.x;
int row=by*TILE_WIDTH+ty;
int column=bx*TILE_WIDTH+tx;
int sumx[9];
int sumy[9];
int sum;
int tempx=0;
int tempy=0;
int I;
int J;
if(row<rows && column<columns)
{
if(row==0 || row==rows-1|| column==0 || column==columns-1)
sum=0;
else{
I=-1;
J=-1;
sumx[0]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[0]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=0;
sumx[1]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[1]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=1;
sumx[2]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[2]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
I=0;
J=-1;
sumx[3]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[3]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=0;
sumx[4]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[4]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=1;
sumx[5]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[5]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
I=1;
J=-1;
sumx[6]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[6]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=0;
sumx[7]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[7]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=1;
sumx[8]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[8]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
for(I=0;I<9;I++){
tempx+=sumx[I];
tempy+=sumy[I];
}
sum=abs(tempx)+abs(tempy);
}
if(sum>255)sum=255;
*(device_edge+row*columns+column)=255-sum;
}
}
int main(int argc, char **argv)
{
FILE *bmpinput;
FILE *bmpoutput;
unsigned long int num_rows;
unsigned long int num_columns;
unsigned long int num_colors;
unsigned char *host_p;
unsigned char *device_p;
unsigned char *host_edge;
unsigned char *device_edge;
hipError_t err;
char header[3];
clock_t t_start;
clock_t t_end;
if(argc!=3)
{
printf("<usuage> agruments mismatched\n");
exit(0);
}
if((bmpinput=fopen(argv[1],"rb"))==NULL)
{
printf("could not open input bitmap file\n");
exit(0);
}
if((bmpoutput=fopen(argv[2],"wb"))==NULL)
{
printf("could not open output bitmap file\n");
exit(0);
}
//saving header information
fscanf(bmpinput,"%s",header);
fscanf(bmpinput,"%lu %lu",&num_columns, &num_rows);
fscanf(bmpinput,"%lu",&num_colors);
printf("num_columns:%lu\n",num_columns);
printf("num_rows:%lu\n",num_rows);
printf("num_colors:%lu\n",num_colors);
fprintf(bmpoutput,"%s\n",header);
fprintf(bmpoutput,"%lu %lu\n",num_columns,num_rows);
fprintf(bmpoutput,"%lu\n",num_colors);
// host_p=(unsigned char *)malloc(sizeof(unsigned char)*num_rows*num_columns);
hipHostMalloc((void**)&host_p,sizeof(unsigned char)*num_rows*num_columns,hipHostMallocDefault);
fetch_image_data(bmpinput,host_p,num_rows,num_columns);
//print_read_data(p,num_rows,num_columns);
//memory allocation for host to store the final result
// host_edge=(unsigned char *)malloc(sizeof(unsigned char)*num_rows*num_columns);
hipHostMalloc((void**)&host_edge,sizeof(unsigned char)*num_rows*num_columns,hipHostMallocDefault);
//memory allocation for device pointer used by kernel
err=hipMalloc((void**)&device_p,sizeof(unsigned char)*num_rows*num_columns);
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__,__LINE__);
return 0;
}
hipMalloc((void**)&device_edge,sizeof(unsigned char)*num_rows*num_columns);
hipMemcpy(device_p,host_p,sizeof(unsigned char)*num_rows*num_columns,hipMemcpyHostToDevice);
//grid and thread block allocation
dim3 dimGrid( (num_columns-1) / TILE_WIDTH + 1 , (num_rows-1) / TILE_WIDTH + 1,1);
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
//Launching kernel
// TIMER_CLEAR;
// TIMER_START;
t_start=clock();
hipLaunchKernelGGL(( sobel_edge_detection), dim3(dimGrid),dim3(dimBlock), 0, 0, device_p,device_edge,num_rows,num_columns);
hipDeviceSynchronize();
t_end=clock();
// TIMER_STOP;
// printf("Time elapsed = %0.8f seconds\n",TIMER_ELAPSED/1000000.0);
printf("Time elapsed = %0.8f seconds\n",(t_end-t_start)/(float)CLOCKS_PER_SEC);
hipMemcpy(host_edge,device_edge,sizeof(unsigned char)*num_rows*num_columns,hipMemcpyDeviceToHost);
//print_read_data(host_edge,num_rows,num_columns);
copy_fetch_data(bmpoutput,host_edge,num_rows,num_columns);
hipFree(device_p);
hipFree(device_edge);
hipHostFree(host_p);
hipHostFree(host_edge);
fclose(bmpinput);
fclose(bmpoutput);
return 0;
}
| 865e9613d6dac6e7704f69da48303b73493bcbf0.cu | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<math.h>
#include "parse.cu"
#include <sys/time.h>
#define TILE_WIDTH 16
/* copied from mpbench */
#define TIMER_CLEAR (tv1.tv_sec = tv1.tv_usec = tv2.tv_sec = tv2.tv_usec = 0)
#define TIMER_START gettimeofday(&tv1, (struct timezone*)0)
#define TIMER_ELAPSED ((tv2.tv_usec-tv1.tv_usec)+((tv2.tv_sec-tv1.tv_sec)*1000000))
#define TIMER_STOP gettimeofday(&tv2, (struct timezone*)0)
struct timeval tv1,tv2;
__constant__ short int Gx[][3]={{-1,0,1},{-2,0,+2},{-1,0,+1}};
__constant__ short int Gy[][3]={{1,2,1},{0,0,0},{-1,-2,-1}};
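// One thread per output pixel: applies the 3x3 Sobel Gx and Gy convolutions (fully unrolled),
// sums their absolute responses, clamps to 255, and stores the inverted magnitude (255 - sum).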
__global__ void sobel_edge_detection(unsigned char *device_p, unsigned char *device_edge,int rows, int columns)
{
int ty=threadIdx.y;
int tx=threadIdx.x;
int by=blockIdx.y;
int bx=blockIdx.x;
int row=by*TILE_WIDTH+ty;
int column=bx*TILE_WIDTH+tx;
int sumx[9];
int sumy[9];
int sum;
int tempx=0;
int tempy=0;
int I;
int J;
if(row<rows && column<columns)
{
if(row==0 || row==rows-1|| column==0 || column==columns-1)
sum=0;
else{
I=-1;
J=-1;
sumx[0]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[0]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=0;
sumx[1]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[1]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=1;
sumx[2]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[2]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
I=0;
J=-1;
sumx[3]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[3]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=0;
sumx[4]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[4]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=1;
sumx[5]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[5]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
I=1;
J=-1;
sumx[6]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[6]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=0;
sumx[7]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[7]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
J=1;
sumx[8]=(int)(*(device_p+(I+row)*columns+J+column))*Gx[I+1][J+1];
sumy[8]=(int)(*(device_p+(I+row)*columns+J+column))*Gy[I+1][J+1];
for(I=0;I<9;I++){
tempx+=sumx[I];
tempy+=sumy[I];
}
sum=abs(tempx)+abs(tempy);
}
if(sum>255)sum=255;
*(device_edge+row*columns+column)=255-sum;
}
}
int main(int argc, char **argv)
{
FILE *bmpinput;
FILE *bmpoutput;
unsigned long int num_rows;
unsigned long int num_columns;
unsigned long int num_colors;
unsigned char *host_p;
unsigned char *device_p;
unsigned char *host_edge;
unsigned char *device_edge;
cudaError_t err;
char header[3];
clock_t t_start;
clock_t t_end;
if(argc!=3)
{
printf("<usuage> agruments mismatched\n");
exit(0);
}
if((bmpinput=fopen(argv[1],"rb"))==NULL)
{
printf("could not open input bitmap file\n");
exit(0);
}
if((bmpoutput=fopen(argv[2],"wb"))==NULL)
{
printf("could not open output bitmap file\n");
exit(0);
}
//saving header information
fscanf(bmpinput,"%s",header);
fscanf(bmpinput,"%lu %lu",&num_columns, &num_rows);
fscanf(bmpinput,"%lu",&num_colors);
printf("num_columns:%lu\n",num_columns);
printf("num_rows:%lu\n",num_rows);
printf("num_colors:%lu\n",num_colors);
fprintf(bmpoutput,"%s\n",header);
fprintf(bmpoutput,"%lu %lu\n",num_columns,num_rows);
fprintf(bmpoutput,"%lu\n",num_colors);
// host_p=(unsigned char *)malloc(sizeof(unsigned char)*num_rows*num_columns);
cudaHostAlloc((void**)&host_p,sizeof(unsigned char)*num_rows*num_columns,cudaHostAllocDefault);
fetch_image_data(bmpinput,host_p,num_rows,num_columns);
//print_read_data(p,num_rows,num_columns);
//memory allocation for host to store the final result
// host_edge=(unsigned char *)malloc(sizeof(unsigned char)*num_rows*num_columns);
cudaHostAlloc((void**)&host_edge,sizeof(unsigned char)*num_rows*num_columns,cudaHostAllocDefault);
//memory allocation for device pointer used by kernel
err=cudaMalloc((void**)&device_p,sizeof(unsigned char)*num_rows*num_columns);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__,__LINE__);
return 0;
}
cudaMalloc((void**)&device_edge,sizeof(unsigned char)*num_rows*num_columns);
cudaMemcpy(device_p,host_p,sizeof(unsigned char)*num_rows*num_columns,cudaMemcpyHostToDevice);
//grid and thread block allocation
dim3 dimGrid( (num_columns-1) / TILE_WIDTH + 1 , (num_rows-1) / TILE_WIDTH + 1,1);
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
//Launching kernel
// TIMER_CLEAR;
// TIMER_START;
t_start=clock();
sobel_edge_detection<<<dimGrid,dimBlock>>>(device_p,device_edge,num_rows,num_columns);
cudaThreadSynchronize();
t_end=clock();
// TIMER_STOP;
// printf("Time elapsed = %0.8f seconds\n",TIMER_ELAPSED/1000000.0);
printf("Time elapsed = %0.8f seconds\n",(t_end-t_start)/(float)CLOCKS_PER_SEC);
cudaMemcpy(host_edge,device_edge,sizeof(unsigned char)*num_rows*num_columns,cudaMemcpyDeviceToHost);
//print_read_data(host_edge,num_rows,num_columns);
copy_fetch_data(bmpoutput,host_edge,num_rows,num_columns);
cudaFree(device_p);
cudaFree(device_edge);
cudaFreeHost(host_p);
cudaFreeHost(host_edge);
fclose(bmpinput);
fclose(bmpoutput);
return 0;
}
|
6ca9bf7aa91c13c06a897771e34966af1391d864.hip | // !!! This is a file automatically generated by hipify!!!
#include "MWTargetNetworkImpl.hpp"
#include "cnn_api.hpp"
#include "MWCNNLayerImpl.hpp"
#include <math.h>
#include <iostream>
#include <cassert>
#include <fstream>
#if INT8_ENABLED
#include <fstream>
#include <iterator>
#include "MWBatchStream.hpp"
#define XSTR(x) #x
#define STR(x) XSTR(x)
#endif
using namespace nvinfer1; using namespace nvcaffeparser1; void
CHECK(hipError_t status) { if (status != 0) { std::cout << "Cuda failure: " <<
status; abort(); } } class Logger : public ILogger { void log(Severity
severity, const char* msg) override { if (severity != Severity::kINFO){
std::cout << msg << std::endl; } if (MWCNNLayer::isDebuggingEnabled()){ if
(severity == Severity::kINFO){ std::ofstream logfile;
logfile.open("MW_TensorRT_log.txt" , std::ofstream::out | std::ofstream::app);
logfile << msg <<"\n"; logfile.close(); } } } }; static Logger gLogger;
#if INT8_ENABLED
std::string getFilePath(std:: string fileS, std::string &path) { char*
usrDataPath = NULL; usrDataPath = getenv("USER_DL_DATA_PATH"); if(usrDataPath
!= NULL) { path = usrDataPath; } else { path = STR(MW_DL_DATA_PATH); } path =
path + "/tensorrt"; size_t fNamePos = fileS.find_last_of("/\\"); if(fNamePos !=
std::string::npos) { std::string fileN(fileS.substr(fNamePos)); fileS = path +
fileN; } else { fileS = path + fileS; } return fileS; } std::string
gvalidDatapath; void getValidDataPath(const char* fileName, char
*validDatapath) { FILE* fp = fopen(fileName, "rb"); std::string
fileS(fileName); if (!fp) {
#ifdef MW_DL_DATA_PATH
std::string path; fileS = getFilePath(fileS,path); fp = fopen(fileS.c_str(),
"rb"); if(fp != NULL) { fclose(fp); gvalidDatapath = path;
strcpy(validDatapath,fileS.c_str()); } else { strcpy(validDatapath,fileName); }
#else
size_t pos = 0;
#if defined(_WIN32) || defined(_WIN64)
char delim_unix[] = "/"; char delim_win[] = "\\"; while(((pos =
fileS.find(delim_unix)) != std::string::npos) || ((pos = fileS.find(delim_win))
!= std::string::npos))
#else
char delim_unix[] = "/"; while((pos = fileS.find(delim_unix)) != std::string::npos)
#endif
{ if (pos == (fileS.size() - 1)) { fileS = ""; break; } fileS =
fileS.substr(pos+1); fp = fopen(fileS.c_str(), "rb"); if(fp != NULL) {
fclose(fp); strcpy(validDatapath, fileS.c_str()); gvalidDatapath =
fileS.substr(0,fileS.find_last_of("/\\")); break; } else{ strcpy(validDatapath,
fileName); } }
#endif
} else { fclose(fp); strcpy(validDatapath, fileName); gvalidDatapath
=validDatapath; gvalidDatapath =
gvalidDatapath.substr(0,gvalidDatapath.find_last_of("/\\")); } }
#endif
void MWTargetNetworkImpl::setBatchSize(int aBatchSize){ batchSize =
aBatchSize; } void MWTargetNetworkImpl::setIsSequenceNetwork(bool
aIsSequenceNetwork){ isSequenceNetwork = aIsSequenceNetwork; } void
MWTargetNetworkImpl::doInference(int batchSize) { const ICudaEngine& engine =
context->getEngine(); hipStream_t stream; CHECK(hipStreamCreate(&stream)); if
(this->isSequenceNetwork){ context->enqueue(1, m_buffers, stream, nullptr); }
else{ context->enqueue(batchSize, m_buffers, stream, nullptr); }
hipStreamSynchronize(stream); hipStreamDestroy(stream); }
MWTargetNetworkImpl::MWTargetNetworkImpl() : network(0) , builder(0) ,
engine(0) , context(0) , PiMNTwjpqwsGWomVWqdO(0) , m_buffers(0) { } void
MWTargetNetworkImpl::preSetup() { PiMNTwjpqwsGWomVWqdO = new cudnnHandle_t;
cudnnCreate(PiMNTwjpqwsGWomVWqdO); builder = createInferBuilder(gLogger); }
void MWTargetNetworkImpl::allocate(int, int) { network =
builder->createNetwork(); } void MWTargetNetworkImpl::postSetup(MWCNNLayer*
layers[], int numLayers, int layerIdxs[], int portIdxs[], int numOuts) {
markOutputs(layers, layerIdxs, numOuts); std::map<int, std::pair<float*,
std::string> > buffers; setupBuffers(layers, layerIdxs, portIdxs, numOuts, buffers);
#if INT8_ENABLED
bool useINT8 = builder->platformHasFastInt8(); if(!useINT8){ char buffer[100]; int numElem = sprintf(buffer,"#### INT8 mode is not supported on GPU available on the current machine! ####\n"); throw std::runtime_error(buffer); } else{ builder->setInt8Mode(1); } int trainBatchCount=0; while(1) { char filename[500]; char filename1[500]; sprintf(filename,"|>targetdir<|/tensorrt/batch%d",trainBatchCount++); getValidDataPath(filename,filename1); FILE *fp = fopen(filename1,"rb"); if(fp==NULL) { trainBatchCount-=1; break; } fclose(fp); } BatchStream calibrationStream(trainBatchCount); Int8EntropyCalibrator calibrator(calibrationStream, 0); builder->setAverageFindIterations(1); builder->setMinFindIterations(1); builder->setDebugSync(true); builder->setInt8Calibrator(&calibrator);
#endif
#if FP16_ENABLED
bool useFp16 = builder->platformHasFastFp16(); if(useFp16){ builder->setFp16Mode(1); } else{ printf("#### FP16 mode is not supported on GPU available on the current machine. Falling back to FP32 ####\n"); }
#endif
builder->setMaxBatchSize(batchSize); unsigned int wsize = 1 << 30;
builder->setMaxWorkspaceSize(wsize); engine =
builder->buildCudaEngine(*network); hipError_t err = hipGetLastError(); if
(err != hipSuccess) { builder->setMaxWorkspaceSize(1 << 26); engine =
builder->buildCudaEngine(*network); hipError_t err = hipGetLastError(); if
(err != hipSuccess) { builder = 0; engine = 0; network = 0; CUDA_CALL(err); }
} context = engine->createExecutionContext(); m_buffers = (void**) new
float*[buffers.size()]; for (std::map<int, std::pair<float*, std::string>
>::iterator it = buffers.begin(); it != buffers.end(); ++it) { int
binding_index = engine->getBindingIndex((it->second.second).c_str());
m_buffers[binding_index] = it->second.first; } network->destroy(); } void
MWTargetNetworkImpl::markOutputs(MWCNNLayer* layers[], int layerIdxs[], int
numOuts){ for (int k = 0; k < numOuts; k++) { int layerIdx = layerIdxs[k];
MWCNNLayer* layer = layers[layerIdx]; ITensor* itensor =
MWCNNLayerImpl::getITensor(layer->getOutputTensor(0)); char layerIdxStr[20];
sprintf(layerIdxStr, "output%d", layerIdx); itensor->setName(layerIdxStr);
network->markOutput(*itensor); } } void
MWTargetNetworkImpl::setupBuffers(MWCNNLayer* layers[], int layerIdxs[], int
portIdxs[], int numOuts, std::map<int, std::pair<float*, std::string> > &
buffers) { float* buffer = getBuffer(layers[0], 0, 0); auto inputITensor =
MWCNNLayerImpl::getITensor(layers[0]->getOutputTensor(0)); buffers[0] =
std::make_pair(buffer, std::string(inputITensor->getName())); for(int k = 0; k
< numOuts; k++) { int layerIdx = layerIdxs[k]; MWCNNLayer* layer =
layers[layerIdx]; ITensor* itensor =
MWCNNLayerImpl::getITensor(layer->getOutputTensor(0)); float* buffer =
getBuffer(layer, 0, portIdxs[k]); buffers[k+1] = std::make_pair(buffer,
std::string(itensor->getName())); } } float*
MWTargetNetworkImpl::getBuffer(MWCNNLayer* layer, int layerIdx, int portIdx) {
MWTensor* opTensor = layer->getOutputTensor(portIdx); float* data =
opTensor->getData<float>(); if (!data) { CUDA_CALL(hipMalloc((void**)&data,
sizeof(float) * opTensor->getNumElements())); } opTensor->setData(data); return
data; } cudnnHandle_t* MWTargetNetworkImpl::getCudnnHandle() { return
PiMNTwjpqwsGWomVWqdO; } void MWTargetNetworkImpl::deallocate() { if
(m_buffers) { delete[] m_buffers; m_buffers = 0; } if (hipFree(0) !=
hipErrorDeinitialized) { if (context) { context->destroy(); context = 0; }
if (engine) { engine->destroy(); engine = 0; } } } void
MWTargetNetworkImpl::cleanup() { if (builder) { builder->destroy(); builder =
0; } if (PiMNTwjpqwsGWomVWqdO) { cudnnDestroy(*PiMNTwjpqwsGWomVWqdO);
delete PiMNTwjpqwsGWomVWqdO; PiMNTwjpqwsGWomVWqdO = 0; } }
MWTargetNetworkImpl::~MWTargetNetworkImpl() { } | 6ca9bf7aa91c13c06a897771e34966af1391d864.cu | #include "MWTargetNetworkImpl.hpp"
#include "cnn_api.hpp"
#include "MWCNNLayerImpl.hpp"
#include <math.h>
#include <iostream>
#include <cassert>
#include <fstream>
#if INT8_ENABLED
#include <fstream>
#include <iterator>
#include "MWBatchStream.hpp"
#define XSTR(x) #x
#define STR(x) XSTR(x)
#endif
using namespace nvinfer1; using namespace nvcaffeparser1; void
CHECK(cudaError_t status) { if (status != 0) { std::cout << "Cuda failure: " <<
status; abort(); } } class Logger : public ILogger { void log(Severity
severity, const char* msg) override { if (severity != Severity::kINFO){
std::cout << msg << std::endl; } if (MWCNNLayer::isDebuggingEnabled()){ if
(severity == Severity::kINFO){ std::ofstream logfile;
logfile.open("MW_TensorRT_log.txt" , std::ofstream::out | std::ofstream::app);
logfile << msg <<"\n"; logfile.close(); } } } }; static Logger gLogger;
#if INT8_ENABLED
std::string getFilePath(std:: string fileS, std::string &path) { char*
usrDataPath = NULL; usrDataPath = getenv("USER_DL_DATA_PATH"); if(usrDataPath
!= NULL) { path = usrDataPath; } else { path = STR(MW_DL_DATA_PATH); } path =
path + "/tensorrt"; size_t fNamePos = fileS.find_last_of("/\\"); if(fNamePos !=
std::string::npos) { std::string fileN(fileS.substr(fNamePos)); fileS = path +
fileN; } else { fileS = path + fileS; } return fileS; } std::string
gvalidDatapath; void getValidDataPath(const char* fileName, char
*validDatapath) { FILE* fp = fopen(fileName, "rb"); std::string
fileS(fileName); if (!fp) {
#ifdef MW_DL_DATA_PATH
std::string path; fileS = getFilePath(fileS,path); fp = fopen(fileS.c_str(),
"rb"); if(fp != NULL) { fclose(fp); gvalidDatapath = path;
strcpy(validDatapath,fileS.c_str()); } else { strcpy(validDatapath,fileName); }
#else
size_t pos = 0;
#if defined(_WIN32) || defined(_WIN64)
char delim_unix[] = "/"; char delim_win[] = "\\"; while(((pos =
fileS.find(delim_unix)) != std::string::npos) || ((pos = fileS.find(delim_win))
!= std::string::npos))
#else
char delim_unix[] = "/"; while((pos = fileS.find(delim_unix)) != std::string::npos)
#endif
{ if (pos == (fileS.size() - 1)) { fileS = ""; break; } fileS =
fileS.substr(pos+1); fp = fopen(fileS.c_str(), "rb"); if(fp != NULL) {
fclose(fp); strcpy(validDatapath, fileS.c_str()); gvalidDatapath =
fileS.substr(0,fileS.find_last_of("/\\")); break; } else{ strcpy(validDatapath,
fileName); } }
#endif
} else { fclose(fp); strcpy(validDatapath, fileName); gvalidDatapath
=validDatapath; gvalidDatapath =
gvalidDatapath.substr(0,gvalidDatapath.find_last_of("/\\")); } }
#endif
void MWTargetNetworkImpl::setBatchSize(int aBatchSize){ batchSize =
aBatchSize; } void MWTargetNetworkImpl::setIsSequenceNetwork(bool
aIsSequenceNetwork){ isSequenceNetwork = aIsSequenceNetwork; } void
MWTargetNetworkImpl::doInference(int batchSize) { const ICudaEngine& engine =
context->getEngine(); cudaStream_t stream; CHECK(cudaStreamCreate(&stream)); if
(this->isSequenceNetwork){ context->enqueue(1, m_buffers, stream, nullptr); }
else{ context->enqueue(batchSize, m_buffers, stream, nullptr); }
cudaStreamSynchronize(stream); cudaStreamDestroy(stream); }
MWTargetNetworkImpl::MWTargetNetworkImpl() : network(0) , builder(0) ,
engine(0) , context(0) , PiMNTwjpqwsGWomVWqdO(0) , m_buffers(0) { } void
MWTargetNetworkImpl::preSetup() { PiMNTwjpqwsGWomVWqdO = new cudnnHandle_t;
cudnnCreate(PiMNTwjpqwsGWomVWqdO); builder = createInferBuilder(gLogger); }
void MWTargetNetworkImpl::allocate(int, int) { network =
builder->createNetwork(); } void MWTargetNetworkImpl::postSetup(MWCNNLayer*
layers[], int numLayers, int layerIdxs[], int portIdxs[], int numOuts) {
markOutputs(layers, layerIdxs, numOuts); std::map<int, std::pair<float*,
std::string> > buffers; setupBuffers(layers, layerIdxs, portIdxs, numOuts, buffers);
#if INT8_ENABLED
bool useINT8 = builder->platformHasFastInt8(); if(!useINT8){ char buffer[100]; int numElem = sprintf(buffer,"#### INT8 mode is not supported on GPU available on the current machine! ####\n"); throw std::runtime_error(buffer); } else{ builder->setInt8Mode(1); } int trainBatchCount=0; while(1) { char filename[500]; char filename1[500]; sprintf(filename,"|>targetdir<|/tensorrt/batch%d",trainBatchCount++); getValidDataPath(filename,filename1); FILE *fp = fopen(filename1,"rb"); if(fp==NULL) { trainBatchCount-=1; break; } fclose(fp); } BatchStream calibrationStream(trainBatchCount); Int8EntropyCalibrator calibrator(calibrationStream, 0); builder->setAverageFindIterations(1); builder->setMinFindIterations(1); builder->setDebugSync(true); builder->setInt8Calibrator(&calibrator);
#endif
#if FP16_ENABLED
bool useFp16 = builder->platformHasFastFp16(); if(useFp16){ builder->setFp16Mode(1); } else{ printf("#### FP16 mode is not supported on GPU available on the current machine. Falling back to FP32 ####\n"); }
#endif
builder->setMaxBatchSize(batchSize); unsigned int wsize = 1 << 30;
builder->setMaxWorkspaceSize(wsize); engine =
builder->buildCudaEngine(*network); cudaError_t err = cudaGetLastError(); if
(err != cudaSuccess) { builder->setMaxWorkspaceSize(1 << 26); engine =
builder->buildCudaEngine(*network); cudaError_t err = cudaGetLastError(); if
(err != cudaSuccess) { builder = 0; engine = 0; network = 0; CUDA_CALL(err); }
} context = engine->createExecutionContext(); m_buffers = (void**) new
float*[buffers.size()]; for (std::map<int, std::pair<float*, std::string>
>::iterator it = buffers.begin(); it != buffers.end(); ++it) { int
binding_index = engine->getBindingIndex((it->second.second).c_str());
m_buffers[binding_index] = it->second.first; } network->destroy(); } void
MWTargetNetworkImpl::markOutputs(MWCNNLayer* layers[], int layerIdxs[], int
numOuts){ for (int k = 0; k < numOuts; k++) { int layerIdx = layerIdxs[k];
MWCNNLayer* layer = layers[layerIdx]; ITensor* itensor =
MWCNNLayerImpl::getITensor(layer->getOutputTensor(0)); char layerIdxStr[20];
sprintf(layerIdxStr, "output%d", layerIdx); itensor->setName(layerIdxStr);
network->markOutput(*itensor); } } void
MWTargetNetworkImpl::setupBuffers(MWCNNLayer* layers[], int layerIdxs[], int
portIdxs[], int numOuts, std::map<int, std::pair<float*, std::string> > &
buffers) { float* buffer = getBuffer(layers[0], 0, 0); auto inputITensor =
MWCNNLayerImpl::getITensor(layers[0]->getOutputTensor(0)); buffers[0] =
std::make_pair(buffer, std::string(inputITensor->getName())); for(int k = 0; k
< numOuts; k++) { int layerIdx = layerIdxs[k]; MWCNNLayer* layer =
layers[layerIdx]; ITensor* itensor =
MWCNNLayerImpl::getITensor(layer->getOutputTensor(0)); float* buffer =
getBuffer(layer, 0, portIdxs[k]); buffers[k+1] = std::make_pair(buffer,
std::string(itensor->getName())); } } float*
MWTargetNetworkImpl::getBuffer(MWCNNLayer* layer, int layerIdx, int portIdx) {
MWTensor* opTensor = layer->getOutputTensor(portIdx); float* data =
opTensor->getData<float>(); if (!data) { CUDA_CALL(cudaMalloc((void**)&data,
sizeof(float) * opTensor->getNumElements())); } opTensor->setData(data); return
data; } cudnnHandle_t* MWTargetNetworkImpl::getCudnnHandle() { return
PiMNTwjpqwsGWomVWqdO; } void MWTargetNetworkImpl::deallocate() { if
(m_buffers) { delete[] m_buffers; m_buffers = 0; } if (cudaFree(0) !=
cudaErrorCudartUnloading) { if (context) { context->destroy(); context = 0; }
if (engine) { engine->destroy(); engine = 0; } } } void
MWTargetNetworkImpl::cleanup() { if (builder) { builder->destroy(); builder =
0; } if (PiMNTwjpqwsGWomVWqdO) { cudnnDestroy(*PiMNTwjpqwsGWomVWqdO);
delete PiMNTwjpqwsGWomVWqdO; PiMNTwjpqwsGWomVWqdO = 0; } }
MWTargetNetworkImpl::~MWTargetNetworkImpl() { } |
integer_divider_test.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
// Test IntegerDivider: this tests *all* 32-bit pairs (a, b) where a % b is 0 or
// (b-1), so it takes a few minutes to run.
#include <assert.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "THH/THHIntegerDivider.cuh"
using std::vector;
template<typename Value>
struct TestCase {
Value dividend;
int divisor_idx;
int steps;
TestCase(Value dividend, int divisor_idx, int steps)
: dividend(dividend), divisor_idx(divisor_idx), steps(steps) { }
};
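// Verification kernel: each thread walks one TestCase, repeatedly advancing the
// dividend by its divisor and checking divider.divmod() against plain '/' and '%'.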
template<typename Value>
__global__ void testIntDivider(const IntDivider<Value> *dividers,
const TestCase<Value> *testCases,
int numCases)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numCases; i += stride) {
const TestCase<Value> &tc = testCases[i];
Value dividend = tc.dividend;
    const IntDivider<Value> &divider = dividers[tc.divisor_idx];
Value divisor = divider.divisor;
for (int j = 0; j < tc.steps; j++) {
if (sizeof(Value) == 4 && dividend > INT32_MAX) return;
DivMod<Value> qr = divider.divmod(dividend);
assert(qr.div == dividend / divisor && qr.mod == dividend % divisor);
dividend += divisor;
}
}
}
enum {
// Number of test cases per each kernel invocation.
NUM_CASES = 1000000,
// Maximum number of steps per each test case.
MAX_STEPS = 10000,
};
// Test the magic division algorithm.
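// (IntDivider is assumed to precompute a magic multiplier and shift so the kernel
// can divide without hardware division; this harness only checks its results.)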
template<typename Value>
class IntDividerTester {
public:
IntDividerTester() {
hipError_t err;
    err = hipMalloc(&dividersBuf_, NUM_CASES * sizeof(IntDivider<Value>));
REQUIRE(err == hipSuccess);
err = hipMalloc(&testCasesBuf_, NUM_CASES * sizeof(TestCase<Value>));
REQUIRE(err == hipSuccess);
}
~IntDividerTester() {
hipError_t err;
err = hipFree(dividersBuf_);
REQUIRE(err == hipSuccess);
err = hipFree(testCasesBuf_);
REQUIRE(err == hipSuccess);
}
void addTestCase(Value dividend, Value divisor, int steps) {
// Append a new IntDivider using 'divisor' if necessary.
if (dividers_.empty() || dividers_.back().divisor != divisor)
dividers_.emplace_back(divisor);
// Append the test case.
testCases_.emplace_back(dividend, dividers_.size() - 1, steps);
// Launch the test kernel if the buffer is full.
if (testCases_.size() == NUM_CASES) flush();
}
void flush() {
hipError_t err;
if (testCases_.empty()) return;
REQUIRE(!dividers_.empty());
REQUIRE(dividers_.size() <= NUM_CASES);
REQUIRE(testCases_.size() <= NUM_CASES);
err = hipMemcpy(dividersBuf_, dividers_.data(),
dividers_.size() * sizeof(IntDivider<Value>),
hipMemcpyHostToDevice);
REQUIRE(err == hipSuccess);
err = hipMemcpy(testCasesBuf_, testCases_.data(),
testCases_.size() * sizeof(TestCase<Value>),
hipMemcpyHostToDevice);
REQUIRE(err == hipSuccess);
int numCases = testCases_.size();
hipLaunchKernelGGL(( testIntDivider<Value>), dim3(512), dim3(512), 0, 0,
dividersBuf_, testCasesBuf_, numCases);
dividers_.clear();
testCases_.clear();
}
private:
vector<IntDivider<Value>> dividers_;
vector<TestCase<Value>> testCases_;
IntDivider<Value> *dividersBuf_;
TestCase<Value> *testCasesBuf_;
};
static void testUint32Divider()
{
fprintf(stderr, "Testing 32-bit integer division ...");
IntDividerTester<uint32_t> tester;
for (uint64_t divisor = 1; divisor <= INT32_MAX; divisor++) {
if (divisor < 1000000 && divisor % 10000 == 0) fprintf(stderr, ".");
if (divisor % 10000000 == 0) fprintf(stderr, "-");
// In order to save time, we only test when the remainder is zero or
// (divisor - 1).
uint64_t dividend = 0;
while (dividend <= INT32_MAX) {
uint64_t steps = (INT32_MAX - dividend) / divisor + 1;
if (steps > MAX_STEPS) steps = MAX_STEPS;
tester.addTestCase(dividend, divisor, steps);
tester.addTestCase(dividend + divisor - 1, divisor, steps);
dividend += divisor * steps;
}
// Check the boundary cases.
tester.addTestCase(1, divisor, 1);
tester.addTestCase(INT32_MAX, divisor, 1);
}
tester.flush();
fprintf(stderr, " Done!\n");
}
// uint64_t divider uses plain division, so we just check a few random cases.
static void testUint64Divider()
{
IntDividerTester<uint64_t> tester;
uint64_t dividend = 0x123456789ULL;
uint64_t divisor = 0x54321ULL;
for (int i = 0; i < 1000; i++) {
if (divisor != 0) {
tester.addTestCase(dividend, divisor, 100);
// Test small divisor.
tester.addTestCase(dividend, divisor % 65536, 100);
// Create pseudorandom numbers.
dividend *= 0x100000001b3ULL;
dividend ^= 0x1234567890abcdefULL;
divisor *= 0x100000001b3ULL;
divisor ^= 0x1234567890abcdefULL;
}
}
tester.flush();
}
TEST_CASE( "CUDA integer divider", "[cuda]" ) {
testUint64Divider();
testUint32Divider();
hipError_t err = hipDeviceSynchronize();
REQUIRE(err == hipSuccess);
}
| integer_divider_test.cu | #define CATCH_CONFIG_MAIN
#include "catch.hpp"
// Test IntegerDivider: this tests *all* 32-bit pairs (a, b) where a % b is 0 or
// (b-1), so it takes a few minutes to run.
#include <assert.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "THC/THCIntegerDivider.cuh"
using std::vector;
template<typename Value>
struct TestCase {
Value dividend;
int divisor_idx;
int steps;
TestCase(Value dividend, int divisor_idx, int steps)
: dividend(dividend), divisor_idx(divisor_idx), steps(steps) { }
};
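// Verification kernel: each thread walks one TestCase, repeatedly advancing the
// dividend by its divisor and checking divider.divmod() against plain '/' and '%'.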
template<typename Value>
__global__ void testIntDivider(const IntDivider<Value> *dividers,
const TestCase<Value> *testCases,
int numCases)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numCases; i += stride) {
const TestCase<Value> &tc = testCases[i];
Value dividend = tc.dividend;
    const IntDivider<Value> &divider = dividers[tc.divisor_idx];
Value divisor = divider.divisor;
for (int j = 0; j < tc.steps; j++) {
if (sizeof(Value) == 4 && dividend > INT32_MAX) return;
DivMod<Value> qr = divider.divmod(dividend);
assert(qr.div == dividend / divisor && qr.mod == dividend % divisor);
dividend += divisor;
}
}
}
enum {
// Number of test cases per each kernel invocation.
NUM_CASES = 1000000,
// Maximum number of steps per each test case.
MAX_STEPS = 10000,
};
// Test the magic division algorithm.
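// (IntDivider is assumed to precompute a magic multiplier and shift so the kernel
// can divide without hardware division; this harness only checks its results.)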
template<typename Value>
class IntDividerTester {
public:
IntDividerTester() {
cudaError_t err;
    err = cudaMalloc(&dividersBuf_, NUM_CASES * sizeof(IntDivider<Value>));
REQUIRE(err == cudaSuccess);
err = cudaMalloc(&testCasesBuf_, NUM_CASES * sizeof(TestCase<Value>));
REQUIRE(err == cudaSuccess);
}
~IntDividerTester() {
cudaError_t err;
err = cudaFree(dividersBuf_);
REQUIRE(err == cudaSuccess);
err = cudaFree(testCasesBuf_);
REQUIRE(err == cudaSuccess);
}
void addTestCase(Value dividend, Value divisor, int steps) {
// Append a new IntDivider using 'divisor' if necessary.
if (dividers_.empty() || dividers_.back().divisor != divisor)
dividers_.emplace_back(divisor);
// Append the test case.
testCases_.emplace_back(dividend, dividers_.size() - 1, steps);
// Launch the test kernel if the buffer is full.
if (testCases_.size() == NUM_CASES) flush();
}
void flush() {
cudaError_t err;
if (testCases_.empty()) return;
REQUIRE(!dividers_.empty());
REQUIRE(dividers_.size() <= NUM_CASES);
REQUIRE(testCases_.size() <= NUM_CASES);
err = cudaMemcpy(dividersBuf_, dividers_.data(),
dividers_.size() * sizeof(IntDivider<Value>),
cudaMemcpyHostToDevice);
REQUIRE(err == cudaSuccess);
err = cudaMemcpy(testCasesBuf_, testCases_.data(),
testCases_.size() * sizeof(TestCase<Value>),
cudaMemcpyHostToDevice);
REQUIRE(err == cudaSuccess);
int numCases = testCases_.size();
testIntDivider<Value><<<512, 512>>>(
dividersBuf_, testCasesBuf_, numCases);
dividers_.clear();
testCases_.clear();
}
private:
vector<IntDivider<Value>> dividers_;
vector<TestCase<Value>> testCases_;
IntDivider<Value> *dividersBuf_;
TestCase<Value> *testCasesBuf_;
};
static void testUint32Divider()
{
fprintf(stderr, "Testing 32-bit integer division ...");
IntDividerTester<uint32_t> tester;
for (uint64_t divisor = 1; divisor <= INT32_MAX; divisor++) {
if (divisor < 1000000 && divisor % 10000 == 0) fprintf(stderr, ".");
if (divisor % 10000000 == 0) fprintf(stderr, "-");
// In order to save time, we only test when the remainder is zero or
// (divisor - 1).
uint64_t dividend = 0;
while (dividend <= INT32_MAX) {
uint64_t steps = (INT32_MAX - dividend) / divisor + 1;
if (steps > MAX_STEPS) steps = MAX_STEPS;
tester.addTestCase(dividend, divisor, steps);
tester.addTestCase(dividend + divisor - 1, divisor, steps);
dividend += divisor * steps;
}
// Check the boundary cases.
tester.addTestCase(1, divisor, 1);
tester.addTestCase(INT32_MAX, divisor, 1);
}
tester.flush();
fprintf(stderr, " Done!\n");
}
// uint64_t divider uses plain division, so we just check a few random cases.
static void testUint64Divider()
{
IntDividerTester<uint64_t> tester;
uint64_t dividend = 0x123456789ULL;
uint64_t divisor = 0x54321ULL;
for (int i = 0; i < 1000; i++) {
if (divisor != 0) {
tester.addTestCase(dividend, divisor, 100);
// Test small divisor.
tester.addTestCase(dividend, divisor % 65536, 100);
// Create pseudorandom numbers.
dividend *= 0x100000001b3ULL;
dividend ^= 0x1234567890abcdefULL;
divisor *= 0x100000001b3ULL;
divisor ^= 0x1234567890abcdefULL;
}
}
tester.flush();
}
TEST_CASE( "CUDA integer divider", "[cuda]" ) {
testUint64Divider();
testUint32Divider();
cudaError_t err = cudaDeviceSynchronize();
REQUIRE(err == cudaSuccess);
}
|
4ba717076ec77889f5593b0f731167f01fef6589.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Functions for computing the two hop neighbor pairs of a graph
*
* @file two_hop_neighbors.cu
* ---------------------------------------------------------------------------**/
#include <rmm/thrust_rmm_allocator.h>
#include <algorithms.hpp>
#include <graph.hpp>
#include <utilities/error.hpp>
#include "two_hop_neighbors.cuh"
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace cugraph {
template <typename VT, typename ET, typename WT>
std::unique_ptr<GraphCOO<VT, ET, WT>> get_two_hop_neighbors(GraphCSRView<VT, ET, WT> const &graph)
{
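  // Overall flow: expand each edge's destination neighborhood into candidate
  // (first, second) vertex pairs, then sort, drop self-pairings and deduplicate.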
hipStream_t stream{nullptr};
rmm::device_vector<ET> exsum_degree(graph.number_of_edges + 1);
ET *d_exsum_degree = exsum_degree.data().get();
// Find the degree of the out vertex of each edge
degree_iterator<ET> deg_it(graph.offsets);
deref_functor<degree_iterator<ET>, ET> deref(deg_it);
exsum_degree[0] = ET{0};
thrust::transform(rmm::exec_policy(stream)->on(stream),
graph.indices,
graph.indices + graph.number_of_edges,
d_exsum_degree + 1,
deref);
// Take the inclusive sum of the degrees
thrust::inclusive_scan(rmm::exec_policy(stream)->on(stream),
d_exsum_degree + 1,
d_exsum_degree + graph.number_of_edges + 1,
d_exsum_degree + 1);
// Copy out the last value to get the size of scattered output
ET output_size = exsum_degree[graph.number_of_edges];
// Allocate memory for the scattered output
rmm::device_vector<VT> first_pair(output_size);
rmm::device_vector<VT> second_pair(output_size);
VT *d_first_pair = first_pair.data().get();
VT *d_second_pair = second_pair.data().get();
// Figure out number of blocks and allocate memory for block bucket offsets
ET num_blocks = (output_size + TWO_HOP_BLOCK_SIZE - 1) / TWO_HOP_BLOCK_SIZE;
rmm::device_vector<ET> block_bucket_offsets(num_blocks + 1);
ET *d_block_bucket_offsets = block_bucket_offsets.data().get();
// Compute the block bucket offsets
dim3 grid, block;
block.x = 512;
grid.x = min((ET)MAXBLOCKS, (num_blocks / 512) + 1);
hipLaunchKernelGGL(( compute_bucket_offsets_kernel), dim3(grid), dim3(block), 0, nullptr,
d_exsum_degree, d_block_bucket_offsets, graph.number_of_edges, output_size);
block_bucket_offsets[num_blocks] = graph.number_of_edges;
// Scatter the expanded edge lists into temp space
grid.x = min((ET)MAXBLOCKS, num_blocks);
hipLaunchKernelGGL(( scatter_expand_kernel), dim3(grid), dim3(block), 0, nullptr, d_exsum_degree,
graph.indices,
graph.offsets,
d_block_bucket_offsets,
graph.number_of_vertices,
output_size,
num_blocks,
d_first_pair,
d_second_pair);
// TODO: This would be faster in a hash table (no sorting), unless there's
// some reason that the result has to be sorted
// Remove duplicates and self pairings
auto tuple_start = thrust::make_zip_iterator(thrust::make_tuple(d_first_pair, d_second_pair));
auto tuple_end = tuple_start + output_size;
thrust::sort(rmm::exec_policy(stream)->on(stream), tuple_start, tuple_end);
tuple_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
tuple_start,
tuple_end,
tuple_start,
self_loop_flagger<VT>());
tuple_end = thrust::unique(rmm::exec_policy(stream)->on(stream), tuple_start, tuple_end);
// Get things ready to return
ET outputSize = tuple_end - tuple_start;
auto result = std::make_unique<GraphCOO<VT, ET, WT>>(graph.number_of_vertices, outputSize, false);
hipMemcpy(result->src_indices(), d_first_pair, sizeof(VT) * outputSize, hipMemcpyDefault);
hipMemcpy(result->dst_indices(), d_second_pair, sizeof(VT) * outputSize, hipMemcpyDefault);
return result;
}
template std::unique_ptr<GraphCOO<int, int, float>> get_two_hop_neighbors(
GraphCSRView<int, int, float> const &);
template std::unique_ptr<GraphCOO<int, int, double>> get_two_hop_neighbors(
GraphCSRView<int, int, double> const &);
} // namespace cugraph
| 4ba717076ec77889f5593b0f731167f01fef6589.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Functions for computing the two hop neighbor pairs of a graph
*
* @file two_hop_neighbors.cu
* ---------------------------------------------------------------------------**/
#include <rmm/thrust_rmm_allocator.h>
#include <algorithms.hpp>
#include <graph.hpp>
#include <utilities/error.hpp>
#include "two_hop_neighbors.cuh"
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace cugraph {
template <typename VT, typename ET, typename WT>
std::unique_ptr<GraphCOO<VT, ET, WT>> get_two_hop_neighbors(GraphCSRView<VT, ET, WT> const &graph)
{
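  // Overall flow: expand each edge's destination neighborhood into candidate
  // (first, second) vertex pairs, then sort, drop self-pairings and deduplicate.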
cudaStream_t stream{nullptr};
rmm::device_vector<ET> exsum_degree(graph.number_of_edges + 1);
ET *d_exsum_degree = exsum_degree.data().get();
// Find the degree of the out vertex of each edge
degree_iterator<ET> deg_it(graph.offsets);
deref_functor<degree_iterator<ET>, ET> deref(deg_it);
exsum_degree[0] = ET{0};
thrust::transform(rmm::exec_policy(stream)->on(stream),
graph.indices,
graph.indices + graph.number_of_edges,
d_exsum_degree + 1,
deref);
// Take the inclusive sum of the degrees
thrust::inclusive_scan(rmm::exec_policy(stream)->on(stream),
d_exsum_degree + 1,
d_exsum_degree + graph.number_of_edges + 1,
d_exsum_degree + 1);
// Copy out the last value to get the size of scattered output
ET output_size = exsum_degree[graph.number_of_edges];
// Allocate memory for the scattered output
rmm::device_vector<VT> first_pair(output_size);
rmm::device_vector<VT> second_pair(output_size);
VT *d_first_pair = first_pair.data().get();
VT *d_second_pair = second_pair.data().get();
// Figure out number of blocks and allocate memory for block bucket offsets
ET num_blocks = (output_size + TWO_HOP_BLOCK_SIZE - 1) / TWO_HOP_BLOCK_SIZE;
rmm::device_vector<ET> block_bucket_offsets(num_blocks + 1);
ET *d_block_bucket_offsets = block_bucket_offsets.data().get();
// Compute the block bucket offsets
dim3 grid, block;
block.x = 512;
grid.x = min((ET)MAXBLOCKS, (num_blocks / 512) + 1);
compute_bucket_offsets_kernel<<<grid, block, 0, nullptr>>>(
d_exsum_degree, d_block_bucket_offsets, graph.number_of_edges, output_size);
block_bucket_offsets[num_blocks] = graph.number_of_edges;
// Scatter the expanded edge lists into temp space
grid.x = min((ET)MAXBLOCKS, num_blocks);
scatter_expand_kernel<<<grid, block, 0, nullptr>>>(d_exsum_degree,
graph.indices,
graph.offsets,
d_block_bucket_offsets,
graph.number_of_vertices,
output_size,
num_blocks,
d_first_pair,
d_second_pair);
// TODO: This would be faster in a hash table (no sorting), unless there's
// some reason that the result has to be sorted
// Remove duplicates and self pairings
auto tuple_start = thrust::make_zip_iterator(thrust::make_tuple(d_first_pair, d_second_pair));
auto tuple_end = tuple_start + output_size;
thrust::sort(rmm::exec_policy(stream)->on(stream), tuple_start, tuple_end);
tuple_end = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
tuple_start,
tuple_end,
tuple_start,
self_loop_flagger<VT>());
tuple_end = thrust::unique(rmm::exec_policy(stream)->on(stream), tuple_start, tuple_end);
// Get things ready to return
ET outputSize = tuple_end - tuple_start;
auto result = std::make_unique<GraphCOO<VT, ET, WT>>(graph.number_of_vertices, outputSize, false);
cudaMemcpy(result->src_indices(), d_first_pair, sizeof(VT) * outputSize, cudaMemcpyDefault);
cudaMemcpy(result->dst_indices(), d_second_pair, sizeof(VT) * outputSize, cudaMemcpyDefault);
return result;
}
template std::unique_ptr<GraphCOO<int, int, float>> get_two_hop_neighbors(
GraphCSRView<int, int, float> const &);
template std::unique_ptr<GraphCOO<int, int, double>> get_two_hop_neighbors(
GraphCSRView<int, int, double> const &);
} // namespace cugraph
|
ba81b4f66ddbc855bc3d31f66e42ce08216329d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
__global__ void add(int *a, int *b, int *c){
*c = *a + *b;
}
int main(){
int a, b, c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // devices copies of a, b, c
int size = sizeof(int);
// Allocate space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Setup input values
a = 2;
b = 7;
// Copy inputs to device
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
// Copy result back to host
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
// Cleanup
hipFree(d_a); hipFree(d_b); hipFree(d_c);
cout << a << endl << b << endl << c << endl;
return 0;
}
| ba81b4f66ddbc855bc3d31f66e42ce08216329d8.cu | #include <iostream>
using namespace std;
__global__ void add(int *a, int *b, int *c){
*c = *a + *b;
}
int main(){
int a, b, c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // devices copies of a, b, c
int size = sizeof(int);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Setup input values
a = 2;
b = 7;
// Copy inputs to device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add<<<1,1>>>(d_a, d_b, d_c);
// Copy result back to host
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
// Cleanup
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
cout << a << endl << b << endl << c << endl;
return 0;
}
|
14bb7fec617cd471d325933b336def27171dd01e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include "exchange.h"
#include "float3.h"
#include "stencil.h"
#include "amul.h"
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
// Exchange + Dzyaloshinskii-Moriya interaction for bulk material.
// Crystal Symmetry D_2d
// Energy:
//
// E = D (L_xz(y) + L_yz(x))
//
// Effective field:
//
// Hx = 2A/Bs nabla²Mx + 2D/Bs dzMy - 2D/Bs dyMz
// Hy = 2A/Bs nabla²My + 2D/Bs dxMz - 2D/Bs dzMx
// Hz = 2A/Bs nabla²Mz + 2D/Bs dyMx - 2D/Bs dxMy
//
// Boundary conditions:
//
// 2A dxMx = 0
// -D Mz + 2A dxMy = 0
// D My + 2A dxMz = 0
//
// -D Mz + 2A dyMx = 0
// 2A dyMy = 0
// D Mx + 2A dyMz = 0
//
// 2A dzMx = 0
// 2A dzMy = 0
// 2A dzMz = 0
//
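// aLUT2d and DLUT2d are per-region-pair lookup tables for the exchange stiffness A
// and the DMI strength D, indexed through symidx(r0, r1).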
extern "C" __global__ void
adddmid2d(float* __restrict__ Hx, float* __restrict__ Hy, float* __restrict__ Hz,
float* __restrict__ mx, float* __restrict__ my, float* __restrict__ mz,
float* __restrict__ Ms_, float Ms_mul,
float* __restrict__ aLUT2d, float* __restrict__ DLUT2d,
uint8_t* __restrict__ regions,
float cx, float cy, float cz, int Nx, int Ny, int Nz, uint8_t PBC) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int iz = blockIdx.z * blockDim.z + threadIdx.z;
if (ix >= Nx || iy >= Ny || iz >= Nz) {
return;
}
int I = idx(ix, iy, iz); // central cell index
float3 h = make_float3(0.0,0.0,0.0); // add to H
float3 m0 = make_float3(mx[I], my[I], mz[I]); // central m
uint8_t r0 = regions[I];
int i_; // neighbor index
if(is0(m0)) {
return;
}
// x derivatives (along length)
{
float3 m1 = make_float3(0.0f, 0.0f, 0.0f); // left neighbor
i_ = idx(lclampx(ix-1), iy, iz); // load neighbor m if inside grid, keep 0 otherwise
if (ix-1 >= 0 || PBCx) {
m1 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m1)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
float D = DLUT2d[symidx(r0, r1)];
float D_2A = D/(2.0f*A);
if (is0(m1)) { // neighbor missing
m1.x = m0.x;
m1.y = m0.y + (-cx * D_2A * m0.z);
m1.z = m0.z - (-cx * D_2A * m0.y);
}
h += (2.0f*A/(cx*cx)) * (m1 - m0); // exchange
h.y += (D/cx)*(m1.z);
h.z -= (D/cx)*(m1.y);
}
{
float3 m2 = make_float3(0.0f, 0.0f, 0.0f); // right neighbor
i_ = idx(hclampx(ix+1), iy, iz);
if (ix+1 < Nx || PBCx) {
m2 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m2)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
float D = DLUT2d[symidx(r0, r1)];
float D_2A = D/(2.0f*A);
if (is0(m2)) {
m2.x = m0.x;
m2.y = m0.y + (+cx * D_2A * m0.z);
m2.z = m0.z - (+cx * D_2A * m0.y);
}
h += (2.0f*A/(cx*cx)) * (m2 - m0);
h.y += (D/cx)*(-m2.z);
h.z -= (D/cx)*(-m2.y);
}
// y derivatives (along height)
{
float3 m1 = make_float3(0.0f, 0.0f, 0.0f);
i_ = idx(ix, lclampy(iy-1), iz);
if (iy-1 >= 0 || PBCy) {
m1 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m1)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
float D = DLUT2d[symidx(r0, r1)];
float D_2A = D/(2.0f*A);
if (is0(m1)) {
m1.x = m0.x + (-cy * D_2A * m0.z);
m1.y = m0.y;
m1.z = m0.z - (-cy * D_2A * m0.x);
}
h += (2.0f*A/(cy*cy)) * (m1 - m0);
h.x += (D/cy)*(m1.z);
h.z -= (D/cy)*(m1.x);
}
{
float3 m2 = make_float3(0.0f, 0.0f, 0.0f);
i_ = idx(ix, hclampy(iy+1), iz);
if (iy+1 < Ny || PBCy) {
m2 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m2)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
float D = DLUT2d[symidx(r0, r1)];
float D_2A = D/(2.0f*A);
if (is0(m2)) {
m2.x = m0.x + (+cy * D_2A * m0.z);
m2.y = m0.y;
m2.z = m0.z - (+cy * D_2A * m0.x);
}
h += (2.0f*A/(cy*cy)) * (m2 - m0);
h.x += (D/cy)*(-m2.z);
h.z -= (D/cy)*(-m2.x);
}
// only take vertical derivative for 3D sim
if (Nz != 1) {
// bottom neighbor
{
float3 m1 = make_float3(0.0f, 0.0f, 0.0f);
i_ = idx(ix, iy, lclampz(iz-1));
if (iz-1 >= 0 || PBCz) {
m1 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m1)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
if (is0(m1)) {
m1.x = m0.x;
m1.y = m0.y;
m1.z = m0.z;
}
h += (2.0f*A/(cz*cz)) * (m1 - m0);
}
// top neighbor
{
float3 m2 = make_float3(0.0f, 0.0f, 0.0f);
i_ = idx(ix, iy, hclampz(iz+1));
if (iz+1 < Nz || PBCz) {
m2 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m2)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
if (is0(m2)) {
m2.x = m0.x;
m2.y = m0.y;
m2.z = m0.z;
}
h += (2.0f*A/(cz*cz)) * (m2 - m0);
}
}
// write back, result is H + Hdmi + Hex
float invMs = inv_Msat(Ms_, Ms_mul, I);
Hx[I] += h.x*invMs;
Hy[I] += h.y*invMs;
Hz[I] += h.z*invMs;
}
// Note on boundary conditions.
//
// We need the derivative and laplacian of m in point A, but e.g. C lies out of the boundaries.
// We use the boundary condition in B (derivative of the magnetization) to extrapolate m to point C:
// m_C = m_A + (dm/dx)|_B * cellsize
//
// When point C is inside the boundary, we just use its actual value.
//
// Then we can take the central derivative in A:
// (dm/dx)|_A = (m_C - m_D) / (2*cellsize)
// And the laplacian:
// lapl(m)|_A = (m_C + m_D - 2*m_A) / (cellsize^2)
//
// All these operations should be second order as they involve only central derivatives.
//
// ------------------------------------------------------------------ *
// | | C |
// | | ** |
// | | *** |
// | | *** |
// | | *** |
// | | *** |
// | B |
// | *** | |
// | *** | |
// | **** | |
// | **** | |
// | **** | |
// | ** A | |
// | ***** | |
// | ****** | |
// | ********* | |
// |D ******** | |
// | | |
// +----------------+----------------+-----------------+---------------+
// -1 -0.5 0 0.5 1
// x
| 14bb7fec617cd471d325933b336def27171dd01e.cu | #include <stdint.h>
#include "exchange.h"
#include "float3.h"
#include "stencil.h"
#include "amul.h"
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
// Exchange + Dzyaloshinskii-Moriya interaction for bulk material.
// Crystal Symmetry D_2d
// Energy:
//
// E = D (L_xz(y) + L_yz(x))
//
// Effective field:
//
// Hx = 2A/Bs nabla²Mx + 2D/Bs dzMy - 2D/Bs dyMz
// Hy = 2A/Bs nabla²My + 2D/Bs dxMz - 2D/Bs dzMx
// Hz = 2A/Bs nabla²Mz + 2D/Bs dyMx - 2D/Bs dxMy
//
// Boundary conditions:
//
// 2A dxMx = 0
// -D Mz + 2A dxMy = 0
// D My + 2A dxMz = 0
//
// -D Mz + 2A dyMx = 0
// 2A dyMy = 0
// D Mx + 2A dyMz = 0
//
// 2A dzMx = 0
// 2A dzMy = 0
// 2A dzMz = 0
//
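// aLUT2d and DLUT2d are per-region-pair lookup tables for the exchange stiffness A
// and the DMI strength D, indexed through symidx(r0, r1).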
extern "C" __global__ void
adddmid2d(float* __restrict__ Hx, float* __restrict__ Hy, float* __restrict__ Hz,
float* __restrict__ mx, float* __restrict__ my, float* __restrict__ mz,
float* __restrict__ Ms_, float Ms_mul,
float* __restrict__ aLUT2d, float* __restrict__ DLUT2d,
uint8_t* __restrict__ regions,
float cx, float cy, float cz, int Nx, int Ny, int Nz, uint8_t PBC) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int iz = blockIdx.z * blockDim.z + threadIdx.z;
if (ix >= Nx || iy >= Ny || iz >= Nz) {
return;
}
int I = idx(ix, iy, iz); // central cell index
float3 h = make_float3(0.0,0.0,0.0); // add to H
float3 m0 = make_float3(mx[I], my[I], mz[I]); // central m
uint8_t r0 = regions[I];
int i_; // neighbor index
if(is0(m0)) {
return;
}
// x derivatives (along length)
{
float3 m1 = make_float3(0.0f, 0.0f, 0.0f); // left neighbor
i_ = idx(lclampx(ix-1), iy, iz); // load neighbor m if inside grid, keep 0 otherwise
if (ix-1 >= 0 || PBCx) {
m1 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m1)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
float D = DLUT2d[symidx(r0, r1)];
float D_2A = D/(2.0f*A);
if (is0(m1)) { // neighbor missing
m1.x = m0.x;
m1.y = m0.y + (-cx * D_2A * m0.z);
m1.z = m0.z - (-cx * D_2A * m0.y);
}
h += (2.0f*A/(cx*cx)) * (m1 - m0); // exchange
h.y += (D/cx)*(m1.z);
h.z -= (D/cx)*(m1.y);
}
{
float3 m2 = make_float3(0.0f, 0.0f, 0.0f); // right neighbor
i_ = idx(hclampx(ix+1), iy, iz);
if (ix+1 < Nx || PBCx) {
m2 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m2)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
float D = DLUT2d[symidx(r0, r1)];
float D_2A = D/(2.0f*A);
if (is0(m2)) {
m2.x = m0.x;
m2.y = m0.y + (+cx * D_2A * m0.z);
m2.z = m0.z - (+cx * D_2A * m0.y);
}
h += (2.0f*A/(cx*cx)) * (m2 - m0);
h.y += (D/cx)*(-m2.z);
h.z -= (D/cx)*(-m2.y);
}
// y derivatives (along height)
{
float3 m1 = make_float3(0.0f, 0.0f, 0.0f);
i_ = idx(ix, lclampy(iy-1), iz);
if (iy-1 >= 0 || PBCy) {
m1 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m1)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
float D = DLUT2d[symidx(r0, r1)];
float D_2A = D/(2.0f*A);
if (is0(m1)) {
m1.x = m0.x + (-cy * D_2A * m0.z);
m1.y = m0.y;
m1.z = m0.z - (-cy * D_2A * m0.x);
}
h += (2.0f*A/(cy*cy)) * (m1 - m0);
h.x += (D/cy)*(m1.z);
h.z -= (D/cy)*(m1.x);
}
{
float3 m2 = make_float3(0.0f, 0.0f, 0.0f);
i_ = idx(ix, hclampy(iy+1), iz);
if (iy+1 < Ny || PBCy) {
m2 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m2)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
float D = DLUT2d[symidx(r0, r1)];
float D_2A = D/(2.0f*A);
if (is0(m2)) {
m2.x = m0.x + (+cy * D_2A * m0.z);
m2.y = m0.y;
m2.z = m0.z - (+cy * D_2A * m0.x);
}
h += (2.0f*A/(cy*cy)) * (m2 - m0);
h.x += (D/cy)*(-m2.z);
h.z -= (D/cy)*(-m2.x);
}
// only take vertical derivative for 3D sim
if (Nz != 1) {
// bottom neighbor
{
float3 m1 = make_float3(0.0f, 0.0f, 0.0f);
i_ = idx(ix, iy, lclampz(iz-1));
if (iz-1 >= 0 || PBCz) {
m1 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m1)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
if (is0(m1)) {
m1.x = m0.x;
m1.y = m0.y;
m1.z = m0.z;
}
h += (2.0f*A/(cz*cz)) * (m1 - m0);
}
// top neighbor
{
float3 m2 = make_float3(0.0f, 0.0f, 0.0f);
i_ = idx(ix, iy, hclampz(iz+1));
if (iz+1 < Nz || PBCz) {
m2 = make_float3(mx[i_], my[i_], mz[i_]);
}
int r1 = is0(m2)? r0 : regions[i_];
float A = aLUT2d[symidx(r0, r1)];
if (is0(m2)) {
m2.x = m0.x;
m2.y = m0.y;
m2.z = m0.z;
}
h += (2.0f*A/(cz*cz)) * (m2 - m0);
}
}
// write back, result is H + Hdmi + Hex
float invMs = inv_Msat(Ms_, Ms_mul, I);
Hx[I] += h.x*invMs;
Hy[I] += h.y*invMs;
Hz[I] += h.z*invMs;
}
// Note on boundary conditions.
//
// We need the derivative and laplacian of m in point A, but e.g. C lies out of the boundaries.
// We use the boundary condition in B (derivative of the magnetization) to extrapolate m to point C:
// m_C = m_A + (dm/dx)|_B * cellsize
//
// When point C is inside the boundary, we just use its actual value.
//
// Then we can take the central derivative in A:
// (dm/dx)|_A = (m_C - m_D) / (2*cellsize)
// And the laplacian:
// lapl(m)|_A = (m_C + m_D - 2*m_A) / (cellsize^2)
//
// All these operations should be second order as they involve only central derivatives.
//
// ------------------------------------------------------------------ *
// | | C |
// | | ** |
// | | *** |
// | | *** |
// | | *** |
// | | *** |
// | B |
// | *** | |
// | *** | |
// | **** | |
// | **** | |
// | **** | |
// | ** A | |
// | ***** | |
// | ****** | |
// | ********* | |
// |D ******** | |
// | | |
// +----------------+----------------+-----------------+---------------+
// -1 -0.5 0 0.5 1
// x
|
e9e1cc910b5145a42f92236a5b6f6f003b27fc70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************
Emitting C Generated Code
*******************************************/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include "scanner_header.h"
/************* Functions **************/
__global__ void x4(int* x5, int* x6, int x7, int x8, int x9) {
int x10 = blockIdx.z;
// this is the permutation kernel for List(0, 2, 1)
// arg0: 3D input tensor (dimZ x dimY x dimX)
// arg1: 3D output tensor (dimZ x dimX x dimY)
// arg2: dimZ of input
// arg3: dimY of input
// arg4: dimX of input
// caller must use <<<dim3((dimX+31)/32, (dimY+31)/32, dimZ), dim3(32, 8, 1)>>>
__shared__ int x11[1056];
// read data from input array to shared memory
int x12 = 0;
int x13 = blockIdx.y * 32;
int x14 = blockIdx.x * 32;
while (x12 < 32) {
int x15 = x12;
int x16 = x10 + threadIdx.z;
if (x16 < x7 && x13 + (threadIdx.y + x15) < x8 && x14 + threadIdx.x < x9) x11[1056 * threadIdx.z + 33 * (threadIdx.y + x15) + threadIdx.x] = x5[(x13 + (threadIdx.y + x15) + x16 * x8) * x9 + (x14 + threadIdx.x)];
x12 = x12 + 8;
}
// sync threads
__syncthreads();
// write data from shared memory to output array
int x17 = 0;
while (x17 < 32) {
int x18 = x17;
int x19 = x10 + threadIdx.z;
if (x19 < x7 && x14 + (threadIdx.y + x18) < x9 && x13 + threadIdx.x < x8) x6[(x14 + (threadIdx.y + x18) + x19 * x9) * x8 + (x13 + threadIdx.x)] = x11[1056 * threadIdx.z + 33 * threadIdx.x + (threadIdx.y + x18)];
x17 = x17 + 8;
}
}
__global__ void x24(int* x25, int* x26, int x27, int x28, int x29) {
int x30 = blockIdx.y;
// this is the permutation kernel for List(1, 2, 0)
// arg0: 3D input tensor (dimZ x dimY x dimX)
// arg1: 3D output tensor (dimY x dimX x dimZ)
// arg2: dimZ of input
// arg3: dimY of input
// arg4: dimX of input
// caller must use <<<dim3((dimX+31)/32, dimY, (dimZ+31)/32), dim3(32, 1, 8)>>>
__shared__ int x31[1056];
// read data from input array to shared memory
int x32 = 0;
int x33 = blockIdx.z * 32;
int x34 = blockIdx.x * 32;
while (x32 < 32) {
int x35 = x32;
int x36 = x33 + (threadIdx.z + x35);
if (x36 < x27 && x30 + threadIdx.y < x28 && x34 + threadIdx.x < x29) x31[33 * (threadIdx.z + x35) + 33 * threadIdx.y + threadIdx.x] = x25[(x30 + threadIdx.y + x36 * x28) * x29 + (x34 + threadIdx.x)];
x32 = x32 + 8;
}
// sync threads
__syncthreads();
// write data from shared memory to output array
int x37 = 0;
while (x37 < 32) {
int x38 = x37;
int x39 = x30 + threadIdx.y;
if (x39 < x28 && x34 + (threadIdx.z + x38) < x29 && x33 + threadIdx.x < x27) x26[(x34 + (threadIdx.z + x38) + x39 * x29) * x27 + (x33 + threadIdx.x)] = x31[33 * threadIdx.x + 33 * threadIdx.y + (threadIdx.z + x38)];
x37 = x37 + 8;
}
}
__global__ void x44(int* x45, int* x46, int x47, int x48, int x49) {
int x50 = blockIdx.y;
// this is the permutation kernel for List(2, 1, 0)
// arg0: 3D input tensor (dimZ x dimY x dimX)
// arg1: 3D output tensor (dimX x dimY x dimZ)
// arg2: dimZ of input
// arg3: dimY of input
// arg4: dimX of input
// caller must use <<<dim3((dimX+31)/32, dimY, (dimZ+31)/32), dim3(32, 1, 8)>>>
__shared__ int x51[1056];
// read data from input array to shared memory
int x52 = 0;
int x53 = blockIdx.z * 32;
int x54 = blockIdx.x * 32;
while (x52 < 32) {
int x55 = x52;
int x56 = x53 + (threadIdx.z + x55);
if (x56 < x47 && x50 + threadIdx.y < x48 && x54 + threadIdx.x < x49) x51[33 * (threadIdx.z + x55) + 33 * threadIdx.y + threadIdx.x] = x45[(x50 + threadIdx.y + x56 * x48) * x49 + (x54 + threadIdx.x)];
x52 = x52 + 8;
}
// sync threads
__syncthreads();
// write data from shared memory to output array
int x57 = 0;
while (x57 < 32) {
int x58 = x57;
int x59 = x54 + (threadIdx.z + x58);
if (x59 < x49 && x50 + threadIdx.y < x48 && x53 + threadIdx.x < x47) x46[(x50 + threadIdx.y + x59 * x48) * x47 + (x53 + threadIdx.x)] = x51[33 * threadIdx.x + 33 * threadIdx.y + (threadIdx.z + x58)];
x57 = x57 + 8;
}
}
__global__ void x64(int* x65, int* x66, int x67, int x68, int x69) {
int x70 = blockIdx.z;
// this is the permutation kernel for List(2, 0, 1)
// arg0: 3D input tensor (dimZ x dimY x dimX)
// arg1: 3D output tensor (dimX x dimZ x dimY)
// arg2: dimZ of input
// arg3: dimY of input
// arg4: dimX of input
// caller must use <<<dim3((dimX+31)/32, (dimY+31)/32, dimZ), dim3(32, 8, 1)>>>
__shared__ int x71[1056];
// read data from input array to shared memory
int x72 = 0;
int x73 = blockIdx.y * 32;
int x74 = blockIdx.x * 32;
while (x72 < 32) {
int x75 = x72;
int x76 = x70 + threadIdx.z;
if (x76 < x67 && x73 + (threadIdx.y + x75) < x68 && x74 + threadIdx.x < x69) x71[1056 * threadIdx.z + 33 * (threadIdx.y + x75) + threadIdx.x] = x65[(x73 + (threadIdx.y + x75) + x76 * x68) * x69 + (x74 + threadIdx.x)];
x72 = x72 + 8;
}
// sync threads
__syncthreads();
// write data from shared memory to output array
int x77 = 0;
while (x77 < 32) {
int x78 = x77;
int x79 = x74 + (threadIdx.y + x78);
if (x79 < x69 && x70 + threadIdx.z < x67 && x73 + threadIdx.x < x68) x66[(x70 + threadIdx.z + x79 * x67) * x68 + (x73 + threadIdx.x)] = x71[1056 * threadIdx.z + 33 * threadIdx.x + (threadIdx.y + x78)];
x77 = x77 + 8;
}
}
/**************** Snippet ****************/
void Snippet(int x0) {
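  // Each block below loads a 60 x 55 x 50 int tensor from golden input data, runs
  // the matching permutation kernel on the GPU, and checks against the golden output.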
int* x1 = (int*)malloc(165000 * sizeof(int));
scan_ints("golden/permute3D_021/input.data", x1, 165000);
int* x2 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(hipMalloc(&x2, (size_t)(165000 * sizeof(int))));
CUDA_CALL(hipMemcpy(x2, x1, (size_t)(165000 * sizeof(int)), hipMemcpyHostToDevice));
int* x3 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(hipMalloc(&x3, (size_t)(165000 * sizeof(int))));
hipLaunchKernelGGL(( x4), dim3(dim3(2, 2, 60)), dim3(dim3(32, 8, 1)), 0, 0, x2, x3, 60, 55, 50);
int* x20 = (int*)malloc(165000 * sizeof(int));
CUDA_CALL(hipMemcpy(x20, x3, (size_t)(165000 * sizeof(int)), hipMemcpyDeviceToHost));
check_int_array("golden/permute3D_021/output.data", x20, 165000);
int* x21 = (int*)malloc(165000 * sizeof(int));
scan_ints("golden/permute3D_120/input.data", x21, 165000);
int* x22 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(hipMalloc(&x22, (size_t)(165000 * sizeof(int))));
CUDA_CALL(hipMemcpy(x22, x21, (size_t)(165000 * sizeof(int)), hipMemcpyHostToDevice));
int* x23 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(hipMalloc(&x23, (size_t)(165000 * sizeof(int))));
hipLaunchKernelGGL(( x24), dim3(dim3(2, 55, 2)), dim3(dim3(32, 1, 8)), 0, 0, x22, x23, 60, 55, 50);
int* x40 = (int*)malloc(165000 * sizeof(int));
CUDA_CALL(hipMemcpy(x40, x23, (size_t)(165000 * sizeof(int)), hipMemcpyDeviceToHost));
check_int_array("golden/permute3D_120/output.data", x40, 165000);
int* x41 = (int*)malloc(165000 * sizeof(int));
scan_ints("golden/permute3D_210/input.data", x41, 165000);
int* x42 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(hipMalloc(&x42, (size_t)(165000 * sizeof(int))));
CUDA_CALL(hipMemcpy(x42, x41, (size_t)(165000 * sizeof(int)), hipMemcpyHostToDevice));
int* x43 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(hipMalloc(&x43, (size_t)(165000 * sizeof(int))));
hipLaunchKernelGGL(( x44), dim3(dim3(2, 55, 2)), dim3(dim3(32, 1, 8)), 0, 0, x42, x43, 60, 55, 50);
int* x60 = (int*)malloc(165000 * sizeof(int));
CUDA_CALL(hipMemcpy(x60, x43, (size_t)(165000 * sizeof(int)), hipMemcpyDeviceToHost));
check_int_array("golden/permute3D_210/output.data", x60, 165000);
int* x61 = (int*)malloc(165000 * sizeof(int));
scan_ints("golden/permute3D_201/input.data", x61, 165000);
int* x62 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(hipMalloc(&x62, (size_t)(165000 * sizeof(int))));
CUDA_CALL(hipMemcpy(x62, x61, (size_t)(165000 * sizeof(int)), hipMemcpyHostToDevice));
int* x63 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(hipMalloc(&x63, (size_t)(165000 * sizeof(int))));
hipLaunchKernelGGL(( x64), dim3(dim3(2, 2, 60)), dim3(dim3(32, 8, 1)), 0, 0, x62, x63, 60, 55, 50);
int* x80 = (int*)malloc(165000 * sizeof(int));
CUDA_CALL(hipMemcpy(x80, x63, (size_t)(165000 * sizeof(int)), hipMemcpyDeviceToHost));
check_int_array("golden/permute3D_201/output.data", x80, 165000);
}
/*****************************************
End of C Generated Code
*******************************************/
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: %s <arg>\n", argv[0]);
return 0;
}
Snippet(atoi(argv[1]));
return 0;
}
| e9e1cc910b5145a42f92236a5b6f6f003b27fc70.cu | /*****************************************
Emitting C Generated Code
*******************************************/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include "scanner_header.h"
/************* Functions **************/
__global__ void x4(int* x5, int* x6, int x7, int x8, int x9) {
int x10 = blockIdx.z;
// this is the permutation kernel for List(0, 2, 1)
// arg0: 3D input tensor (dimZ x dimY x dimX)
// arg1: 3D output tensor (dimZ x dimX x dimY)
// arg2: dimZ of input
// arg3: dimY of input
// arg4: dimX of input
// caller must use <<<dim3((dimX+31)/32, (dimY+31)/32, dimZ), dim3(32, 8, 1)>>>
__shared__ int x11[1056];
// read data from input array to shared memory
int x12 = 0;
int x13 = blockIdx.y * 32;
int x14 = blockIdx.x * 32;
while (x12 < 32) {
int x15 = x12;
int x16 = x10 + threadIdx.z;
if (x16 < x7 && x13 + (threadIdx.y + x15) < x8 && x14 + threadIdx.x < x9) x11[1056 * threadIdx.z + 33 * (threadIdx.y + x15) + threadIdx.x] = x5[(x13 + (threadIdx.y + x15) + x16 * x8) * x9 + (x14 + threadIdx.x)];
x12 = x12 + 8;
}
// sync threads
__syncthreads();
// write data from shared memory to output array
int x17 = 0;
while (x17 < 32) {
int x18 = x17;
int x19 = x10 + threadIdx.z;
if (x19 < x7 && x14 + (threadIdx.y + x18) < x9 && x13 + threadIdx.x < x8) x6[(x14 + (threadIdx.y + x18) + x19 * x9) * x8 + (x13 + threadIdx.x)] = x11[1056 * threadIdx.z + 33 * threadIdx.x + (threadIdx.y + x18)];
x17 = x17 + 8;
}
}
__global__ void x24(int* x25, int* x26, int x27, int x28, int x29) {
int x30 = blockIdx.y;
// this is the permutation kernel for List(1, 2, 0)
// arg0: 3D input tensor (dimZ x dimY x dimX)
// arg1: 3D output tensor (dimY x dimX x dimZ)
// arg2: dimZ of input
// arg3: dimY of input
// arg4: dimX of input
// caller must use <<<dim3((dimX+31)/32, dimY, (dimZ+31)/32), dim3(32, 1, 8)>>>
__shared__ int x31[1056];
// read data from input array to shared memory
int x32 = 0;
int x33 = blockIdx.z * 32;
int x34 = blockIdx.x * 32;
while (x32 < 32) {
int x35 = x32;
int x36 = x33 + (threadIdx.z + x35);
if (x36 < x27 && x30 + threadIdx.y < x28 && x34 + threadIdx.x < x29) x31[33 * (threadIdx.z + x35) + 33 * threadIdx.y + threadIdx.x] = x25[(x30 + threadIdx.y + x36 * x28) * x29 + (x34 + threadIdx.x)];
x32 = x32 + 8;
}
// sync threads
__syncthreads();
// write data from shared memory to output array
int x37 = 0;
while (x37 < 32) {
int x38 = x37;
int x39 = x30 + threadIdx.y;
if (x39 < x28 && x34 + (threadIdx.z + x38) < x29 && x33 + threadIdx.x < x27) x26[(x34 + (threadIdx.z + x38) + x39 * x29) * x27 + (x33 + threadIdx.x)] = x31[33 * threadIdx.x + 33 * threadIdx.y + (threadIdx.z + x38)];
x37 = x37 + 8;
}
}
__global__ void x44(int* x45, int* x46, int x47, int x48, int x49) {
int x50 = blockIdx.y;
// this is the permutation kernel for List(2, 1, 0)
// arg0: 3D input tensor (dimZ x dimY x dimX)
// arg1: 3D output tensor (dimX x dimY x dimZ)
// arg2: dimZ of input
// arg3: dimY of input
// arg4: dimX of input
// caller must use <<<dim3((dimX+31)/32, dimY, (dimZ+31)/32), dim3(32, 1, 8)>>>
__shared__ int x51[1056];
// read data from input array to shared memory
int x52 = 0;
int x53 = blockIdx.z * 32;
int x54 = blockIdx.x * 32;
while (x52 < 32) {
int x55 = x52;
int x56 = x53 + (threadIdx.z + x55);
if (x56 < x47 && x50 + threadIdx.y < x48 && x54 + threadIdx.x < x49) x51[33 * (threadIdx.z + x55) + 33 * threadIdx.y + threadIdx.x] = x45[(x50 + threadIdx.y + x56 * x48) * x49 + (x54 + threadIdx.x)];
x52 = x52 + 8;
}
// sync threads
__syncthreads();
// write data from shared memory to output array
int x57 = 0;
while (x57 < 32) {
int x58 = x57;
int x59 = x54 + (threadIdx.z + x58);
if (x59 < x49 && x50 + threadIdx.y < x48 && x53 + threadIdx.x < x47) x46[(x50 + threadIdx.y + x59 * x48) * x47 + (x53 + threadIdx.x)] = x51[33 * threadIdx.x + 33 * threadIdx.y + (threadIdx.z + x58)];
x57 = x57 + 8;
}
}
__global__ void x64(int* x65, int* x66, int x67, int x68, int x69) {
int x70 = blockIdx.z;
// this is the permutation kernel for List(2, 0, 1)
// arg0: 3D input tensor (dimZ x dimY x dimX)
// arg1: 3D output tensor (dimX x dimZ x dimY)
// arg2: dimZ of input
// arg3: dimY of input
// arg4: dimX of input
// caller must use <<<dim3((dimX+31)/32, (dimY+31)/32, dimZ), dim3(32, 8, 1)>>>
__shared__ int x71[1056];
// read data from input array to shared memory
int x72 = 0;
int x73 = blockIdx.y * 32;
int x74 = blockIdx.x * 32;
while (x72 < 32) {
int x75 = x72;
int x76 = x70 + threadIdx.z;
if (x76 < x67 && x73 + (threadIdx.y + x75) < x68 && x74 + threadIdx.x < x69) x71[1056 * threadIdx.z + 33 * (threadIdx.y + x75) + threadIdx.x] = x65[(x73 + (threadIdx.y + x75) + x76 * x68) * x69 + (x74 + threadIdx.x)];
x72 = x72 + 8;
}
// sync threads
__syncthreads();
// write data from shared memory to output array
int x77 = 0;
while (x77 < 32) {
int x78 = x77;
int x79 = x74 + (threadIdx.y + x78);
if (x79 < x69 && x70 + threadIdx.z < x67 && x73 + threadIdx.x < x68) x66[(x70 + threadIdx.z + x79 * x67) * x68 + (x73 + threadIdx.x)] = x71[1056 * threadIdx.z + 33 * threadIdx.x + (threadIdx.y + x78)];
x77 = x77 + 8;
}
}
/**************** Snippet ****************/
void Snippet(int x0) {
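  // Each block below loads a 60 x 55 x 50 int tensor from golden input data, runs
  // the matching permutation kernel on the GPU, and checks against the golden output.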
int* x1 = (int*)malloc(165000 * sizeof(int));
scan_ints("golden/permute3D_021/input.data", x1, 165000);
int* x2 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(cudaMalloc(&x2, (size_t)(165000 * sizeof(int))));
CUDA_CALL(cudaMemcpy(x2, x1, (size_t)(165000 * sizeof(int)), cudaMemcpyHostToDevice));
int* x3 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(cudaMalloc(&x3, (size_t)(165000 * sizeof(int))));
x4<<<dim3(2, 2, 60), dim3(32, 8, 1)>>>(x2, x3, 60, 55, 50);
int* x20 = (int*)malloc(165000 * sizeof(int));
CUDA_CALL(cudaMemcpy(x20, x3, (size_t)(165000 * sizeof(int)), cudaMemcpyDeviceToHost));
check_int_array("golden/permute3D_021/output.data", x20, 165000);
int* x21 = (int*)malloc(165000 * sizeof(int));
scan_ints("golden/permute3D_120/input.data", x21, 165000);
int* x22 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(cudaMalloc(&x22, (size_t)(165000 * sizeof(int))));
CUDA_CALL(cudaMemcpy(x22, x21, (size_t)(165000 * sizeof(int)), cudaMemcpyHostToDevice));
int* x23 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(cudaMalloc(&x23, (size_t)(165000 * sizeof(int))));
x24<<<dim3(2, 55, 2), dim3(32, 1, 8)>>>(x22, x23, 60, 55, 50);
int* x40 = (int*)malloc(165000 * sizeof(int));
CUDA_CALL(cudaMemcpy(x40, x23, (size_t)(165000 * sizeof(int)), cudaMemcpyDeviceToHost));
check_int_array("golden/permute3D_120/output.data", x40, 165000);
int* x41 = (int*)malloc(165000 * sizeof(int));
scan_ints("golden/permute3D_210/input.data", x41, 165000);
int* x42 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(cudaMalloc(&x42, (size_t)(165000 * sizeof(int))));
CUDA_CALL(cudaMemcpy(x42, x41, (size_t)(165000 * sizeof(int)), cudaMemcpyHostToDevice));
int* x43 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(cudaMalloc(&x43, (size_t)(165000 * sizeof(int))));
x44<<<dim3(2, 55, 2), dim3(32, 1, 8)>>>(x42, x43, 60, 55, 50);
int* x60 = (int*)malloc(165000 * sizeof(int));
CUDA_CALL(cudaMemcpy(x60, x43, (size_t)(165000 * sizeof(int)), cudaMemcpyDeviceToHost));
check_int_array("golden/permute3D_210/output.data", x60, 165000);
int* x61 = (int*)malloc(165000 * sizeof(int));
scan_ints("golden/permute3D_201/input.data", x61, 165000);
int* x62 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(cudaMalloc(&x62, (size_t)(165000 * sizeof(int))));
CUDA_CALL(cudaMemcpy(x62, x61, (size_t)(165000 * sizeof(int)), cudaMemcpyHostToDevice));
int* x63 = (int*)malloc(0 * sizeof(int));
CUDA_CALL(cudaMalloc(&x63, (size_t)(165000 * sizeof(int))));
x64<<<dim3(2, 2, 60), dim3(32, 8, 1)>>>(x62, x63, 60, 55, 50);
int* x80 = (int*)malloc(165000 * sizeof(int));
CUDA_CALL(cudaMemcpy(x80, x63, (size_t)(165000 * sizeof(int)), cudaMemcpyDeviceToHost));
check_int_array("golden/permute3D_201/output.data", x80, 165000);
}
/*****************************************
End of C Generated Code
*******************************************/
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: %s <arg>\n", argv[0]);
return 0;
}
Snippet(atoi(argv[1]));
return 0;
}
|
f2dce917ccec38d416f2d83c97c3aec40c73c066.hip | // !!! This is a file automatically generated by hipify!!!
#include "../Linear.h"
#include <CUDA_ptr.hpp>
#include <bits/stdc++.h>
#include <csr_mat/csr_mat.h>
#include <wtime.h>
using namespace culib;
int main(int ac, char **av) {
auto in_size = std::atoi(av[1]);
auto out_size = std::atoi(av[2]);
auto seq_len = std::atoi(av[3]);
float sparsity = std::atof(av[4]);
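    // Benchmark: build a CSR-backed sparse linear layer of shape out_size x in_size
    // at the given sparsity, run its forward pass on a seq_len batch of all-ones
    // (half_one) half-precision inputs, and print the time reported by wtime_new
    // (which appears to average 10 timed runs).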
CUDA_ptr<half> IN(seq_len * in_size, half_one); // input
CUDA_ptr<half> OUT(out_size * seq_len); // output
auto linear =
gen_sparse_linear<csr_mat>(out_size, in_size, seq_len, sparsity);
hipStream_t stream;
hipStreamCreate(&stream);
linear->forward(OUT.get(), IN.get(), stream);
auto time = wtime_new(
10, [&]() { linear->forward(OUT.get(), IN.get(), stream); },
[]() { cudaChk(hipDeviceSynchronize()); });
std::vector<half> res(out_size * seq_len);
OUT.dump(res.data());
std::cout << time << std::endl;
// for (auto i : res) {
// std::cout << __half2float(i) << ",";
// }
// puts("");
} | f2dce917ccec38d416f2d83c97c3aec40c73c066.cu | #include "../Linear.h"
#include <CUDA_ptr.hpp>
#include <bits/stdc++.h>
#include <csr_mat/csr_mat.h>
#include <wtime.h>
using namespace culib;
int main(int ac, char **av) {
auto in_size = std::atoi(av[1]);
auto out_size = std::atoi(av[2]);
auto seq_len = std::atoi(av[3]);
float sparsity = std::atof(av[4]);
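    // Benchmark: build a CSR-backed sparse linear layer of shape out_size x in_size
    // at the given sparsity, run its forward pass on a seq_len batch of all-ones
    // (half_one) half-precision inputs, and print the time reported by wtime_new
    // (which appears to average 10 timed runs).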
CUDA_ptr<half> IN(seq_len * in_size, half_one); // input
CUDA_ptr<half> OUT(out_size * seq_len); // output
auto linear =
gen_sparse_linear<csr_mat>(out_size, in_size, seq_len, sparsity);
cudaStream_t stream;
cudaStreamCreate(&stream);
linear->forward(OUT.get(), IN.get(), stream);
auto time = wtime_new(
10, [&]() { linear->forward(OUT.get(), IN.get(), stream); },
[]() { cudaChk(cudaDeviceSynchronize()); });
std::vector<half> res(out_size * seq_len);
OUT.dump(res.data());
std::cout << time << std::endl;
// for (auto i : res) {
// std::cout << __half2float(i) << ",";
// }
// puts("");
} |
bdaa6b34bdf345d7ce23522781f4314e0801afce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../helper/util.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
__global__ void
vec_add_kernel (int *a, int *b, int *c, int len)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
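  // grid-stride loop: each thread handles every (blockDim.x * gridDim.x)-th element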
  while (tid < len)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
int
main (int args, char **argv)
{
int len = (2<<10);
int *a = new int[len];
int *b = new int[len];
int *c = new int[len];
for (int i = 0; i < len; i++)
{
a[i] = rand () % 1024;
b[i] = rand () % 512;
}
int *a_d, *b_d, *c_d;
H_ERR(hipMalloc ((void **) &a_d, sizeof (int) * len));
H_ERR(hipMalloc ((void **) &b_d, sizeof (int) * len));
H_ERR(hipMalloc ((void **) &c_d, sizeof (int) * len));
H_ERR(hipMemcpy (a_d, a, sizeof (int) * len, hipMemcpyHostToDevice));
H_ERR(hipMemcpy (b_d, b, sizeof (int) * len, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( vec_add_kernel) , dim3(256), dim3(256) , 0, 0, a_d, b_d, c_d, len);
H_ERR(hipMemcpy (c, c_d, sizeof (int) * len, hipMemcpyDeviceToHost));
for (int i = 0; i < len; i++)
assert ((a[i] + b[i]) == c[i]);
printf ("Succeed!\n");
return 0;
}
| bdaa6b34bdf345d7ce23522781f4314e0801afce.cu | #include "../helper/util.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
__global__ void
vec_add_kernel (int *a, int *b, int *c, int len)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
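  // grid-stride loop: each thread handles every (blockDim.x * gridDim.x)-th element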
  while (tid < len)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
int
main (int args, char **argv)
{
int len = (2<<10);
int *a = new int[len];
int *b = new int[len];
int *c = new int[len];
for (int i = 0; i < len; i++)
{
a[i] = rand () % 1024;
b[i] = rand () % 512;
}
int *a_d, *b_d, *c_d;
H_ERR(cudaMalloc ((void **) &a_d, sizeof (int) * len));
H_ERR(cudaMalloc ((void **) &b_d, sizeof (int) * len));
H_ERR(cudaMalloc ((void **) &c_d, sizeof (int) * len));
H_ERR(cudaMemcpy (a_d, a, sizeof (int) * len, cudaMemcpyHostToDevice));
H_ERR(cudaMemcpy (b_d, b, sizeof (int) * len, cudaMemcpyHostToDevice));
vec_add_kernel <<< 256, 256 >>> (a_d, b_d, c_d, len);
H_ERR(cudaMemcpy (c, c_d, sizeof (int) * len, cudaMemcpyDeviceToHost));
for (int i = 0; i < len; i++)
assert ((a[i] + b[i]) == c[i]);
printf ("Succeed!\n");
return 0;
}
|
11f342a932cb396a0785f850c6b8df179fd84241.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lietorch_gpu.h"
#include <Eigen/Dense>
#include "common.h"
#include "dispatch.h"
#include "so3.h"
#include "rxso3.h"
#include "se3.h"
#include "sim3.h"
#define GPU_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i<n; i += blockDim.x * gridDim.x)
#define NUM_THREADS 256
#define NUM_BLOCKS(batch_size) ((batch_size + NUM_THREADS - 1) / NUM_THREADS)
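// GPU_1D_KERNEL_LOOP is a grid-stride loop over n work items; NUM_BLOCKS is the
// ceiling of batch_size / NUM_THREADS, so every batch element is covered.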
template <typename Group, typename scalar_t>
__global__ void exp_forward_kernel(const scalar_t* a_ptr, scalar_t* X_ptr, int num_threads) {
// exponential map forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a(a_ptr + i*Group::K);
Eigen::Map<Data>(X_ptr + i*Group::N) = Group::Exp(a).data();
}
}
template <typename Group, typename scalar_t>
__global__ void exp_backward_kernel(const scalar_t* grad, const scalar_t* a_ptr, scalar_t* da, int num_threads) {
// exponential map backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a(a_ptr + i*Group::K);
Grad dX(grad + i*Group::N);
Eigen::Map<Grad>(da + i*Group::K) = dX * Group::left_jacobian(a);
}
}
template <typename Group, typename scalar_t>
__global__ void log_forward_kernel(const scalar_t* X_ptr, scalar_t* a_ptr, int num_threads) {
// logarithm map forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a = Group(X_ptr + i*Group::N).Log();
Eigen::Map<Tangent>(a_ptr + i*Group::K) = a;
}
}
template <typename Group, typename scalar_t>
__global__ void log_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, scalar_t* dX, int num_threads) {
// logarithm map backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a = Group(X_ptr + i*Group::N).Log();
Grad da(grad + i*Group::K);
Eigen::Map<Grad>(dX + i*Group::N) = da * Group::left_jacobian_inverse(a);
}
}
template <typename Group, typename scalar_t>
__global__ void inv_forward_kernel(const scalar_t* X_ptr, scalar_t* Y_ptr, int num_threads) {
// group inverse forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Eigen::Map<Data>(Y_ptr + i*Group::N) = X.inv().data();
}
}
template <typename Group, typename scalar_t>
__global__ void inv_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, scalar_t *dX, int num_threads) {
// group inverse backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group Y = Group(X_ptr + i*Group::N).inv();
Grad dY(grad + i*Group::N);
Eigen::Map<Grad>(dX + i*Group::N) = -dY * Y.Adj();
}
}
template <typename Group, typename scalar_t>
__global__ void mul_forward_kernel(const scalar_t* X_ptr, const scalar_t* Y_ptr, scalar_t* Z_ptr, int num_threads) {
// group multiplication forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group Z = Group(X_ptr + i*Group::N) * Group(Y_ptr + i*Group::N);
Eigen::Map<Data>(Z_ptr + i*Group::N) = Z.data();
}
}
template <class Group, typename scalar_t>
__global__ void mul_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* Y_ptr, scalar_t* dX, scalar_t* dY, int num_threads) {
// group multiplication backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Grad dZ(grad + i*Group::N);
Group X(X_ptr + i*Group::N);
Eigen::Map<Grad>(dX + i*Group::N) = dZ;
Eigen::Map<Grad>(dY + i*Group::N) = dZ * X.Adj();
}
}
template <typename Group, typename scalar_t>
__global__ void adj_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) {
// adjoint forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent a(a_ptr + i*Group::K);
Eigen::Map<Tangent>(b_ptr + i*Group::K) = X.Adj(a);
}
}
template <typename Group, typename scalar_t>
__global__ void adj_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* dX, scalar_t* da, int num_threads) {
// adjoint backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Grad db(grad + i*Group::K);
Tangent a(a_ptr + i*Group::K);
Tangent b = X.Adj() * a;
Eigen::Map<Grad>(da + i*Group::K) = db * X.Adj();
Eigen::Map<Grad>(dX + i*Group::N) = -db * Group::adj(b);
}
}
template <typename Group, typename scalar_t>
__global__ void adjT_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) {
  // transposed adjoint forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent a(a_ptr + i*Group::K);
Eigen::Map<Tangent>(b_ptr + i*Group::K) = X.AdjT(a);
}
}
template <typename Group, typename scalar_t>
__global__ void adjT_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* dX, scalar_t* da, int num_threads) {
  // transposed adjoint backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent db(grad + i*Group::K);
Grad a(a_ptr + i*Group::K);
Eigen::Map<Tangent>(da + i*Group::K) = X.Adj(db);
Eigen::Map<Grad>(dX + i*Group::N) = -a * Group::adj(X.Adj(db));
}
}
template <typename Group, typename scalar_t>
__global__ void act_forward_kernel(const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* q_ptr, int num_threads) {
// action on point forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
using Point = Eigen::Matrix<scalar_t,3,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*3);
Eigen::Map<Point>(q_ptr + i*3) = X * p;
}
}
template <typename Group, typename scalar_t>
__global__ void act_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* dX, scalar_t* dp, int num_threads) {
  // action on point backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Point = Eigen::Matrix<scalar_t,3,1>;
using PointGrad = Eigen::Matrix<scalar_t,1,3>;
using Transformation = Eigen::Matrix<scalar_t,4,4>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*3);
PointGrad dq(grad + i*3);
Eigen::Map<PointGrad>(dp + i*3) = dq * X.Matrix4x4().block<3,3>(0,0);
Eigen::Map<Grad>(dX + i*Group::N) = dq * Group::act_jacobian(X*p);
}
}
template <typename Group, typename scalar_t>
__global__ void act4_forward_kernel(const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* q_ptr, int num_threads) {
// action on point forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
using Point = Eigen::Matrix<scalar_t,4,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*4);
Eigen::Map<Point>(q_ptr + i*4) = X.act4(p);
}
}
template <typename Group, typename scalar_t>
__global__ void act4_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* dX, scalar_t* dp, int num_threads) {
  // action on homogeneous point backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Point = Eigen::Matrix<scalar_t,4,1>;
using PointGrad = Eigen::Matrix<scalar_t,1,4>;
using Transformation = Eigen::Matrix<scalar_t,4,4>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*4);
PointGrad dq(grad + i*4);
Eigen::Map<PointGrad>(dp + i*4) = dq * X.Matrix4x4();
const Point q = X.act4(p);
Eigen::Map<Grad>(dX + i*Group::N) = dq * Group::act4_jacobian(q);
}
}
template <typename Group, typename scalar_t>
__global__ void as_matrix_forward_kernel(const scalar_t* X_ptr, scalar_t* T_ptr, int num_threads) {
// convert to 4x4 matrix representation
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
using Matrix4 = Eigen::Matrix<scalar_t,4,4,Eigen::RowMajor>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Eigen::Map<Matrix4>(T_ptr + i*16) = X.Matrix4x4();
}
}
template <typename Group, typename scalar_t>
__global__ void orthogonal_projector_kernel(const scalar_t* X_ptr, scalar_t* P_ptr, int num_threads) {
// orthogonal projection matrix
using Proj = Eigen::Matrix<scalar_t,Group::N,Group::N,Eigen::RowMajor>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Eigen::Map<Proj>(P_ptr + i*Group::N*Group::N) = X.orthogonal_projector();
}
}
template <typename Group, typename scalar_t>
__global__ void jleft_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) {
// left jacobian inverse action
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent a(a_ptr + i*Group::K);
Tangent b = Group::left_jacobian_inverse(X.Log()) * a;
Eigen::Map<Tangent>(b_ptr + i*Group::K) = b;
}
}
// unary operations
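// Host-side launchers: DISPATCH_GROUP_AND_FLOATING_TYPES instantiates the kernel
// for the group selected by group_id (SO3, RxSO3, SE3 or Sim3, per the headers
// included above) and the tensor's floating type, then launches it with one
// work item per batch element.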
torch::Tensor exp_forward_gpu(int group_id, torch::Tensor a) {
int batch_size = a.size(0);
torch::Tensor X;
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, a.type(), "exp_forward_kernel", ([&] {
X = torch::zeros({batch_size, group_t::N}, a.options());
hipLaunchKernelGGL(( exp_forward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
a.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
batch_size);
}));
return X;
}
std::vector<torch::Tensor> exp_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor a) {
int batch_size = a.size(0);
torch::Tensor da = torch::zeros(a.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, a.type(), "exp_backward_kernel", ([&] {
hipLaunchKernelGGL(( exp_backward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
grad.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
da.data_ptr<scalar_t>(),
batch_size);
}));
return {da};
}
torch::Tensor log_forward_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor a;
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "log_forward_kernel", ([&] {
a = torch::zeros({batch_size, group_t::K}, X.options());
hipLaunchKernelGGL(( log_forward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
batch_size);
}));
return a;
}
std::vector<torch::Tensor> log_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "log_backward_kernel", ([&] {
hipLaunchKernelGGL(( log_backward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
batch_size);
}));
return {dX};
}
torch::Tensor inv_forward_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor Y = torch::zeros_like(X);
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "inv_forward_kernel", ([&] {
hipLaunchKernelGGL(( inv_forward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
X.data_ptr<scalar_t>(),
Y.data_ptr<scalar_t>(),
batch_size);
}));
return Y;
}
std::vector<torch::Tensor> inv_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "inv_backward_kernel", ([&] {
hipLaunchKernelGGL(( inv_backward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
batch_size);
}));
return {dX};
}
// binary operations
torch::Tensor mul_forward_gpu(int group_id, torch::Tensor X, torch::Tensor Y) {
int batch_size = X.size(0);
torch::Tensor Z = torch::zeros_like(X);
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "mul_forward_kernel", ([&] {
hipLaunchKernelGGL(( mul_forward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
X.data_ptr<scalar_t>(),
Y.data_ptr<scalar_t>(),
Z.data_ptr<scalar_t>(),
batch_size);
}));
return Z;
}
std::vector<torch::Tensor> mul_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor Y) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor dY = torch::zeros(Y.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "mul_backward_kernel", ([&] {
hipLaunchKernelGGL(( mul_backward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
Y.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
dY.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, dY};
}
torch::Tensor adj_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor b = torch::zeros(a.sizes(), a.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adj_forward_kernel", ([&] {
hipLaunchKernelGGL(( adj_forward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
batch_size);
}));
return b;
}
std::vector<torch::Tensor> adj_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor da = torch::zeros(a.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adj_backward_kernel", ([&] {
hipLaunchKernelGGL(( adj_backward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
da.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, da};
}
torch::Tensor adjT_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor b = torch::zeros(a.sizes(), a.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adjT_forward_kernel", ([&] {
hipLaunchKernelGGL(( adjT_forward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
batch_size);
}));
return b;
}
std::vector<torch::Tensor> adjT_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor da = torch::zeros(a.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adjT_backward_kernel", ([&] {
hipLaunchKernelGGL(( adjT_backward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
da.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, da};
}
torch::Tensor act_forward_gpu(int group_id, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor q = torch::zeros(p.sizes(), p.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act_forward_kernel", ([&] {
hipLaunchKernelGGL(( act_forward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
q.data_ptr<scalar_t>(),
batch_size);
}));
return q;
}
std::vector<torch::Tensor> act_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor dp = torch::zeros(p.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act_backward_kernel", ([&] {
hipLaunchKernelGGL(( act_backward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
dp.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, dp};
}
torch::Tensor act4_forward_gpu(int group_id, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor q = torch::zeros(p.sizes(), p.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act4_forward_kernel", ([&] {
hipLaunchKernelGGL(( act4_forward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
q.data_ptr<scalar_t>(),
batch_size);
}));
return q;
}
std::vector<torch::Tensor> act4_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor dp = torch::zeros(p.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act4_backward_kernel", ([&] {
hipLaunchKernelGGL(( act4_backward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
dp.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, dp};
}
torch::Tensor as_matrix_forward_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor T4x4 = torch::zeros({X.size(0), 4, 4}, X.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "as_matrix_forward_kernel", ([&] {
hipLaunchKernelGGL(( as_matrix_forward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
X.data_ptr<scalar_t>(),
T4x4.data_ptr<scalar_t>(),
batch_size);
}));
return T4x4;
}
torch::Tensor orthogonal_projector_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor P;
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "orthogonal_projector_kernel", ([&] {
P = torch::zeros({X.size(0), group_t::N, group_t::N}, X.options());
hipLaunchKernelGGL(( orthogonal_projector_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
X.data_ptr<scalar_t>(),
P.data_ptr<scalar_t>(),
batch_size);
}));
return P;
}
torch::Tensor jleft_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor b = torch::zeros(a.sizes(), a.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "jleft_forward_kernel", ([&] {
hipLaunchKernelGGL(( jleft_forward_kernel<group_t, scalar_t>), dim3(NUM_BLOCKS(batch_size)), dim3(NUM_THREADS), 0, 0,
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
batch_size);
}));
return b;
}
| 11f342a932cb396a0785f850c6b8df179fd84241.cu |
#include "lietorch_gpu.h"
#include <Eigen/Dense>
#include "common.h"
#include "dispatch.h"
#include "so3.h"
#include "rxso3.h"
#include "se3.h"
#include "sim3.h"
#define GPU_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i<n; i += blockDim.x * gridDim.x)
#define NUM_THREADS 256
#define NUM_BLOCKS(batch_size) ((batch_size + NUM_THREADS - 1) / NUM_THREADS)
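// GPU_1D_KERNEL_LOOP is a grid-stride loop over n work items; NUM_BLOCKS is the
// ceiling of batch_size / NUM_THREADS, so every batch element is covered.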
template <typename Group, typename scalar_t>
__global__ void exp_forward_kernel(const scalar_t* a_ptr, scalar_t* X_ptr, int num_threads) {
// exponential map forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a(a_ptr + i*Group::K);
Eigen::Map<Data>(X_ptr + i*Group::N) = Group::Exp(a).data();
}
}
template <typename Group, typename scalar_t>
__global__ void exp_backward_kernel(const scalar_t* grad, const scalar_t* a_ptr, scalar_t* da, int num_threads) {
// exponential map backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a(a_ptr + i*Group::K);
Grad dX(grad + i*Group::N);
Eigen::Map<Grad>(da + i*Group::K) = dX * Group::left_jacobian(a);
}
}
template <typename Group, typename scalar_t>
__global__ void log_forward_kernel(const scalar_t* X_ptr, scalar_t* a_ptr, int num_threads) {
// logarithm map forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a = Group(X_ptr + i*Group::N).Log();
Eigen::Map<Tangent>(a_ptr + i*Group::K) = a;
}
}
template <typename Group, typename scalar_t>
__global__ void log_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, scalar_t* dX, int num_threads) {
// logarithm map backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a = Group(X_ptr + i*Group::N).Log();
Grad da(grad + i*Group::K);
Eigen::Map<Grad>(dX + i*Group::N) = da * Group::left_jacobian_inverse(a);
}
}
template <typename Group, typename scalar_t>
__global__ void inv_forward_kernel(const scalar_t* X_ptr, scalar_t* Y_ptr, int num_threads) {
// group inverse forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Eigen::Map<Data>(Y_ptr + i*Group::N) = X.inv().data();
}
}
template <typename Group, typename scalar_t>
__global__ void inv_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, scalar_t *dX, int num_threads) {
// group inverse backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group Y = Group(X_ptr + i*Group::N).inv();
Grad dY(grad + i*Group::N);
Eigen::Map<Grad>(dX + i*Group::N) = -dY * Y.Adj();
}
}
template <typename Group, typename scalar_t>
__global__ void mul_forward_kernel(const scalar_t* X_ptr, const scalar_t* Y_ptr, scalar_t* Z_ptr, int num_threads) {
// group multiplication forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group Z = Group(X_ptr + i*Group::N) * Group(Y_ptr + i*Group::N);
Eigen::Map<Data>(Z_ptr + i*Group::N) = Z.data();
}
}
template <class Group, typename scalar_t>
__global__ void mul_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* Y_ptr, scalar_t* dX, scalar_t* dY, int num_threads) {
// group multiplication backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Grad dZ(grad + i*Group::N);
Group X(X_ptr + i*Group::N);
Eigen::Map<Grad>(dX + i*Group::N) = dZ;
Eigen::Map<Grad>(dY + i*Group::N) = dZ * X.Adj();
}
}
template <typename Group, typename scalar_t>
__global__ void adj_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) {
// adjoint forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent a(a_ptr + i*Group::K);
Eigen::Map<Tangent>(b_ptr + i*Group::K) = X.Adj(a);
}
}
template <typename Group, typename scalar_t>
__global__ void adj_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* dX, scalar_t* da, int num_threads) {
// adjoint backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Grad db(grad + i*Group::K);
Tangent a(a_ptr + i*Group::K);
Tangent b = X.Adj() * a;
Eigen::Map<Grad>(da + i*Group::K) = db * X.Adj();
Eigen::Map<Grad>(dX + i*Group::N) = -db * Group::adj(b);
}
}
template <typename Group, typename scalar_t>
__global__ void adjT_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) {
  // transposed adjoint forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent a(a_ptr + i*Group::K);
Eigen::Map<Tangent>(b_ptr + i*Group::K) = X.AdjT(a);
}
}
template <typename Group, typename scalar_t>
__global__ void adjT_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* dX, scalar_t* da, int num_threads) {
  // transposed adjoint backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent db(grad + i*Group::K);
Grad a(a_ptr + i*Group::K);
Eigen::Map<Tangent>(da + i*Group::K) = X.Adj(db);
Eigen::Map<Grad>(dX + i*Group::N) = -a * Group::adj(X.Adj(db));
}
}
template <typename Group, typename scalar_t>
__global__ void act_forward_kernel(const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* q_ptr, int num_threads) {
// action on point forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
using Point = Eigen::Matrix<scalar_t,3,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*3);
Eigen::Map<Point>(q_ptr + i*3) = X * p;
}
}
template <typename Group, typename scalar_t>
__global__ void act_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* dX, scalar_t* dp, int num_threads) {
  // action on point backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Point = Eigen::Matrix<scalar_t,3,1>;
using PointGrad = Eigen::Matrix<scalar_t,1,3>;
using Transformation = Eigen::Matrix<scalar_t,4,4>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*3);
PointGrad dq(grad + i*3);
Eigen::Map<PointGrad>(dp + i*3) = dq * X.Matrix4x4().block<3,3>(0,0);
Eigen::Map<Grad>(dX + i*Group::N) = dq * Group::act_jacobian(X*p);
}
}
template <typename Group, typename scalar_t>
__global__ void act4_forward_kernel(const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* q_ptr, int num_threads) {
// action on point forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
using Point = Eigen::Matrix<scalar_t,4,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*4);
Eigen::Map<Point>(q_ptr + i*4) = X.act4(p);
}
}
template <typename Group, typename scalar_t>
__global__ void act4_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* dX, scalar_t* dp, int num_threads) {
  // action on homogeneous point backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Point = Eigen::Matrix<scalar_t,4,1>;
using PointGrad = Eigen::Matrix<scalar_t,1,4>;
using Transformation = Eigen::Matrix<scalar_t,4,4>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*4);
PointGrad dq(grad + i*4);
Eigen::Map<PointGrad>(dp + i*4) = dq * X.Matrix4x4();
const Point q = X.act4(p);
Eigen::Map<Grad>(dX + i*Group::N) = dq * Group::act4_jacobian(q);
}
}
template <typename Group, typename scalar_t>
__global__ void as_matrix_forward_kernel(const scalar_t* X_ptr, scalar_t* T_ptr, int num_threads) {
// convert to 4x4 matrix representation
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
using Matrix4 = Eigen::Matrix<scalar_t,4,4,Eigen::RowMajor>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Eigen::Map<Matrix4>(T_ptr + i*16) = X.Matrix4x4();
}
}
template <typename Group, typename scalar_t>
__global__ void orthogonal_projector_kernel(const scalar_t* X_ptr, scalar_t* P_ptr, int num_threads) {
// orthogonal projection matrix
using Proj = Eigen::Matrix<scalar_t,Group::N,Group::N,Eigen::RowMajor>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Eigen::Map<Proj>(P_ptr + i*Group::N*Group::N) = X.orthogonal_projector();
}
}
template <typename Group, typename scalar_t>
__global__ void jleft_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) {
// left jacobian inverse action
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent a(a_ptr + i*Group::K);
Tangent b = Group::left_jacobian_inverse(X.Log()) * a;
Eigen::Map<Tangent>(b_ptr + i*Group::K) = b;
}
}
// unary operations
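// Host-side launchers: DISPATCH_GROUP_AND_FLOATING_TYPES instantiates the kernel
// for the group selected by group_id (SO3, RxSO3, SE3 or Sim3, per the headers
// included above) and the tensor's floating type, then launches it with one
// work item per batch element.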
torch::Tensor exp_forward_gpu(int group_id, torch::Tensor a) {
int batch_size = a.size(0);
torch::Tensor X;
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, a.type(), "exp_forward_kernel", ([&] {
X = torch::zeros({batch_size, group_t::N}, a.options());
exp_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
a.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
batch_size);
}));
return X;
}
std::vector<torch::Tensor> exp_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor a) {
int batch_size = a.size(0);
torch::Tensor da = torch::zeros(a.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, a.type(), "exp_backward_kernel", ([&] {
exp_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
da.data_ptr<scalar_t>(),
batch_size);
}));
return {da};
}
torch::Tensor log_forward_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor a;
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "log_forward_kernel", ([&] {
a = torch::zeros({batch_size, group_t::K}, X.options());
log_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
batch_size);
}));
return a;
}
std::vector<torch::Tensor> log_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "log_backward_kernel", ([&] {
log_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
batch_size);
}));
return {dX};
}
torch::Tensor inv_forward_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor Y = torch::zeros_like(X);
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "inv_forward_kernel", ([&] {
inv_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
Y.data_ptr<scalar_t>(),
batch_size);
}));
return Y;
}
std::vector<torch::Tensor> inv_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "inv_backward_kernel", ([&] {
inv_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
batch_size);
}));
return {dX};
}
// binary operations
torch::Tensor mul_forward_gpu(int group_id, torch::Tensor X, torch::Tensor Y) {
int batch_size = X.size(0);
torch::Tensor Z = torch::zeros_like(X);
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "mul_forward_kernel", ([&] {
mul_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
Y.data_ptr<scalar_t>(),
Z.data_ptr<scalar_t>(),
batch_size);
}));
return Z;
}
std::vector<torch::Tensor> mul_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor Y) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor dY = torch::zeros(Y.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "mul_backward_kernel", ([&] {
mul_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
Y.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
dY.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, dY};
}
torch::Tensor adj_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor b = torch::zeros(a.sizes(), a.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adj_forward_kernel", ([&] {
adj_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
batch_size);
}));
return b;
}
std::vector<torch::Tensor> adj_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor da = torch::zeros(a.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adj_backward_kernel", ([&] {
adj_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
da.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, da};
}
torch::Tensor adjT_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor b = torch::zeros(a.sizes(), a.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adjT_forward_kernel", ([&] {
adjT_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
batch_size);
}));
return b;
}
std::vector<torch::Tensor> adjT_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor da = torch::zeros(a.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adjT_backward_kernel", ([&] {
adjT_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
da.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, da};
}
torch::Tensor act_forward_gpu(int group_id, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor q = torch::zeros(p.sizes(), p.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act_forward_kernel", ([&] {
act_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
q.data_ptr<scalar_t>(),
batch_size);
}));
return q;
}
std::vector<torch::Tensor> act_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor dp = torch::zeros(p.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act_backward_kernel", ([&] {
act_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
dp.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, dp};
}
torch::Tensor act4_forward_gpu(int group_id, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor q = torch::zeros(p.sizes(), p.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act4_forward_kernel", ([&] {
act4_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
q.data_ptr<scalar_t>(),
batch_size);
}));
return q;
}
std::vector<torch::Tensor> act4_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor dp = torch::zeros(p.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act4_backward_kernel", ([&] {
act4_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
dp.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, dp};
}
torch::Tensor as_matrix_forward_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor T4x4 = torch::zeros({X.size(0), 4, 4}, X.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "as_matrix_forward_kernel", ([&] {
as_matrix_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
T4x4.data_ptr<scalar_t>(),
batch_size);
}));
return T4x4;
}
torch::Tensor orthogonal_projector_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor P;
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "orthogonal_projector_kernel", ([&] {
P = torch::zeros({X.size(0), group_t::N, group_t::N}, X.options());
orthogonal_projector_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
P.data_ptr<scalar_t>(),
batch_size);
}));
return P;
}
torch::Tensor jleft_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor b = torch::zeros(a.sizes(), a.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "jleft_forward_kernel", ([&] {
jleft_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
batch_size);
}));
return b;
}
|
285f278a20eb3ad4ecc030142ab0a722be610fb4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
template <typename T, typename AccT, int D>
__global__ void _ReduceSumGrad(
const int N,
const SimpleArray<int, D> X_dims,
const SimpleArray<int, D> Y_dims,
const SimpleArray<int, D> Y_strides,
const AccT scale,
const T* dy,
T* dx) {
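  // Each dx element xi is decomposed into per-dimension coordinates via X_dims;
  // wrapping each coordinate with the (possibly broadcast) Y_dims and applying
  // Y_strides yields the reduced-output index yi, whose upstream gradient is
  // scaled and broadcast back.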
CUDA_1D_KERNEL_LOOP(xi, N) {
int yi = 0, tmp = xi;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(X_dims.data[d], tmp, &tmp, &r);
yi += (r % Y_dims.data[d]) * Y_strides.data[d];
}
dx[xi] = convert::To<T>(convert::To<AccT>(__ldg(dy + yi)) * scale);
}
}
template <typename T, typename AccT, int D>
void _ReduceSumGradImpl(
const int64_t* x_dims,
const int64_t* y_dims,
const int64_t* y_strides,
const AccT scale,
const T* dy,
T* dx,
CUDAContext* ctx) {
SimpleArray<int, D> X_dims, Y_dims, Y_strides;
const auto N =
std::accumulate(x_dims, x_dims + D, 1, std::multiplies<int64_t>());
for (int i = 0; i < D; ++i) {
X_dims.data[i] = x_dims[i];
Y_dims.data[i] = y_dims[i];
Y_strides.data[i] = y_strides[i];
}
hipLaunchKernelGGL(( _ReduceSumGrad), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N, X_dims, Y_dims, Y_strides, scale, dy, dx);
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_GRAD_KERNEL_LAUNCHER(T) \
template <> \
void ReduceSumGrad<T, CUDAContext>( \
const int num_dims, \
const int64_t* x_dims, \
const int64_t* y_dims, \
const int64_t* y_strides, \
const float scale, \
const T* dy, \
T* dx, \
CUDAContext* ctx) { \
CUDA_TENSOR_DIMS_CHECK(num_dims); \
DISPATCH_FUNC_BY_VALUE_WITH_TYPE_2( \
_ReduceSumGradImpl, \
math::ScalarType<T>::type, \
math::AccmulatorType<T>::type, \
num_dims, \
x_dims, \
y_dims, \
y_strides, \
convert::To<math::AccmulatorType<T>::type>(scale), \
reinterpret_cast<const math::ScalarType<T>::type*>(dy), \
reinterpret_cast<math::ScalarType<T>::type*>(dx), \
ctx); \
}
DEFINE_GRAD_KERNEL_LAUNCHER(float16);
DEFINE_GRAD_KERNEL_LAUNCHER(float);
DEFINE_GRAD_KERNEL_LAUNCHER(double);
#undef DEFINE_GRAD_KERNEL_LAUNCHER
} // namespace kernels
} // namespace dragon
#endif // USE_ROCM
| 285f278a20eb3ad4ecc030142ab0a722be610fb4.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
template <typename T, typename AccT, int D>
__global__ void _ReduceSumGrad(
const int N,
const SimpleArray<int, D> X_dims,
const SimpleArray<int, D> Y_dims,
const SimpleArray<int, D> Y_strides,
const AccT scale,
const T* dy,
T* dx) {
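  // Each dx element xi is decomposed into per-dimension coordinates via X_dims;
  // wrapping each coordinate with the (possibly broadcast) Y_dims and applying
  // Y_strides yields the reduced-output index yi, whose upstream gradient is
  // scaled and broadcast back.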
CUDA_1D_KERNEL_LOOP(xi, N) {
int yi = 0, tmp = xi;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(X_dims.data[d], tmp, &tmp, &r);
yi += (r % Y_dims.data[d]) * Y_strides.data[d];
}
dx[xi] = convert::To<T>(convert::To<AccT>(__ldg(dy + yi)) * scale);
}
}
template <typename T, typename AccT, int D>
void _ReduceSumGradImpl(
const int64_t* x_dims,
const int64_t* y_dims,
const int64_t* y_strides,
const AccT scale,
const T* dy,
T* dx,
CUDAContext* ctx) {
SimpleArray<int, D> X_dims, Y_dims, Y_strides;
const auto N =
std::accumulate(x_dims, x_dims + D, 1, std::multiplies<int64_t>());
for (int i = 0; i < D; ++i) {
X_dims.data[i] = x_dims[i];
Y_dims.data[i] = y_dims[i];
Y_strides.data[i] = y_strides[i];
}
_ReduceSumGrad<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N, X_dims, Y_dims, Y_strides, scale, dy, dx);
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_GRAD_KERNEL_LAUNCHER(T) \
template <> \
void ReduceSumGrad<T, CUDAContext>( \
const int num_dims, \
const int64_t* x_dims, \
const int64_t* y_dims, \
const int64_t* y_strides, \
const float scale, \
const T* dy, \
T* dx, \
CUDAContext* ctx) { \
CUDA_TENSOR_DIMS_CHECK(num_dims); \
DISPATCH_FUNC_BY_VALUE_WITH_TYPE_2( \
_ReduceSumGradImpl, \
math::ScalarType<T>::type, \
math::AccmulatorType<T>::type, \
num_dims, \
x_dims, \
y_dims, \
y_strides, \
convert::To<math::AccmulatorType<T>::type>(scale), \
reinterpret_cast<const math::ScalarType<T>::type*>(dy), \
reinterpret_cast<math::ScalarType<T>::type*>(dx), \
ctx); \
}
DEFINE_GRAD_KERNEL_LAUNCHER(float16);
DEFINE_GRAD_KERNEL_LAUNCHER(float);
DEFINE_GRAD_KERNEL_LAUNCHER(double);
#undef DEFINE_GRAD_KERNEL_LAUNCHER
} // namespace kernels
} // namespace dragon
#endif // USE_CUDA
|
f39131d7222b3a9fbc8d5a5c1b205fe37053f15b.hip | // !!! This is a file automatically generated by hipify!!!
// Basically lifted straight from test/cxx11_tensor_argmax_cuda.cu
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC argmax_all
#define EIGEN_USE_GPU
// this is needed on Beignet 1.2.1, Intel HD5500 (as far as I can tell?)
// #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int32_t
// #define CALL_SUBTEST_1(expr) expr
// #define CALL_SUBTEST_2(expr) expr
// #define CALL_SUBTEST_3(expr) expr
#include <unsupported/Eigen/CXX11/Tensor>
#include "main.h"
#include <iostream>
using Eigen::Tensor;
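// Each test uploads a tensor to the GPU, evaluates argmax()/argmin() through
// TensorMap expressions on a GpuDevice, copies the resulting flat index back,
// and compares it with the known location of the planted extremum (flattened
// with row-major or column-major strides as appropriate).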
void test_cuda_simple_argmax_tiny_rowmajor()
{
std::cout << "test" << std::endl;
#define N 5
Tensor<float, 1, RowMajor> in(Eigen::array<DenseIndex, 1>(N));
Tensor<DenseIndex, 1, RowMajor> out_max(Eigen::array<DenseIndex, 1>(1));
in(0) = 3.0f;
in(1) = 4.0f;
in(2) = 7.0f;
in(3) = 5.0f;
in(4) = 1.0f;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_max.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_max;
hipMalloc((void**)(&d_in), in_bytes);
hipMalloc((void**)(&d_out_max), out_bytes);
hipMemcpy(d_in, in.data(), in_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 1, RowMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 1>(N));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, RowMajor>, Aligned > gpu_out_max(d_out_max, Eigen::array<DenseIndex, 1>(1));
gpu_out_max.device(gpu_device) = gpu_in.argmax();
assert(hipMemcpyAsync(out_max.data(), d_out_max, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
VERIFY_IS_EQUAL(out_max(Eigen::array<DenseIndex, 1>(0)), 2);
hipFree(d_in);
hipFree(d_out_max);
}
void test_cuda_simple_argmax_rowmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, RowMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, RowMajor> out_max(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_max.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_max;
hipMalloc((void**)(&d_in), in_bytes);
hipMalloc((void**)(&d_out_max), out_bytes);
hipMemcpy(d_in, in.data(), in_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, RowMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, RowMajor>, Aligned > gpu_out_max(d_out_max, Eigen::array<DenseIndex, 1>(1));
gpu_out_max.device(gpu_device) = gpu_in.argmax();
assert(hipMemcpyAsync(out_max.data(), d_out_max, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
VERIFY_IS_EQUAL(out_max(Eigen::array<DenseIndex, 1>(0)), 37*53*97 + 43*97 + 88);
hipFree(d_in);
hipFree(d_out_max);
}
void test_cuda_simple_argmin_rowmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, RowMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, RowMajor> out_min(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_min.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_min;
hipMalloc((void**)(&d_in), in_bytes);
hipMalloc((void**)(&d_out_min), out_bytes);
hipMemcpy(d_in, in.data(), in_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, RowMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, RowMajor>, Aligned > gpu_out_min(d_out_min, Eigen::array<DenseIndex, 1>(1));
gpu_out_min.device(gpu_device) = gpu_in.argmin();
assert(hipMemcpyAsync(out_min.data(), d_out_min, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
VERIFY_IS_EQUAL(out_min(Eigen::array<DenseIndex, 1>(0)), 21*53*97 + 29*97 + 76);
hipFree(d_in);
hipFree(d_out_min);
}
void test_cuda_simple_argmax_colmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, ColMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, ColMajor> out_max(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_max.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_max;
hipMalloc((void**)(&d_in), in_bytes);
hipMalloc((void**)(&d_out_max), out_bytes);
hipMemcpy(d_in, in.data(), in_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, ColMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, ColMajor>, Aligned > gpu_out_max(d_out_max, Eigen::array<DenseIndex, 1>(1));
gpu_out_max.device(gpu_device) = gpu_in.argmax();
assert(hipMemcpyAsync(out_max.data(), d_out_max, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
VERIFY_IS_EQUAL(out_max(Eigen::array<DenseIndex, 1>(0)), 88*53*72 + 43*72 + 37);
hipFree(d_in);
hipFree(d_out_max);
}
void test_cuda_simple_argmin_colmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, ColMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, ColMajor> out_min(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_min.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_min;
hipMalloc((void**)(&d_in), in_bytes);
hipMalloc((void**)(&d_out_min), out_bytes);
hipMemcpy(d_in, in.data(), in_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, ColMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, ColMajor>, Aligned > gpu_out_min(d_out_min, Eigen::array<DenseIndex, 1>(1));
gpu_out_min.device(gpu_device) = gpu_in.argmin();
assert(hipMemcpyAsync(out_min.data(), d_out_min, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
VERIFY_IS_EQUAL(out_min(Eigen::array<DenseIndex, 1>(0)), 76*53*72 + 29*72 + 21);
hipFree(d_in);
hipFree(d_out_min);
}
template <int DataLayout>
void test_cuda_argmax_dim()
{
Tensor<float, 4, DataLayout> tensor(2,3,5,7);
std::vector<int> dims;
dims.push_back(2); dims.push_back(3); dims.push_back(5); dims.push_back(7);
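  // For each reduction dimension: plant the maximum (10.0) at index 0 along that
  // dimension and expect argmax(dim) to return 0 everywhere, then plant a larger
  // value (20.0) at the last index and expect dimension(dim) - 1.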
for (int dim = 0; dim < 4; ++dim) {
tensor.setRandom();
tensor = (tensor + tensor.constant(0.5)).log();
array<DenseIndex, 3> out_shape;
for (int d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1];
Tensor<DenseIndex, 3, DataLayout> tensor_arg(out_shape);
array<DenseIndex, 4> ix;
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != 0) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0
tensor(ix) = 10.0;
}
}
}
}
std::size_t in_bytes = tensor.size() * sizeof(float);
std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out;
hipMalloc((void**)(&d_in), in_bytes);
hipMalloc((void**)(&d_out), out_bytes);
hipMemcpy(d_in, tensor.data(), in_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 4>(2, 3, 5, 7));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 3, DataLayout>, Aligned > gpu_out(d_out, out_shape);
gpu_out.device(gpu_device) = gpu_in.argmax(dim);
assert(hipMemcpyAsync(tensor_arg.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
VERIFY_IS_EQUAL(tensor_arg.size(),
size_t(2*3*5*7 / tensor.dimension(dim)));
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect max to be in the first index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
}
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != tensor.dimension(dim) - 1) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0
tensor(ix) = 20.0;
}
}
}
}
hipMemcpy(d_in, tensor.data(), in_bytes, hipMemcpyHostToDevice);
gpu_out.device(gpu_device) = gpu_in.argmax(dim);
assert(hipMemcpyAsync(tensor_arg.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect max to be in the last index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
}
hipFree(d_in);
hipFree(d_out);
}
}
template <int DataLayout>
void test_cuda_argmin_dim()
{
Tensor<float, 4, DataLayout> tensor(2,3,5,7);
std::vector<int> dims;
dims.push_back(2); dims.push_back(3); dims.push_back(5); dims.push_back(7);
for (int dim = 0; dim < 4; ++dim) {
tensor.setRandom();
tensor = (tensor + tensor.constant(0.5)).log();
array<DenseIndex, 3> out_shape;
for (int d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1];
Tensor<DenseIndex, 3, DataLayout> tensor_arg(out_shape);
array<DenseIndex, 4> ix;
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != 0) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = -10.0
tensor(ix) = -10.0;
}
}
}
}
std::size_t in_bytes = tensor.size() * sizeof(float);
std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out;
hipMalloc((void**)(&d_in), in_bytes);
hipMalloc((void**)(&d_out), out_bytes);
hipMemcpy(d_in, tensor.data(), in_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 4>(2, 3, 5, 7));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 3, DataLayout>, Aligned > gpu_out(d_out, out_shape);
gpu_out.device(gpu_device) = gpu_in.argmin(dim);
assert(hipMemcpyAsync(tensor_arg.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
VERIFY_IS_EQUAL(tensor_arg.size(),
size_t(2*3*5*7 / tensor.dimension(dim)));
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect min to be in the first index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
}
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != tensor.dimension(dim) - 1) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = -20.0
tensor(ix) = -20.0;
}
}
}
}
hipMemcpy(d_in, tensor.data(), in_bytes, hipMemcpyHostToDevice);
gpu_out.device(gpu_device) = gpu_in.argmin(dim);
assert(hipMemcpyAsync(tensor_arg.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect min to be in the last index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
}
hipFree(d_in);
hipFree(d_out);
}
}
void test_argmax_all()
{
std::cout << "test_argmax_all()" << std::endl;
test_cuda_simple_argmax_tiny_rowmajor();
test_cuda_simple_argmax_rowmajor();
test_cuda_simple_argmin_rowmajor();
test_cuda_simple_argmax_colmajor();
test_cuda_simple_argmin_colmajor();
test_cuda_argmax_dim<RowMajor>();
test_cuda_argmax_dim<ColMajor>();
test_cuda_argmin_dim<RowMajor>();
test_cuda_argmin_dim<ColMajor>();
}
| f39131d7222b3a9fbc8d5a5c1b205fe37053f15b.cu | // Basically lifted straight from test/cxx11_tensor_argmax_cuda.cu
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC argmax_all
#define EIGEN_USE_GPU
// this is needed on Beignet 1.2.1, Intel HD5500 (as far as I can tell?)
// #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int32_t
// #define CALL_SUBTEST_1(expr) expr
// #define CALL_SUBTEST_2(expr) expr
// #define CALL_SUBTEST_3(expr) expr
#include <unsupported/Eigen/CXX11/Tensor>
#include "main.h"
#include <iostream>
using Eigen::Tensor;
void test_cuda_simple_argmax_tiny_rowmajor()
{
std::cout << "test" << std::endl;
#define N 5
Tensor<float, 1, RowMajor> in(Eigen::array<DenseIndex, 1>(N));
Tensor<DenseIndex, 1, RowMajor> out_max(Eigen::array<DenseIndex, 1>(1));
in(0) = 3.0f;
in(1) = 4.0f;
in(2) = 7.0f;
in(3) = 5.0f;
in(4) = 1.0f;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_max.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_max;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out_max), out_bytes);
cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 1, RowMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 1>(N));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, RowMajor>, Aligned > gpu_out_max(d_out_max, Eigen::array<DenseIndex, 1>(1));
gpu_out_max.device(gpu_device) = gpu_in.argmax();
assert(cudaMemcpyAsync(out_max.data(), d_out_max, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(out_max(Eigen::array<DenseIndex, 1>(0)), 2);
cudaFree(d_in);
cudaFree(d_out_max);
}
void test_cuda_simple_argmax_rowmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, RowMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, RowMajor> out_max(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_max.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_max;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out_max), out_bytes);
cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, RowMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, RowMajor>, Aligned > gpu_out_max(d_out_max, Eigen::array<DenseIndex, 1>(1));
gpu_out_max.device(gpu_device) = gpu_in.argmax();
assert(cudaMemcpyAsync(out_max.data(), d_out_max, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
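// The max was written at (37, 43, 88); its row-major flat index in a 72x53x97
// tensor is 37*(53*97) + 43*97 + 88.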
VERIFY_IS_EQUAL(out_max(Eigen::array<DenseIndex, 1>(0)), 37*53*97 + 43*97 + 88);
cudaFree(d_in);
cudaFree(d_out_max);
}
void test_cuda_simple_argmin_rowmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, RowMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, RowMajor> out_min(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_min.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_min;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out_min), out_bytes);
cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, RowMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, RowMajor>, Aligned > gpu_out_min(d_out_min, Eigen::array<DenseIndex, 1>(1));
gpu_out_min.device(gpu_device) = gpu_in.argmin();
assert(cudaMemcpyAsync(out_min.data(), d_out_min, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(out_min(Eigen::array<DenseIndex, 1>(0)), 21*53*97 + 29*97 + 76);
cudaFree(d_in);
cudaFree(d_out_min);
}
void test_cuda_simple_argmax_colmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, ColMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, ColMajor> out_max(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_max.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_max;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out_max), out_bytes);
cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, ColMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, ColMajor>, Aligned > gpu_out_max(d_out_max, Eigen::array<DenseIndex, 1>(1));
gpu_out_max.device(gpu_device) = gpu_in.argmax();
assert(cudaMemcpyAsync(out_max.data(), d_out_max, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
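// The max was written at (37, 43, 88); its column-major flat index in a 72x53x97
// tensor is 37 + 43*72 + 88*(72*53), i.e. 88*53*72 + 43*72 + 37.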
VERIFY_IS_EQUAL(out_max(Eigen::array<DenseIndex, 1>(0)), 88*53*72 + 43*72 + 37);
cudaFree(d_in);
cudaFree(d_out_max);
}
void test_cuda_simple_argmin_colmajor()
{
std::cout << "test" << std::endl;
Tensor<float, 3, ColMajor> in(Eigen::array<DenseIndex, 3>(72,53,97));
Tensor<DenseIndex, 1, ColMajor> out_min(Eigen::array<DenseIndex, 1>(1));
in.setRandom();
in *= in.constant(100.0);
in(21, 29, 76) = -1000.0;
in(37, 43, 88) = 1000.0;
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = out_min.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out_min;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out_min), out_bytes);
cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, ColMajor>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 3>(72,53,97));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 1, ColMajor>, Aligned > gpu_out_min(d_out_min, Eigen::array<DenseIndex, 1>(1));
gpu_out_min.device(gpu_device) = gpu_in.argmin();
assert(cudaMemcpyAsync(out_min.data(), d_out_min, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(out_min(Eigen::array<DenseIndex, 1>(0)), 76*53*72 + 29*72 + 21);
cudaFree(d_in);
cudaFree(d_out_min);
}
template <int DataLayout>
void test_cuda_argmax_dim()
{
Tensor<float, 4, DataLayout> tensor(2,3,5,7);
std::vector<int> dims;
dims.push_back(2); dims.push_back(3); dims.push_back(5); dims.push_back(7);
for (int dim = 0; dim < 4; ++dim) {
tensor.setRandom();
tensor = (tensor + tensor.constant(0.5)).log();
array<DenseIndex, 3> out_shape;
for (int d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1];
Tensor<DenseIndex, 3, DataLayout> tensor_arg(out_shape);
array<DenseIndex, 4> ix;
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != 0) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0
tensor(ix) = 10.0;
}
}
}
}
std::size_t in_bytes = tensor.size() * sizeof(float);
std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out), out_bytes);
cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 4>(2, 3, 5, 7));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 3, DataLayout>, Aligned > gpu_out(d_out, out_shape);
gpu_out.device(gpu_device) = gpu_in.argmax(dim);
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(tensor_arg.size(),
size_t(2*3*5*7 / tensor.dimension(dim)));
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect max to be in the first index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
}
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != tensor.dimension(dim) - 1) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0
tensor(ix) = 20.0;
}
}
}
}
cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
gpu_out.device(gpu_device) = gpu_in.argmax(dim);
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect max to be in the last index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
}
cudaFree(d_in);
cudaFree(d_out);
}
}
template <int DataLayout>
void test_cuda_argmin_dim()
{
Tensor<float, 4, DataLayout> tensor(2,3,5,7);
std::vector<int> dims;
dims.push_back(2); dims.push_back(3); dims.push_back(5); dims.push_back(7);
for (int dim = 0; dim < 4; ++dim) {
tensor.setRandom();
tensor = (tensor + tensor.constant(0.5)).log();
array<DenseIndex, 3> out_shape;
for (int d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1];
Tensor<DenseIndex, 3, DataLayout> tensor_arg(out_shape);
array<DenseIndex, 4> ix;
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != 0) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = -10.0
tensor(ix) = -10.0;
}
}
}
}
std::size_t in_bytes = tensor.size() * sizeof(float);
std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex);
float* d_in;
DenseIndex* d_out;
cudaMalloc((void**)(&d_in), in_bytes);
cudaMalloc((void**)(&d_out), out_bytes);
cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout>, Aligned > gpu_in(d_in, Eigen::array<DenseIndex, 4>(2, 3, 5, 7));
Eigen::TensorMap<Eigen::Tensor<DenseIndex, 3, DataLayout>, Aligned > gpu_out(d_out, out_shape);
gpu_out.device(gpu_device) = gpu_in.argmin(dim);
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
VERIFY_IS_EQUAL(tensor_arg.size(),
size_t(2*3*5*7 / tensor.dimension(dim)));
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect min to be in the first index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], 0);
}
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 7; ++l) {
ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l;
if (ix[dim] != tensor.dimension(dim) - 1) continue;
// suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = -20.0
tensor(ix) = -20.0;
}
}
}
}
cudaMemcpy(d_in, tensor.data(), in_bytes, cudaMemcpyHostToDevice);
gpu_out.device(gpu_device) = gpu_in.argmin(dim);
assert(cudaMemcpyAsync(tensor_arg.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
for (DenseIndex n = 0; n < tensor_arg.size(); ++n) {
// Expect min to be in the last index of the reduced dimension
VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1);
}
cudaFree(d_in);
cudaFree(d_out);
}
}
void test_argmax_all()
{
std::cout << "test_argmax_all()" << std::endl;
test_cuda_simple_argmax_tiny_rowmajor();
test_cuda_simple_argmax_rowmajor();
test_cuda_simple_argmin_rowmajor();
test_cuda_simple_argmax_colmajor();
test_cuda_simple_argmin_colmajor();
test_cuda_argmax_dim<RowMajor>();
test_cuda_argmax_dim<ColMajor>();
test_cuda_argmin_dim<RowMajor>();
test_cuda_argmin_dim<ColMajor>();
}
|
310b603c54c6dd48e8f4156b695e17446a121ed8.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2019 XGBoost contributors
*/
#include <dmlc/omp.h>
#include <dmlc/timer.h>
#include <xgboost/logging.h>
#include <xgboost/objective.h>
#include <vector>
#include <algorithm>
#include <utility>
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "../common/math.h"
#include "../common/random.h"
#if defined(__HIPCC__)
#include <thrust/sort.h>
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/random/uniform_int_distribution.h>
#include <thrust/random/linear_congruential_engine.h>
#include <hipcub/hipcub.hpp>
#include "../common/device_helpers.cuh"
#endif
namespace xgboost {
namespace obj {
#if defined(XGBOOST_USE_CUDA) && !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(rank_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct LambdaRankParam : public XGBoostParameter<LambdaRankParam> {
size_t num_pairsample;
float fix_list_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(LambdaRankParam) {
DMLC_DECLARE_FIELD(num_pairsample).set_lower_bound(1).set_default(1)
.describe("Number of pair generated for each instance.");
DMLC_DECLARE_FIELD(fix_list_weight).set_lower_bound(0.0f).set_default(0.0f)
.describe("Normalize the weight of each list by this value,"
" if equals 0, no effect will happen");
}
};
#if defined(__HIPCC__)
// Helper functions
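// Note: both helpers below assume 'items' is sorted in descending order.
// CountNumItemsToTheLeftOf counts the entries strictly greater than v (those placed
// to its left), CountNumItemsToTheRightOf the entries strictly smaller than v (those
// placed to its right); entries equal to v are counted by neither.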
template <typename T>
XGBOOST_DEVICE __forceinline__ uint32_t
CountNumItemsToTheLeftOf(const T * __restrict__ items, uint32_t n, T v) {
return dh::LowerBound(items, n, v, thrust::greater<T>());
}
template <typename T>
XGBOOST_DEVICE __forceinline__ uint32_t
CountNumItemsToTheRightOf(const T * __restrict__ items, uint32_t n, T v) {
return n - dh::UpperBound(items, n, v, thrust::greater<T>());
}
#endif
/*! \brief helper information in a list */
struct ListEntry {
/*! \brief the predicted score of this entry */
bst_float pred;
/*! \brief the actual label of the entry */
bst_float label;
/*! \brief row index in the data matrix */
unsigned rindex;
// constructor
ListEntry(bst_float pred, bst_float label, unsigned rindex)
: pred(pred), label(label), rindex(rindex) {}
// comparator by prediction
inline static bool CmpPred(const ListEntry &a, const ListEntry &b) {
return a.pred > b.pred;
}
// comparator by label
inline static bool CmpLabel(const ListEntry &a, const ListEntry &b) {
return a.label > b.label;
}
};
/*! \brief a pair in the lambda rank */
struct LambdaPair {
/*! \brief positive index: this is a position in the list */
unsigned pos_index;
/*! \brief negative index: this is a position in the list */
unsigned neg_index;
/*! \brief weight to be filled in */
bst_float weight;
// constructor
LambdaPair(unsigned pos_index, unsigned neg_index)
: pos_index(pos_index), neg_index(neg_index), weight(1.0f) {}
// constructor
LambdaPair(unsigned pos_index, unsigned neg_index, bst_float weight)
: pos_index(pos_index), neg_index(neg_index), weight(weight) {}
};
class PairwiseLambdaWeightComputer {
public:
/*!
* \brief get lambda weight for existing pairs - for pairwise objective
* \param sorted_list a list that is sorted by pred score
* \param io_pairs record of pairs, containing the pairs to fill in weights
*/
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {}
static char const* Name() {
return "rank:pairwise";
}
#if defined(__HIPCC__)
PairwiseLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter) {}
class PairwiseLambdaWeightMultiplier {
public:
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
return 1.0f;
}
};
inline const PairwiseLambdaWeightMultiplier GetWeightMultiplier() const {
return {};
}
#endif
};
#if defined(__HIPCC__)
class BaseLambdaWeightMultiplier {
public:
BaseLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const dh::SegmentSorter<float> &segment_pred_sorter)
: dsorted_labels_(segment_label_sorter.GetItemsSpan()),
dorig_pos_(segment_label_sorter.GetOriginalPositionsSpan()),
dgroups_(segment_label_sorter.GetGroupsSpan()),
dindexable_sorted_preds_pos_(segment_pred_sorter.GetIndexableSortedPositionsSpan()) {}
protected:
const common::Span<const float> dsorted_labels_; // Labels sorted within a group
const common::Span<const uint32_t> dorig_pos_; // Original indices of the labels
// before they are sorted
const common::Span<const uint32_t> dgroups_; // The group indices
// Where can a prediction for a label be found in the original array, when they are sorted
const common::Span<const uint32_t> dindexable_sorted_preds_pos_;
};
// While computing the weight that needs to be adjusted by this ranking objective, we need
// to figure out where positive and negative labels chosen earlier exist, if the group
// were to be sorted by its predictions. To accommodate this, we employ the following algorithm.
// For a given group, let's assume the following:
// labels: 1 5 9 2 4 8 0 7 6 3
// predictions: 1 9 0 8 2 7 3 6 5 4
// position: 0 1 2 3 4 5 6 7 8 9
//
// After label sort:
// labels: 9 8 7 6 5 4 3 2 1 0
// position: 2 5 7 8 1 4 9 3 0 6
//
// After prediction sort:
// predictions: 9 8 7 6 5 4 3 2 1 0
// position: 1 3 5 7 8 9 6 4 0 2
//
// If a sorted label at position 'x' is chosen, then we need to find out where the prediction
// for this label 'x' exists, if the group were to be sorted by predictions.
// We first take the sorted prediction positions:
// position: 1 3 5 7 8 9 6 4 0 2
// at indices: 0 1 2 3 4 5 6 7 8 9
//
// We create a sorted prediction positional array, such that value at position 'x' gives
// us the position in the sorted prediction array where its related prediction lies.
// dindexable_sorted_preds_pos_: 8 0 9 1 7 2 6 3 4 5
// at indices: 0 1 2 3 4 5 6 7 8 9
// Basically, swap the previous 2 arrays, sort the indices and reorder positions
// for an O(1) lookup using the position where the sorted label exists.
//
// This type does that using the SegmentSorter
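// Illustrative lookup (this mirrors what the NDCG/MAP weight multipliers do):
//   auto orig = dorig_pos_[pidx];                    // raw-data position of the sorted label
//   auto rank = dindexable_sorted_preds_pos_[orig];  // its rank when sorted by prediction
//   auto rank_in_group = rank - dgroups_[gidx];      // rank relative to the group start
// Each step is an O(1) array lookup.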
class IndexablePredictionSorter {
public:
IndexablePredictionSorter(const bst_float *dpreds,
const dh::SegmentSorter<float> &segment_label_sorter) {
// Sort the predictions first
segment_pred_sorter_.SortItems(dpreds, segment_label_sorter.GetNumItems(),
segment_label_sorter.GetGroupSegmentsSpan());
// Create an index for the sorted prediction positions
segment_pred_sorter_.CreateIndexableSortedPositions();
}
inline const dh::SegmentSorter<float> &GetPredictionSorter() const {
return segment_pred_sorter_;
}
private:
dh::SegmentSorter<float> segment_pred_sorter_; // For sorting the predictions
};
#endif
// beta version: NDCG lambda rank
class NDCGLambdaWeightComputer
#if defined(__HIPCC__)
: public IndexablePredictionSorter
#endif
{
public:
#if defined(__HIPCC__)
// This function object computes the item's DCG value
class ComputeItemDCG : public thrust::unary_function<uint32_t, float> {
public:
XGBOOST_DEVICE ComputeItemDCG(const common::Span<const float> &dsorted_labels,
const common::Span<const uint32_t> &dgroups,
const common::Span<const uint32_t> &gidxs)
: dsorted_labels_(dsorted_labels),
dgroups_(dgroups),
dgidxs_(gidxs) {}
// Compute DCG for the item at 'idx'
__device__ __forceinline__ float operator()(uint32_t idx) const {
return ComputeItemDCGWeight(dsorted_labels_[idx], idx - dgroups_[dgidxs_[idx]]);
}
private:
const common::Span<const float> dsorted_labels_; // Labels sorted within a group
const common::Span<const uint32_t> dgroups_; // The group indices - where each group
// begins and ends
const common::Span<const uint32_t> dgidxs_; // The group each item belongs to
};
// Type containing device pointers that can be cheaply copied on the kernel
class NDCGLambdaWeightMultiplier : public BaseLambdaWeightMultiplier {
public:
NDCGLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const NDCGLambdaWeightComputer &lwc)
: BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()),
dgroup_dcgs_(lwc.GetGroupDcgsSpan()) {}
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
if (dgroup_dcgs_[gidx] == 0.0) return 0.0f;
uint32_t group_begin = dgroups_[gidx];
auto pos_lab_orig_posn = dorig_pos_[pidx];
auto neg_lab_orig_posn = dorig_pos_[nidx];
KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn);
// Note: the label positive and negative indices are relative to the entire dataset.
// Hence, scale them back to an index within the group
auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin;
auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin;
return NDCGLambdaWeightComputer::ComputeDeltaWeight(
pos_pred_pos, neg_pred_pos,
static_cast<int>(dsorted_labels_[pidx]), static_cast<int>(dsorted_labels_[nidx]),
dgroup_dcgs_[gidx]);
}
private:
const common::Span<const float> dgroup_dcgs_; // Group DCG values
};
NDCGLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter)
: IndexablePredictionSorter(dpreds, segment_label_sorter),
dgroup_dcg_(segment_label_sorter.GetNumGroups(), 0.0f),
weight_multiplier_(segment_label_sorter, *this) {
const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan();
// Allocator to be used for managing space overhead while performing transformed reductions
dh::XGBCachingDeviceAllocator<char> alloc;
// Compute each elements DCG values and reduce them across groups concurrently.
auto end_range =
thrust::reduce_by_key(thrust::hip::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
thrust::make_transform_iterator(
// The indices need not be sequential within a group, as we care only
// about the sum of the items' DCG values within a group
dh::tcbegin(segment_label_sorter.GetOriginalPositionsSpan()),
ComputeItemDCG(segment_label_sorter.GetItemsSpan(),
segment_label_sorter.GetGroupsSpan(),
group_segments)),
thrust::make_discard_iterator(), // We don't care for the group indices
dgroup_dcg_.begin()); // Sum of the item's DCG values in the group
CHECK(end_range.second - dgroup_dcg_.begin() == dgroup_dcg_.size());
}
inline const common::Span<const float> GetGroupDcgsSpan() const {
return { dgroup_dcg_.data().get(), dgroup_dcg_.size() };
}
inline const NDCGLambdaWeightMultiplier GetWeightMultiplier() const {
return weight_multiplier_;
}
#endif
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {
std::vector<LambdaPair> &pairs = *io_pairs;
float IDCG; // NOLINT
{
std::vector<bst_float> labels(sorted_list.size());
for (size_t i = 0; i < sorted_list.size(); ++i) {
labels[i] = sorted_list[i].label;
}
std::stable_sort(labels.begin(), labels.end(), std::greater<bst_float>());
IDCG = ComputeGroupDCGWeight(&labels[0], labels.size());
}
if (IDCG == 0.0) {
for (auto & pair : pairs) {
pair.weight = 0.0f;
}
} else {
for (auto & pair : pairs) {
unsigned pos_idx = pair.pos_index;
unsigned neg_idx = pair.neg_index;
pair.weight *= ComputeDeltaWeight(pos_idx, neg_idx,
sorted_list[pos_idx].label, sorted_list[neg_idx].label,
IDCG);
}
}
}
static char const* Name() {
return "rank:ndcg";
}
inline static bst_float ComputeGroupDCGWeight(const float *sorted_labels, uint32_t size) {
double sumdcg = 0.0;
for (uint32_t i = 0; i < size; ++i) {
sumdcg += ComputeItemDCGWeight(sorted_labels[i], i);
}
return static_cast<bst_float>(sumdcg);
}
private:
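// DCG contribution of a single item: (2^label - 1) / log2(idx + 2), where 'idx'
// is the item's 0-based rank inside its group.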
XGBOOST_DEVICE inline static bst_float ComputeItemDCGWeight(unsigned label, uint32_t idx) {
return (label != 0) ? (((1 << label) - 1) / std::log2(static_cast<bst_float>(idx + 2))) : 0;
}
// Compute the weight adjustment for an item within a group:
// pos_pred_pos => Where does the positive label live, had the list been sorted by prediction
// neg_pred_pos => Where does the negative label live, had the list been sorted by prediction
// pos_label => positive label value from sorted label list
// neg_label => negative label value from sorted label list
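// The returned value is the normalized change in DCG from swapping the two items:
// |(2^pos_label - 2^neg_label) * (1/log2(pos_pred_pos + 2) - 1/log2(neg_pred_pos + 2))| / idcg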
XGBOOST_DEVICE inline static bst_float ComputeDeltaWeight(uint32_t pos_pred_pos,
uint32_t neg_pred_pos,
int pos_label, int neg_label,
float idcg) {
float pos_loginv = 1.0f / std::log2(pos_pred_pos + 2.0f);
float neg_loginv = 1.0f / std::log2(neg_pred_pos + 2.0f);
bst_float original = ((1 << pos_label) - 1) * pos_loginv + ((1 << neg_label) - 1) * neg_loginv;
float changed = ((1 << neg_label) - 1) * pos_loginv + ((1 << pos_label) - 1) * neg_loginv;
bst_float delta = (original - changed) * (1.0f / idcg);
if (delta < 0.0f) delta = - delta;
return delta;
}
#if defined(__HIPCC__)
dh::caching_device_vector<float> dgroup_dcg_;
// This computes the adjustment to the weight
const NDCGLambdaWeightMultiplier weight_multiplier_;
#endif
};
class MAPLambdaWeightComputer
#if defined(__HIPCC__)
: public IndexablePredictionSorter
#endif
{
public:
struct MAPStats {
/*! \brief the accumulated precision */
float ap_acc{0.0f};
/*!
* \brief the accumulated precision,
* assuming a positive instance is missing
*/
float ap_acc_miss{0.0f};
/*!
* \brief the accumulated precision,
* assuming that one more positive instance is inserted ahead
*/
float ap_acc_add{0.0f};
/*! \brief the accumulated positive instance count */
float hits{0.0f};
XGBOOST_DEVICE MAPStats() {} // NOLINT
XGBOOST_DEVICE MAPStats(float ap_acc, float ap_acc_miss, float ap_acc_add, float hits)
: ap_acc(ap_acc), ap_acc_miss(ap_acc_miss), ap_acc_add(ap_acc_add), hits(hits) {}
// For prefix scan
XGBOOST_DEVICE MAPStats operator +(const MAPStats &v1) const {
return {ap_acc + v1.ap_acc, ap_acc_miss + v1.ap_acc_miss,
ap_acc_add + v1.ap_acc_add, hits + v1.hits};
}
// For test purposes - compare for equality
XGBOOST_DEVICE bool operator ==(const MAPStats &rhs) const {
return ap_acc == rhs.ap_acc && ap_acc_miss == rhs.ap_acc_miss &&
ap_acc_add == rhs.ap_acc_add && hits == rhs.hits;
}
};
private:
template <typename T>
XGBOOST_DEVICE inline static void Swap(T &v0, T &v1) {
#if defined(__HIPCC__)
thrust::swap(v0, v1);
#else
std::swap(v0, v1);
#endif
}
/*!
* \brief Obtain the delta MAP by trying to switch the positions of labels in pos_pred_pos or
* neg_pred_pos when sorted by predictions
* \param pos_pred_pos positive label's prediction value position when the groups prediction
* values are sorted
* \param neg_pred_pos negative label's prediction value position when the groups prediction
* values are sorted
* \param pos_label, neg_label the chosen positive and negative labels
* \param p_map_stats a vector containing the accumulated precisions for each position in a list
* \param map_stats_size size of the accumulated precisions vector
*/
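// Effectively returns |delta AP|: the absolute change in the group's average precision
// if the two chosen labels swapped places in the prediction ordering.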
XGBOOST_DEVICE inline static bst_float GetLambdaMAP(
int pos_pred_pos, int neg_pred_pos,
bst_float pos_label, bst_float neg_label,
const MAPStats *p_map_stats, uint32_t map_stats_size) {
if (pos_pred_pos == neg_pred_pos || p_map_stats[map_stats_size - 1].hits == 0) {
return 0.0f;
}
if (pos_pred_pos > neg_pred_pos) {
Swap(pos_pred_pos, neg_pred_pos);
Swap(pos_label, neg_label);
}
bst_float original = p_map_stats[neg_pred_pos].ap_acc;
if (pos_pred_pos != 0) original -= p_map_stats[pos_pred_pos - 1].ap_acc;
bst_float changed = 0;
bst_float label1 = pos_label > 0.0f ? 1.0f : 0.0f;
bst_float label2 = neg_label > 0.0f ? 1.0f : 0.0f;
if (label1 == label2) {
return 0.0;
} else if (label1 < label2) {
changed += p_map_stats[neg_pred_pos - 1].ap_acc_add - p_map_stats[pos_pred_pos].ap_acc_add;
changed += (p_map_stats[pos_pred_pos].hits + 1.0f) / (pos_pred_pos + 1);
} else {
changed += p_map_stats[neg_pred_pos - 1].ap_acc_miss - p_map_stats[pos_pred_pos].ap_acc_miss;
changed += p_map_stats[neg_pred_pos].hits / (neg_pred_pos + 1);
}
bst_float ans = (changed - original) / (p_map_stats[map_stats_size - 1].hits);
if (ans < 0) ans = -ans;
return ans;
}
public:
/*
* \brief obtain preprocessing results for calculating delta MAP
* \param sorted_list the list containing entry information
* \param map_stats a vector containing the accumulated precisions for each position in a list
*/
inline static void GetMAPStats(const std::vector<ListEntry> &sorted_list,
std::vector<MAPStats> *p_map_acc) {
std::vector<MAPStats> &map_acc = *p_map_acc;
map_acc.resize(sorted_list.size());
bst_float hit = 0, acc1 = 0, acc2 = 0, acc3 = 0;
for (size_t i = 1; i <= sorted_list.size(); ++i) {
if (sorted_list[i - 1].label > 0.0f) {
hit++;
acc1 += hit / i;
acc2 += (hit - 1) / i;
acc3 += (hit + 1) / i;
}
map_acc[i - 1] = MAPStats(acc1, acc2, acc3, hit);
}
}
static char const* Name() {
return "rank:map";
}
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {
std::vector<LambdaPair> &pairs = *io_pairs;
std::vector<MAPStats> map_stats;
GetMAPStats(sorted_list, &map_stats);
for (auto & pair : pairs) {
pair.weight *=
GetLambdaMAP(pair.pos_index, pair.neg_index,
sorted_list[pair.pos_index].label, sorted_list[pair.neg_index].label,
&map_stats[0], map_stats.size());
}
}
#if defined(__HIPCC__)
MAPLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter)
: IndexablePredictionSorter(dpreds, segment_label_sorter),
dmap_stats_(segment_label_sorter.GetNumItems(), MAPStats()),
weight_multiplier_(segment_label_sorter, *this) {
this->CreateMAPStats(dlabels, segment_label_sorter);
}
void CreateMAPStats(const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter) {
// For each group, go through the sorted prediction positions, and look up its corresponding
// label from the unsorted labels (from the original label list)
// For each item in the group, compute its MAP stats.
// Interleave the computation of map stats amongst different groups.
// First, determine positive labels in the dataset individually
auto nitems = segment_label_sorter.GetNumItems();
dh::caching_device_vector<uint32_t> dhits(nitems, 0);
// Original positions of the predictions after they have been sorted
const auto &pred_original_pos = this->GetPredictionSorter().GetOriginalPositionsSpan();
// Unsorted labels
const float *unsorted_labels = dlabels;
auto DeterminePositiveLabelLambda = [=] __device__(uint32_t idx) {
return (unsorted_labels[pred_original_pos[idx]] > 0.0f) ? 1 : 0;
}; // NOLINT
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(nitems),
dhits.begin(),
DeterminePositiveLabelLambda);
// Allocator to be used by sort for managing space overhead while performing prefix scans
dh::XGBCachingDeviceAllocator<char> alloc;
// Next, prefix scan the positive labels that are segmented to accumulate them.
// This is required for computing the accumulated precisions
const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan();
// Data segmented into different groups...
thrust::inclusive_scan_by_key(thrust::hip::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
dhits.begin(), // Input value
dhits.begin()); // In-place scan
// Compute accumulated precisions for each item, assuming positive and
// negative instances are missing.
// But first, compute individual item precisions
const auto *dhits_arr = dhits.data().get();
// Group info on device
const auto &dgroups = segment_label_sorter.GetGroupsSpan();
auto ComputeItemPrecisionLambda = [=] __device__(uint32_t idx) {
if (unsorted_labels[pred_original_pos[idx]] > 0.0f) {
auto idx_within_group = (idx - dgroups[group_segments[idx]]) + 1;
return MAPStats{static_cast<float>(dhits_arr[idx]) / idx_within_group,
static_cast<float>(dhits_arr[idx] - 1) / idx_within_group,
static_cast<float>(dhits_arr[idx] + 1) / idx_within_group,
1.0f};
}
return MAPStats{};
}; // NOLINT
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(nitems),
this->dmap_stats_.begin(),
ComputeItemPrecisionLambda);
// Lastly, compute the accumulated precisions for all the items segmented by groups.
// The precisions are accumulated within each group
thrust::inclusive_scan_by_key(thrust::hip::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
this->dmap_stats_.begin(), // Input map stats
this->dmap_stats_.begin()); // In-place scan and output here
}
inline const common::Span<const MAPStats> GetMapStatsSpan() const {
return { dmap_stats_.data().get(), dmap_stats_.size() };
}
// Type containing device pointers that can be cheaply copied on the kernel
class MAPLambdaWeightMultiplier : public BaseLambdaWeightMultiplier {
public:
MAPLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const MAPLambdaWeightComputer &lwc)
: BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()),
dmap_stats_(lwc.GetMapStatsSpan()) {}
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
uint32_t group_begin = dgroups_[gidx];
uint32_t group_end = dgroups_[gidx + 1];
auto pos_lab_orig_posn = dorig_pos_[pidx];
auto neg_lab_orig_posn = dorig_pos_[nidx];
KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn);
// Note: the label positive and negative indices are relative to the entire dataset.
// Hence, scale them back to an index within the group
auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin;
auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin;
return MAPLambdaWeightComputer::GetLambdaMAP(
pos_pred_pos, neg_pred_pos,
dsorted_labels_[pidx], dsorted_labels_[nidx],
&dmap_stats_[group_begin], group_end - group_begin);
}
private:
common::Span<const MAPStats> dmap_stats_; // Start address of the map stats for every sorted
// prediction value
};
inline const MAPLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; }
private:
dh::caching_device_vector<MAPStats> dmap_stats_;
// This computes the adjustment to the weight
const MAPLambdaWeightMultiplier weight_multiplier_;
#endif
};
#if defined(__HIPCC__)
class SortedLabelList : dh::SegmentSorter<float> {
private:
const LambdaRankParam ¶m_; // Objective configuration
public:
explicit SortedLabelList(const LambdaRankParam ¶m)
: param_(param) {}
// Sort the labels that are grouped by 'groups'
void Sort(const HostDeviceVector<bst_float> &dlabels, const std::vector<uint32_t> &groups) {
this->SortItems(dlabels.ConstDevicePointer(), dlabels.Size(), groups);
}
// This kernel can only run *after* the kernel in sort is completed, as they
// use the default stream
template <typename LambdaWeightComputerT>
void ComputeGradients(const bst_float *dpreds, // Unsorted predictions
const bst_float *dlabels, // Unsorted labels
const HostDeviceVector<bst_float> &weights,
int iter,
GradientPair *out_gpair,
float weight_normalization_factor) {
// Group info on device
const auto &dgroups = this->GetGroupsSpan();
uint32_t ngroups = this->GetNumGroups() + 1;
uint32_t total_items = this->GetNumItems();
uint32_t niter = param_.num_pairsample * total_items;
float fix_list_weight = param_.fix_list_weight;
const auto &original_pos = this->GetOriginalPositionsSpan();
uint32_t num_weights = weights.Size();
auto dweights = num_weights ? weights.ConstDevicePointer() : nullptr;
const auto &sorted_labels = this->GetItemsSpan();
// This is used to adjust the weight of different elements based on the different ranking
// objective function policies
LambdaWeightComputerT weight_computer(dpreds, dlabels, *this);
auto wmultiplier = weight_computer.GetWeightMultiplier();
int device_id = -1;
dh::safe_cuda(hipGetDevice(&device_id));
// For each instance in the group, compute the gradient pair concurrently
dh::LaunchN(device_id, niter, nullptr, [=] __device__(uint32_t idx) {
// First, determine the group 'idx' belongs to
uint32_t item_idx = idx % total_items;
uint32_t group_idx = dh::UpperBound(dgroups.data(), ngroups, item_idx);
// Span of this group within the larger labels/predictions sorted tuple
uint32_t group_begin = dgroups[group_idx - 1];
uint32_t group_end = dgroups[group_idx];
uint32_t total_group_items = group_end - group_begin;
// Are the labels diverse enough? If they are all the same, then there is nothing to pick
// from another group - bail sooner
if (sorted_labels[group_begin] == sorted_labels[group_end - 1]) return;
// Find the number of labels less than and greater than the current label
// at the sorted index position item_idx
uint32_t nleft = CountNumItemsToTheLeftOf(
sorted_labels.data() + group_begin, item_idx - group_begin + 1, sorted_labels[item_idx]);
uint32_t nright = CountNumItemsToTheRightOf(
sorted_labels.data() + item_idx, group_end - item_idx, sorted_labels[item_idx]);
// Create a minstd_rand object to act as our source of randomness
thrust::minstd_rand rng((iter + 1) * 1111);
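// Advance the shared-seed sequence by a per-(sample round, item) offset so that
// every generated pair draws from a distinct point of the random stream.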
rng.discard(((idx / total_items) * total_group_items) + item_idx - group_begin);
// Create a uniform_int_distribution to produce a sample from outside of the
// present label group
thrust::uniform_int_distribution<int> dist(0, nleft + nright - 1);
int sample = dist(rng);
int pos_idx = -1; // Bigger label
int neg_idx = -1; // Smaller label
// Are we picking a sample to the left/right of the current group?
if (sample < nleft) {
// Go left
pos_idx = sample + group_begin;
neg_idx = item_idx;
} else {
pos_idx = item_idx;
uint32_t items_in_group = total_group_items - nleft - nright;
neg_idx = sample + items_in_group + group_begin;
}
// Compute and assign the gradients now
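// Pairwise logistic loss on the score difference: p = sigmoid(s_pos - s_neg),
// gradient w.r.t. the positive score is p - 1 (the negative score gets the
// opposite sign), and the curvature p * (1 - p) is floored at eps for stability.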
const float eps = 1e-16f;
bst_float p = common::Sigmoid(dpreds[original_pos[pos_idx]] - dpreds[original_pos[neg_idx]]);
bst_float g = p - 1.0f;
bst_float h = thrust::max(p * (1.0f - p), eps);
// Rescale each gradient and hessian so that the group has a constant weight
float scale = __frcp_ru(niter / total_items);
if (fix_list_weight != 0.0f) {
scale *= fix_list_weight / total_group_items;
}
float weight = num_weights ? dweights[group_idx - 1] : 1.0f;
weight *= weight_normalization_factor;
weight *= wmultiplier.GetWeight(group_idx - 1, pos_idx, neg_idx);
weight *= scale;
// Accumulate gradient and hessian in both positive and negative indices
const GradientPair in_pos_gpair(g * weight, 2.0f * weight * h);
dh::AtomicAddGpair(&out_gpair[original_pos[pos_idx]], in_pos_gpair);
const GradientPair in_neg_gpair(-g * weight, 2.0f * weight * h);
dh::AtomicAddGpair(&out_gpair[original_pos[neg_idx]], in_neg_gpair);
});
// Wait until the computations done by the kernel is complete
dh::safe_cuda(hipStreamSynchronize(nullptr));
}
};
#endif
// objective for lambda rank
template <typename LambdaWeightComputerT>
class LambdaRankObj : public ObjFunction {
public:
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair) override {
CHECK_EQ(preds.Size(), info.labels_.Size()) << "label size predict size not match";
// quick consistency check when group is not available
std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size());
const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_;
CHECK(gptr.size() != 0 && gptr.back() == info.labels_.Size())
<< "group structure not consistent with #rows" << ", "
<< "group ponter size: " << gptr.size() << ", "
<< "labels size: " << info.labels_.Size() << ", "
<< "group pointer back: " << (gptr.size() == 0 ? 0 : gptr.back());
#if defined(__HIPCC__)
// Check if we have a GPU assignment; else, revert back to CPU
auto device = tparam_->gpu_id;
if (device >= 0) {
ComputeGradientsOnGPU(preds, info, iter, out_gpair, gptr);
} else {
// Revert back to CPU
#endif
ComputeGradientsOnCPU(preds, info, iter, out_gpair, gptr);
#if defined(__HIPCC__)
}
#endif
}
const char* DefaultEvalMetric() const override {
return "map";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(LambdaWeightComputerT::Name());
out["lambda_rank_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["lambda_rank_param"], ¶m_);
}
private:
bst_float ComputeWeightNormalizationFactor(const MetaInfo& info,
const std::vector<unsigned> &gptr) {
const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1);
bst_float sum_weights = 0;
for (bst_omp_uint k = 0; k < ngroup; ++k) {
sum_weights += info.GetWeight(k);
}
return ngroup / sum_weights;
}
void ComputeGradientsOnCPU(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair,
const std::vector<unsigned> &gptr) {
LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on CPU.";
bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr);
const auto& preds_h = preds.HostVector();
const auto& labels = info.labels_.HostVector();
std::vector<GradientPair>& gpair = out_gpair->HostVector();
const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1);
out_gpair->Resize(preds.Size());
#pragma omp parallel
{
// parallel construct, declare random number generator here, so that each
// thread use its own random number generator, seed by thread id and current iteration
std::minstd_rand rnd((iter + 1) * 1111);
std::vector<LambdaPair> pairs;
std::vector<ListEntry> lst;
std::vector< std::pair<bst_float, unsigned> > rec;
#pragma omp for schedule(static)
for (bst_omp_uint k = 0; k < ngroup; ++k) {
lst.clear(); pairs.clear();
for (unsigned j = gptr[k]; j < gptr[k+1]; ++j) {
lst.emplace_back(preds_h[j], labels[j], j);
gpair[j] = GradientPair(0.0f, 0.0f);
}
std::stable_sort(lst.begin(), lst.end(), ListEntry::CmpPred);
rec.resize(lst.size());
for (unsigned i = 0; i < lst.size(); ++i) {
rec[i] = std::make_pair(lst[i].label, i);
}
std::stable_sort(rec.begin(), rec.end(), common::CmpFirst);
// enumerate buckets with the same label; for each item in the list, grab another sample randomly
for (unsigned i = 0; i < rec.size(); ) {
unsigned j = i + 1;
while (j < rec.size() && rec[j].first == rec[i].first) ++j;
// bucket in [i,j), get a sample outside bucket
unsigned nleft = i, nright = static_cast<unsigned>(rec.size() - j);
if (nleft + nright != 0) {
int nsample = param_.num_pairsample;
while (nsample --) {
for (unsigned pid = i; pid < j; ++pid) {
unsigned ridx = std::uniform_int_distribution<unsigned>(0, nleft + nright - 1)(rnd);
if (ridx < nleft) {
pairs.emplace_back(rec[ridx].second, rec[pid].second,
info.GetWeight(k) * weight_normalization_factor);
} else {
pairs.emplace_back(rec[pid].second, rec[ridx+j-i].second,
info.GetWeight(k) * weight_normalization_factor);
}
}
}
}
i = j;
}
// get lambda weight for the pairs
LambdaWeightComputerT::GetLambdaWeight(lst, &pairs);
// rescale each gradient and hessian so that the list has a constant weight
float scale = 1.0f / param_.num_pairsample;
if (param_.fix_list_weight != 0.0f) {
scale *= param_.fix_list_weight / (gptr[k + 1] - gptr[k]);
}
for (auto & pair : pairs) {
const ListEntry &pos = lst[pair.pos_index];
const ListEntry &neg = lst[pair.neg_index];
const bst_float w = pair.weight * scale;
const float eps = 1e-16f;
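// Same pairwise logistic-loss gradient as the GPU path: g = sigmoid(s_pos - s_neg) - 1,
// with curvature p * (1 - p) floored at eps.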
bst_float p = common::Sigmoid(pos.pred - neg.pred);
bst_float g = p - 1.0f;
bst_float h = std::max(p * (1.0f - p), eps);
// accumulate gradient and hessian in both pid, and nid
gpair[pos.rindex] += GradientPair(g * w, 2.0f*w*h);
gpair[neg.rindex] += GradientPair(-g * w, 2.0f*w*h);
}
}
}
}
#if defined(__HIPCC__)
void ComputeGradientsOnGPU(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair,
const std::vector<unsigned> &gptr) {
LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on GPU.";
auto device = tparam_->gpu_id;
dh::safe_cuda(hipSetDevice(device));
bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr);
// Set the device ID and copy them to the device
out_gpair->SetDevice(device);
info.labels_.SetDevice(device);
preds.SetDevice(device);
info.weights_.SetDevice(device);
out_gpair->Resize(preds.Size());
auto d_preds = preds.ConstDevicePointer();
auto d_gpair = out_gpair->DevicePointer();
auto d_labels = info.labels_.ConstDevicePointer();
SortedLabelList slist(param_);
// Sort the labels within the groups on the device
slist.Sort(info.labels_, gptr);
// Initialize the gradients next
out_gpair->Fill(GradientPair(0.0f, 0.0f));
// Finally, compute the gradients
slist.ComputeGradients<LambdaWeightComputerT>
(d_preds, d_labels, info.weights_, iter, d_gpair, weight_normalization_factor);
}
#endif
LambdaRankParam param_;
};
#if !defined(GTEST_TEST)
// register the objective functions
DMLC_REGISTER_PARAMETER(LambdaRankParam);
XGBOOST_REGISTER_OBJECTIVE(PairwiseRankObj, PairwiseLambdaWeightComputer::Name())
.describe("Pairwise rank objective.")
.set_body([]() { return new LambdaRankObj<PairwiseLambdaWeightComputer>(); });
XGBOOST_REGISTER_OBJECTIVE(LambdaRankNDCG, NDCGLambdaWeightComputer::Name())
.describe("LambdaRank with NDCG as objective.")
.set_body([]() { return new LambdaRankObj<NDCGLambdaWeightComputer>(); });
XGBOOST_REGISTER_OBJECTIVE(LambdaRankObjMAP, MAPLambdaWeightComputer::Name())
.describe("LambdaRank with MAP as objective.")
.set_body([]() { return new LambdaRankObj<MAPLambdaWeightComputer>(); });
#endif
} // namespace obj
} // namespace xgboost
| 310b603c54c6dd48e8f4156b695e17446a121ed8.cu | /*!
* Copyright 2015-2019 XGBoost contributors
*/
#include <dmlc/omp.h>
#include <dmlc/timer.h>
#include <xgboost/logging.h>
#include <xgboost/objective.h>
#include <vector>
#include <algorithm>
#include <utility>
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "../common/math.h"
#include "../common/random.h"
#if defined(__CUDACC__)
#include <thrust/sort.h>
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/random/uniform_int_distribution.h>
#include <thrust/random/linear_congruential_engine.h>
#include <cub/util_allocator.cuh>
#include "../common/device_helpers.cuh"
#endif
namespace xgboost {
namespace obj {
#if defined(XGBOOST_USE_CUDA) && !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(rank_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct LambdaRankParam : public XGBoostParameter<LambdaRankParam> {
size_t num_pairsample;
float fix_list_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(LambdaRankParam) {
DMLC_DECLARE_FIELD(num_pairsample).set_lower_bound(1).set_default(1)
.describe("Number of pair generated for each instance.");
DMLC_DECLARE_FIELD(fix_list_weight).set_lower_bound(0.0f).set_default(0.0f)
.describe("Normalize the weight of each list by this value,"
" if equals 0, no effect will happen");
}
};
#if defined(__CUDACC__)
// Helper functions
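// Note: both helpers below assume 'items' is sorted in descending order.
// CountNumItemsToTheLeftOf counts the entries strictly greater than v (those placed
// to its left), CountNumItemsToTheRightOf the entries strictly smaller than v (those
// placed to its right); entries equal to v are counted by neither.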
template <typename T>
XGBOOST_DEVICE __forceinline__ uint32_t
CountNumItemsToTheLeftOf(const T * __restrict__ items, uint32_t n, T v) {
return dh::LowerBound(items, n, v, thrust::greater<T>());
}
template <typename T>
XGBOOST_DEVICE __forceinline__ uint32_t
CountNumItemsToTheRightOf(const T * __restrict__ items, uint32_t n, T v) {
return n - dh::UpperBound(items, n, v, thrust::greater<T>());
}
#endif
/*! \brief helper information in a list */
struct ListEntry {
/*! \brief the predicted score of this entry */
bst_float pred;
/*! \brief the actual label of the entry */
bst_float label;
/*! \brief row index in the data matrix */
unsigned rindex;
// constructor
ListEntry(bst_float pred, bst_float label, unsigned rindex)
: pred(pred), label(label), rindex(rindex) {}
// comparator by prediction
inline static bool CmpPred(const ListEntry &a, const ListEntry &b) {
return a.pred > b.pred;
}
// comparator by label
inline static bool CmpLabel(const ListEntry &a, const ListEntry &b) {
return a.label > b.label;
}
};
/*! \brief a pair in the lambda rank */
struct LambdaPair {
/*! \brief positive index: this is a position in the list */
unsigned pos_index;
/*! \brief negative index: this is a position in the list */
unsigned neg_index;
/*! \brief weight to be filled in */
bst_float weight;
// constructor
LambdaPair(unsigned pos_index, unsigned neg_index)
: pos_index(pos_index), neg_index(neg_index), weight(1.0f) {}
// constructor
LambdaPair(unsigned pos_index, unsigned neg_index, bst_float weight)
: pos_index(pos_index), neg_index(neg_index), weight(weight) {}
};
class PairwiseLambdaWeightComputer {
public:
/*!
* \brief get lambda weight for existing pairs - for pairwise objective
* \param sorted_list a list that is sorted by pred score
* \param io_pairs record of pairs, containing the pairs to fill in weights
*/
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {}
static char const* Name() {
return "rank:pairwise";
}
#if defined(__CUDACC__)
PairwiseLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter) {}
class PairwiseLambdaWeightMultiplier {
public:
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
return 1.0f;
}
};
inline const PairwiseLambdaWeightMultiplier GetWeightMultiplier() const {
return {};
}
#endif
};
#if defined(__CUDACC__)
class BaseLambdaWeightMultiplier {
public:
BaseLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const dh::SegmentSorter<float> &segment_pred_sorter)
: dsorted_labels_(segment_label_sorter.GetItemsSpan()),
dorig_pos_(segment_label_sorter.GetOriginalPositionsSpan()),
dgroups_(segment_label_sorter.GetGroupsSpan()),
dindexable_sorted_preds_pos_(segment_pred_sorter.GetIndexableSortedPositionsSpan()) {}
protected:
const common::Span<const float> dsorted_labels_; // Labels sorted within a group
const common::Span<const uint32_t> dorig_pos_; // Original indices of the labels
// before they are sorted
const common::Span<const uint32_t> dgroups_; // The group indices
// Where can a prediction for a label be found in the original array, when they are sorted
const common::Span<const uint32_t> dindexable_sorted_preds_pos_;
};
// While computing the weight that needs to be adjusted by this ranking objective, we need
// to figure out where positive and negative labels chosen earlier exist, if the group
// were to be sorted by its predictions. To accommodate this, we employ the following algorithm.
// For a given group, let's assume the following:
// labels: 1 5 9 2 4 8 0 7 6 3
// predictions: 1 9 0 8 2 7 3 6 5 4
// position: 0 1 2 3 4 5 6 7 8 9
//
// After label sort:
// labels: 9 8 7 6 5 4 3 2 1 0
// position: 2 5 7 8 1 4 9 3 0 6
//
// After prediction sort:
// predictions: 9 8 7 6 5 4 3 2 1 0
// position: 1 3 5 7 8 9 6 4 0 2
//
// If a sorted label at position 'x' is chosen, then we need to find out where the prediction
// for this label 'x' exists, if the group were to be sorted by predictions.
// We first take the sorted prediction positions:
// position: 1 3 5 7 8 9 6 4 0 2
// at indices: 0 1 2 3 4 5 6 7 8 9
//
// We create a sorted prediction positional array, such that value at position 'x' gives
// us the position in the sorted prediction array where its related prediction lies.
// dindexable_sorted_preds_pos_: 8 0 9 1 7 2 6 3 4 5
// at indices: 0 1 2 3 4 5 6 7 8 9
// Basically, swap the previous 2 arrays, sort the indices and reorder positions
// for an O(1) lookup using the position where the sorted label exists.
//
// This type does that using the SegmentSorter
class IndexablePredictionSorter {
public:
IndexablePredictionSorter(const bst_float *dpreds,
const dh::SegmentSorter<float> &segment_label_sorter) {
// Sort the predictions first
segment_pred_sorter_.SortItems(dpreds, segment_label_sorter.GetNumItems(),
segment_label_sorter.GetGroupSegmentsSpan());
// Create an index for the sorted prediction positions
segment_pred_sorter_.CreateIndexableSortedPositions();
}
inline const dh::SegmentSorter<float> &GetPredictionSorter() const {
return segment_pred_sorter_;
}
private:
dh::SegmentSorter<float> segment_pred_sorter_; // For sorting the predictions
};
#endif
// beta version: NDCG lambda rank
class NDCGLambdaWeightComputer
#if defined(__CUDACC__)
: public IndexablePredictionSorter
#endif
{
public:
#if defined(__CUDACC__)
// This function object computes the item's DCG value
class ComputeItemDCG : public thrust::unary_function<uint32_t, float> {
public:
XGBOOST_DEVICE ComputeItemDCG(const common::Span<const float> &dsorted_labels,
const common::Span<const uint32_t> &dgroups,
const common::Span<const uint32_t> &gidxs)
: dsorted_labels_(dsorted_labels),
dgroups_(dgroups),
dgidxs_(gidxs) {}
// Compute DCG for the item at 'idx'
__device__ __forceinline__ float operator()(uint32_t idx) const {
return ComputeItemDCGWeight(dsorted_labels_[idx], idx - dgroups_[dgidxs_[idx]]);
}
private:
const common::Span<const float> dsorted_labels_; // Labels sorted within a group
const common::Span<const uint32_t> dgroups_; // The group indices - where each group
// begins and ends
const common::Span<const uint32_t> dgidxs_; // The group each item belongs to
};
// Type containing device pointers that can be cheaply copied on the kernel
class NDCGLambdaWeightMultiplier : public BaseLambdaWeightMultiplier {
public:
NDCGLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const NDCGLambdaWeightComputer &lwc)
: BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()),
dgroup_dcgs_(lwc.GetGroupDcgsSpan()) {}
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
if (dgroup_dcgs_[gidx] == 0.0) return 0.0f;
uint32_t group_begin = dgroups_[gidx];
auto pos_lab_orig_posn = dorig_pos_[pidx];
auto neg_lab_orig_posn = dorig_pos_[nidx];
KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn);
// Note: the label positive and negative indices are relative to the entire dataset.
// Hence, scale them back to an index within the group
auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin;
auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin;
return NDCGLambdaWeightComputer::ComputeDeltaWeight(
pos_pred_pos, neg_pred_pos,
static_cast<int>(dsorted_labels_[pidx]), static_cast<int>(dsorted_labels_[nidx]),
dgroup_dcgs_[gidx]);
}
private:
const common::Span<const float> dgroup_dcgs_; // Group DCG values
};
NDCGLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter)
: IndexablePredictionSorter(dpreds, segment_label_sorter),
dgroup_dcg_(segment_label_sorter.GetNumGroups(), 0.0f),
weight_multiplier_(segment_label_sorter, *this) {
const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan();
// Allocator to be used for managing space overhead while performing transformed reductions
dh::XGBCachingDeviceAllocator<char> alloc;
// Compute each element's DCG value and reduce them across groups concurrently.
auto end_range =
thrust::reduce_by_key(thrust::cuda::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
thrust::make_transform_iterator(
// The indices need not be sequential within a group, as we care only
// about the sum of items DCG values within a group
dh::tcbegin(segment_label_sorter.GetOriginalPositionsSpan()),
ComputeItemDCG(segment_label_sorter.GetItemsSpan(),
segment_label_sorter.GetGroupsSpan(),
group_segments)),
thrust::make_discard_iterator(), // We don't care for the group indices
dgroup_dcg_.begin()); // Sum of the item's DCG values in the group
CHECK(end_range.second - dgroup_dcg_.begin() == dgroup_dcg_.size());
}
inline const common::Span<const float> GetGroupDcgsSpan() const {
return { dgroup_dcg_.data().get(), dgroup_dcg_.size() };
}
inline const NDCGLambdaWeightMultiplier GetWeightMultiplier() const {
return weight_multiplier_;
}
#endif
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {
std::vector<LambdaPair> &pairs = *io_pairs;
float IDCG; // NOLINT
{
std::vector<bst_float> labels(sorted_list.size());
for (size_t i = 0; i < sorted_list.size(); ++i) {
labels[i] = sorted_list[i].label;
}
std::stable_sort(labels.begin(), labels.end(), std::greater<bst_float>());
IDCG = ComputeGroupDCGWeight(&labels[0], labels.size());
}
if (IDCG == 0.0) {
for (auto & pair : pairs) {
pair.weight = 0.0f;
}
} else {
for (auto & pair : pairs) {
unsigned pos_idx = pair.pos_index;
unsigned neg_idx = pair.neg_index;
pair.weight *= ComputeDeltaWeight(pos_idx, neg_idx,
sorted_list[pos_idx].label, sorted_list[neg_idx].label,
IDCG);
}
}
}
static char const* Name() {
return "rank:ndcg";
}
inline static bst_float ComputeGroupDCGWeight(const float *sorted_labels, uint32_t size) {
double sumdcg = 0.0;
for (uint32_t i = 0; i < size; ++i) {
sumdcg += ComputeItemDCGWeight(sorted_labels[i], i);
}
return static_cast<bst_float>(sumdcg);
}
private:
XGBOOST_DEVICE inline static bst_float ComputeItemDCGWeight(unsigned label, uint32_t idx) {
return (label != 0) ? (((1 << label) - 1) / std::log2(static_cast<bst_float>(idx + 2))) : 0;
}
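// e.g. label 3 at sorted position idx = 0 contributes ((1 << 3) - 1) / log2(0 + 2) = 7 / 1 = 7,
// while the same label at idx = 2 contributes 7 / log2(4) = 3.5.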
// Compute the weight adjustment for an item within a group:
// pos_pred_pos => Where does the positive label live, had the list been sorted by prediction
// neg_pred_pos => Where does the negative label live, had the list been sorted by prediction
// pos_label => positive label value from sorted label list
// neg_label => negative label value from sorted label list
XGBOOST_DEVICE inline static bst_float ComputeDeltaWeight(uint32_t pos_pred_pos,
uint32_t neg_pred_pos,
int pos_label, int neg_label,
float idcg) {
float pos_loginv = 1.0f / std::log2(pos_pred_pos + 2.0f);
float neg_loginv = 1.0f / std::log2(neg_pred_pos + 2.0f);
bst_float original = ((1 << pos_label) - 1) * pos_loginv + ((1 << neg_label) - 1) * neg_loginv;
float changed = ((1 << neg_label) - 1) * pos_loginv + ((1 << pos_label) - 1) * neg_loginv;
bst_float delta = (original - changed) * (1.0f / idcg);
if (delta < 0.0f) delta = - delta;
return delta;
}
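// e.g. with pos_label = 3 at sorted-prediction slot pos_pred_pos = 2 and neg_label = 0 at
// neg_pred_pos = 0: original = 7 * (1 / log2(4)) + 0 * 1 = 3.5 and changed = 0 * 0.5 + 7 * 1 = 7,
// giving delta = |3.5 - 7| / idcg = 3.5 / idcg.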
#if defined(__CUDACC__)
dh::caching_device_vector<float> dgroup_dcg_;
// This computes the adjustment to the weight
const NDCGLambdaWeightMultiplier weight_multiplier_;
#endif
};
class MAPLambdaWeightComputer
#if defined(__CUDACC__)
: public IndexablePredictionSorter
#endif
{
public:
struct MAPStats {
/*! \brief the accumulated precision */
float ap_acc{0.0f};
/*!
* \brief the accumulated precision,
* assuming a positive instance is missing
*/
float ap_acc_miss{0.0f};
/*!
* \brief the accumulated precision,
* assuming that one more positive instance is inserted ahead
*/
float ap_acc_add{0.0f};
/* \brief the accumulated positive instance count */
float hits{0.0f};
XGBOOST_DEVICE MAPStats() {} // NOLINT
XGBOOST_DEVICE MAPStats(float ap_acc, float ap_acc_miss, float ap_acc_add, float hits)
: ap_acc(ap_acc), ap_acc_miss(ap_acc_miss), ap_acc_add(ap_acc_add), hits(hits) {}
// For prefix scan
XGBOOST_DEVICE MAPStats operator +(const MAPStats &v1) const {
return {ap_acc + v1.ap_acc, ap_acc_miss + v1.ap_acc_miss,
ap_acc_add + v1.ap_acc_add, hits + v1.hits};
}
// For test purposes - compare for equality
XGBOOST_DEVICE bool operator ==(const MAPStats &rhs) const {
return ap_acc == rhs.ap_acc && ap_acc_miss == rhs.ap_acc_miss &&
ap_acc_add == rhs.ap_acc_add && hits == rhs.hits;
}
};
private:
template <typename T>
XGBOOST_DEVICE inline static void Swap(T &v0, T &v1) {
#if defined(__CUDACC__)
thrust::swap(v0, v1);
#else
std::swap(v0, v1);
#endif
}
/*!
* \brief Obtain the delta MAP by trying to switch the positions of labels in pos_pred_pos or
* neg_pred_pos when sorted by predictions
* \param pos_pred_pos positive label's prediction value position when the groups prediction
* values are sorted
* \param neg_pred_pos negative label's prediction value position when the groups prediction
* values are sorted
* \param pos_label, neg_label the chosen positive and negative labels
* \param p_map_stats a vector containing the accumulated precisions for each position in a list
* \param map_stats_size size of the accumulated precisions vector
*/
XGBOOST_DEVICE inline static bst_float GetLambdaMAP(
int pos_pred_pos, int neg_pred_pos,
bst_float pos_label, bst_float neg_label,
const MAPStats *p_map_stats, uint32_t map_stats_size) {
if (pos_pred_pos == neg_pred_pos || p_map_stats[map_stats_size - 1].hits == 0) {
return 0.0f;
}
if (pos_pred_pos > neg_pred_pos) {
Swap(pos_pred_pos, neg_pred_pos);
Swap(pos_label, neg_label);
}
bst_float original = p_map_stats[neg_pred_pos].ap_acc;
if (pos_pred_pos != 0) original -= p_map_stats[pos_pred_pos - 1].ap_acc;
bst_float changed = 0;
bst_float label1 = pos_label > 0.0f ? 1.0f : 0.0f;
bst_float label2 = neg_label > 0.0f ? 1.0f : 0.0f;
if (label1 == label2) {
return 0.0;
} else if (label1 < label2) {
changed += p_map_stats[neg_pred_pos - 1].ap_acc_add - p_map_stats[pos_pred_pos].ap_acc_add;
changed += (p_map_stats[pos_pred_pos].hits + 1.0f) / (pos_pred_pos + 1);
} else {
changed += p_map_stats[neg_pred_pos - 1].ap_acc_miss - p_map_stats[pos_pred_pos].ap_acc_miss;
changed += p_map_stats[neg_pred_pos].hits / (neg_pred_pos + 1);
}
bst_float ans = (changed - original) / (p_map_stats[map_stats_size - 1].hits);
if (ans < 0) ans = -ans;
return ans;
}
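// Worked example, using the MAPStats produced by GetMAPStats below for labels [1, 0, 1]
// taken in sorted-prediction order, i.e. stats = [(1, 0, 2, 1), (1, 0, 2, 1), (5/3, 1/3, 3, 2)]:
// asking for a swap of the items at prediction slots 2 (label 1) and 1 (label 0) first swaps the
// arguments (pos_pred_pos = 1, neg_pred_pos = 2), then original = 5/3 - 1 = 2/3 and
// changed = (stats[1].ap_acc_add - stats[1].ap_acc_add) + (1 + 1) / 2 = 1, so the result is
// |1 - 2/3| / 2 = 1/6, exactly the AP gain of reordering [1, 0, 1] into [1, 1, 0].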
public:
/*
* \brief obtain preprocessing results for calculating delta MAP
* \param sorted_list the list containing entry information
* \param map_stats a vector containing the accumulated precisions for each position in a list
*/
inline static void GetMAPStats(const std::vector<ListEntry> &sorted_list,
std::vector<MAPStats> *p_map_acc) {
std::vector<MAPStats> &map_acc = *p_map_acc;
map_acc.resize(sorted_list.size());
bst_float hit = 0, acc1 = 0, acc2 = 0, acc3 = 0;
for (size_t i = 1; i <= sorted_list.size(); ++i) {
if (sorted_list[i - 1].label > 0.0f) {
hit++;
acc1 += hit / i;
acc2 += (hit - 1) / i;
acc3 += (hit + 1) / i;
}
map_acc[i - 1] = MAPStats(acc1, acc2, acc3, hit);
}
}
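// Worked example for a group whose labels, in sorted-prediction order, are [1, 0, 1]:
// i = 1: hit = 1, acc1 = 1/1, acc2 = 0/1, acc3 = 2/1 -> map_acc[0] = (1, 0, 2, 1)
// i = 2: label is 0, nothing accumulates -> map_acc[1] = (1, 0, 2, 1)
// i = 3: hit = 2, acc1 += 2/3, acc2 += 1/3, acc3 += 3/3 -> map_acc[2] = (5/3, 1/3, 3, 2)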
static char const* Name() {
return "rank:map";
}
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {
std::vector<LambdaPair> &pairs = *io_pairs;
std::vector<MAPStats> map_stats;
GetMAPStats(sorted_list, &map_stats);
for (auto & pair : pairs) {
pair.weight *=
GetLambdaMAP(pair.pos_index, pair.neg_index,
sorted_list[pair.pos_index].label, sorted_list[pair.neg_index].label,
&map_stats[0], map_stats.size());
}
}
#if defined(__CUDACC__)
MAPLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter)
: IndexablePredictionSorter(dpreds, segment_label_sorter),
dmap_stats_(segment_label_sorter.GetNumItems(), MAPStats()),
weight_multiplier_(segment_label_sorter, *this) {
this->CreateMAPStats(dlabels, segment_label_sorter);
}
void CreateMAPStats(const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter) {
// For each group, go through the sorted prediction positions, and look up its corresponding
// label from the unsorted labels (from the original label list)
// For each item in the group, compute its MAP stats.
// Interleave the computation of map stats amongst different groups.
// First, determine positive labels in the dataset individually
auto nitems = segment_label_sorter.GetNumItems();
dh::caching_device_vector<uint32_t> dhits(nitems, 0);
// Original positions of the predictions after they have been sorted
const auto &pred_original_pos = this->GetPredictionSorter().GetOriginalPositionsSpan();
// Unsorted labels
const float *unsorted_labels = dlabels;
auto DeterminePositiveLabelLambda = [=] __device__(uint32_t idx) {
return (unsorted_labels[pred_original_pos[idx]] > 0.0f) ? 1 : 0;
}; // NOLINT
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(nitems),
dhits.begin(),
DeterminePositiveLabelLambda);
// Allocator to be used by sort for managing space overhead while performing prefix scans
dh::XGBCachingDeviceAllocator<char> alloc;
// Next, prefix scan the positive labels that are segmented to accumulate them.
// This is required for computing the accumulated precisions
const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan();
// Data segmented into different groups...
thrust::inclusive_scan_by_key(thrust::cuda::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
dhits.begin(), // Input value
dhits.begin()); // In-place scan
// Compute accumulated precisions for each item, assuming positive and
// negative instances are missing.
// But first, compute individual item precisions
const auto *dhits_arr = dhits.data().get();
// Group info on device
const auto &dgroups = segment_label_sorter.GetGroupsSpan();
auto ComputeItemPrecisionLambda = [=] __device__(uint32_t idx) {
if (unsorted_labels[pred_original_pos[idx]] > 0.0f) {
auto idx_within_group = (idx - dgroups[group_segments[idx]]) + 1;
return MAPStats{static_cast<float>(dhits_arr[idx]) / idx_within_group,
static_cast<float>(dhits_arr[idx] - 1) / idx_within_group,
static_cast<float>(dhits_arr[idx] + 1) / idx_within_group,
1.0f};
}
return MAPStats{};
}; // NOLINT
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(nitems),
this->dmap_stats_.begin(),
ComputeItemPrecisionLambda);
// Lastly, compute the accumulated precisions for all the items segmented by groups.
// The precisions are accumulated within each group
thrust::inclusive_scan_by_key(thrust::cuda::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
this->dmap_stats_.begin(), // Input map stats
this->dmap_stats_.begin()); // In-place scan and output here
}
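// Illustrative trace of the pipeline above for a single group whose labels, read in
// sorted-prediction order, are [1, 0, 1]:
// dhits after the first transform: [1, 0, 1]
// dhits after inclusive_scan_by_key: [1, 1, 2] (running count of positives)
// per-item MAPStats: [(1, 0, 2, 1), (0, 0, 0, 0), (2/3, 1/3, 1, 1)]
// dmap_stats_ after the final scan: [(1, 0, 2, 1), (1, 0, 2, 1), (5/3, 1/3, 3, 2)]
// which matches what the CPU-side GetMAPStats computes for the same group.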
inline const common::Span<const MAPStats> GetMapStatsSpan() const {
return { dmap_stats_.data().get(), dmap_stats_.size() };
}
// Type containing device pointers that can be cheaply copied on the kernel
class MAPLambdaWeightMultiplier : public BaseLambdaWeightMultiplier {
public:
MAPLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const MAPLambdaWeightComputer &lwc)
: BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()),
dmap_stats_(lwc.GetMapStatsSpan()) {}
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
uint32_t group_begin = dgroups_[gidx];
uint32_t group_end = dgroups_[gidx + 1];
auto pos_lab_orig_posn = dorig_pos_[pidx];
auto neg_lab_orig_posn = dorig_pos_[nidx];
KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn);
// Note: the label positive and negative indices are relative to the entire dataset.
// Hence, scale them back to an index within the group
auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin;
auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin;
return MAPLambdaWeightComputer::GetLambdaMAP(
pos_pred_pos, neg_pred_pos,
dsorted_labels_[pidx], dsorted_labels_[nidx],
&dmap_stats_[group_begin], group_end - group_begin);
}
private:
common::Span<const MAPStats> dmap_stats_; // Start address of the map stats for every sorted
// prediction value
};
inline const MAPLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; }
private:
dh::caching_device_vector<MAPStats> dmap_stats_;
// This computes the adjustment to the weight
const MAPLambdaWeightMultiplier weight_multiplier_;
#endif
};
#if defined(__CUDACC__)
class SortedLabelList : dh::SegmentSorter<float> {
private:
const LambdaRankParam ¶m_; // Objective configuration
public:
explicit SortedLabelList(const LambdaRankParam ¶m)
: param_(param) {}
// Sort the labels that are grouped by 'groups'
void Sort(const HostDeviceVector<bst_float> &dlabels, const std::vector<uint32_t> &groups) {
this->SortItems(dlabels.ConstDevicePointer(), dlabels.Size(), groups);
}
// This kernel can only run *after* the kernel in sort is completed, as they
// use the default stream
template <typename LambdaWeightComputerT>
void ComputeGradients(const bst_float *dpreds, // Unsorted predictions
const bst_float *dlabels, // Unsorted labels
const HostDeviceVector<bst_float> &weights,
int iter,
GradientPair *out_gpair,
float weight_normalization_factor) {
// Group info on device
const auto &dgroups = this->GetGroupsSpan();
uint32_t ngroups = this->GetNumGroups() + 1;
uint32_t total_items = this->GetNumItems();
uint32_t niter = param_.num_pairsample * total_items;
float fix_list_weight = param_.fix_list_weight;
const auto &original_pos = this->GetOriginalPositionsSpan();
uint32_t num_weights = weights.Size();
auto dweights = num_weights ? weights.ConstDevicePointer() : nullptr;
const auto &sorted_labels = this->GetItemsSpan();
// This is used to adjust the weight of different elements based on the different ranking
// objective function policies
LambdaWeightComputerT weight_computer(dpreds, dlabels, *this);
auto wmultiplier = weight_computer.GetWeightMultiplier();
int device_id = -1;
dh::safe_cuda(cudaGetDevice(&device_id));
// For each instance in the group, compute the gradient pair concurrently
dh::LaunchN(device_id, niter, nullptr, [=] __device__(uint32_t idx) {
// First, determine the group 'idx' belongs to
uint32_t item_idx = idx % total_items;
uint32_t group_idx = dh::UpperBound(dgroups.data(), ngroups, item_idx);
// Span of this group within the larger labels/predictions sorted tuple
uint32_t group_begin = dgroups[group_idx - 1];
uint32_t group_end = dgroups[group_idx];
uint32_t total_group_items = group_end - group_begin;
// Are the labels diverse enough? If they are all the same, then there is nothing to pick
// from another group - bail sooner
if (sorted_labels[group_begin] == sorted_labels[group_end - 1]) return;
// Find the number of labels less than and greater than the current label
// at the sorted index position item_idx
uint32_t nleft = CountNumItemsToTheLeftOf(
sorted_labels.data() + group_begin, item_idx - group_begin + 1, sorted_labels[item_idx]);
uint32_t nright = CountNumItemsToTheRightOf(
sorted_labels.data() + item_idx, group_end - item_idx, sorted_labels[item_idx]);
// Create a minstd_rand object to act as our source of randomness
thrust::minstd_rand rng((iter + 1) * 1111);
rng.discard(((idx / total_items) * total_group_items) + item_idx - group_begin);
// Create a uniform_int_distribution to produce a sample from outside of the
// present label group
thrust::uniform_int_distribution<int> dist(0, nleft + nright - 1);
int sample = dist(rng);
int pos_idx = -1; // Bigger label
int neg_idx = -1; // Smaller label
// Are we picking a sample to the left/right of the current group?
if (sample < nleft) {
// Go left
pos_idx = sample + group_begin;
neg_idx = item_idx;
} else {
pos_idx = item_idx;
uint32_t items_in_group = total_group_items - nleft - nright;
neg_idx = sample + items_in_group + group_begin;
}
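// e.g. for a group whose sorted labels are [3, 3, 2, 1] with item_idx on the label-2 entry:
// nleft = 2 (the two 3s) and nright = 1 (the lone 1), so sample is drawn from [0, 2];
// samples 0 and 1 pick one of the 3s as the positive against this item, while sample 2 is
// shifted past the equal-label bucket (items_in_group = 1) to index 3, pairing it with the 1.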
// Compute and assign the gradients now
const float eps = 1e-16f;
bst_float p = common::Sigmoid(dpreds[original_pos[pos_idx]] - dpreds[original_pos[neg_idx]]);
bst_float g = p - 1.0f;
bst_float h = thrust::max(p * (1.0f - p), eps);
// Rescale each gradient and hessian so that the group has a constant weighted contribution
float scale = __frcp_ru(niter / total_items);
if (fix_list_weight != 0.0f) {
scale *= fix_list_weight / total_group_items;
}
float weight = num_weights ? dweights[group_idx - 1] : 1.0f;
weight *= weight_normalization_factor;
weight *= wmultiplier.GetWeight(group_idx - 1, pos_idx, neg_idx);
weight *= scale;
// Accumulate gradient and hessian in both positive and negative indices
const GradientPair in_pos_gpair(g * weight, 2.0f * weight * h);
dh::AtomicAddGpair(&out_gpair[original_pos[pos_idx]], in_pos_gpair);
const GradientPair in_neg_gpair(-g * weight, 2.0f * weight * h);
dh::AtomicAddGpair(&out_gpair[original_pos[neg_idx]], in_neg_gpair);
});
// Wait until the computations done by the kernel are complete
dh::safe_cuda(cudaStreamSynchronize(nullptr));
}
};
#endif
// objective for lambda rank
template <typename LambdaWeightComputerT>
class LambdaRankObj : public ObjFunction {
public:
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair) override {
CHECK_EQ(preds.Size(), info.labels_.Size()) << "label size predict size not match";
// quick consistency when group is not available
std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size());
const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_;
CHECK(gptr.size() != 0 && gptr.back() == info.labels_.Size())
<< "group structure not consistent with #rows" << ", "
<< "group ponter size: " << gptr.size() << ", "
<< "labels size: " << info.labels_.Size() << ", "
<< "group pointer back: " << (gptr.size() == 0 ? 0 : gptr.back());
#if defined(__CUDACC__)
// Check if we have a GPU assignment; else, revert back to CPU
auto device = tparam_->gpu_id;
if (device >= 0) {
ComputeGradientsOnGPU(preds, info, iter, out_gpair, gptr);
} else {
// Revert back to CPU
#endif
ComputeGradientsOnCPU(preds, info, iter, out_gpair, gptr);
#if defined(__CUDACC__)
}
#endif
}
const char* DefaultEvalMetric() const override {
return "map";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(LambdaWeightComputerT::Name());
out["lambda_rank_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["lambda_rank_param"], ¶m_);
}
private:
bst_float ComputeWeightNormalizationFactor(const MetaInfo& info,
const std::vector<unsigned> &gptr) {
const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1);
bst_float sum_weights = 0;
for (bst_omp_uint k = 0; k < ngroup; ++k) {
sum_weights += info.GetWeight(k);
}
return ngroup / sum_weights;
}
void ComputeGradientsOnCPU(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair,
const std::vector<unsigned> &gptr) {
LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on CPU.";
bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr);
const auto& preds_h = preds.HostVector();
const auto& labels = info.labels_.HostVector();
std::vector<GradientPair>& gpair = out_gpair->HostVector();
const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1);
out_gpair->Resize(preds.Size());
#pragma omp parallel
{
// parallel construct, declare random number generator here, so that each
// thread use its own random number generator, seed by thread id and current iteration
std::minstd_rand rnd((iter + 1) * 1111);
std::vector<LambdaPair> pairs;
std::vector<ListEntry> lst;
std::vector< std::pair<bst_float, unsigned> > rec;
#pragma omp for schedule(static)
for (bst_omp_uint k = 0; k < ngroup; ++k) {
lst.clear(); pairs.clear();
for (unsigned j = gptr[k]; j < gptr[k+1]; ++j) {
lst.emplace_back(preds_h[j], labels[j], j);
gpair[j] = GradientPair(0.0f, 0.0f);
}
std::stable_sort(lst.begin(), lst.end(), ListEntry::CmpPred);
rec.resize(lst.size());
for (unsigned i = 0; i < lst.size(); ++i) {
rec[i] = std::make_pair(lst[i].label, i);
}
std::stable_sort(rec.begin(), rec.end(), common::CmpFirst);
// enumerate buckets with same label, for each item in the lst, grab another sample randomly
for (unsigned i = 0; i < rec.size(); ) {
unsigned j = i + 1;
while (j < rec.size() && rec[j].first == rec[i].first) ++j;
// bucket in [i,j), get a sample outside bucket
unsigned nleft = i, nright = static_cast<unsigned>(rec.size() - j);
if (nleft + nright != 0) {
int nsample = param_.num_pairsample;
while (nsample --) {
for (unsigned pid = i; pid < j; ++pid) {
unsigned ridx = std::uniform_int_distribution<unsigned>(0, nleft + nright - 1)(rnd);
if (ridx < nleft) {
pairs.emplace_back(rec[ridx].second, rec[pid].second,
info.GetWeight(k) * weight_normalization_factor);
} else {
pairs.emplace_back(rec[pid].second, rec[ridx+j-i].second,
info.GetWeight(k) * weight_normalization_factor);
}
}
}
}
i = j;
}
// get lambda weight for the pairs
LambdaWeightComputerT::GetLambdaWeight(lst, &pairs);
// rescale each gradient and hessian so that each list has a constant weighted contribution
float scale = 1.0f / param_.num_pairsample;
if (param_.fix_list_weight != 0.0f) {
scale *= param_.fix_list_weight / (gptr[k + 1] - gptr[k]);
}
for (auto & pair : pairs) {
const ListEntry &pos = lst[pair.pos_index];
const ListEntry &neg = lst[pair.neg_index];
const bst_float w = pair.weight * scale;
const float eps = 1e-16f;
bst_float p = common::Sigmoid(pos.pred - neg.pred);
bst_float g = p - 1.0f;
bst_float h = std::max(p * (1.0f - p), eps);
// accumulate gradient and hessian in both pid, and nid
gpair[pos.rindex] += GradientPair(g * w, 2.0f*w*h);
gpair[neg.rindex] += GradientPair(-g * w, 2.0f*w*h);
}
}
}
}
#if defined(__CUDACC__)
void ComputeGradientsOnGPU(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair,
const std::vector<unsigned> &gptr) {
LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on GPU.";
auto device = tparam_->gpu_id;
dh::safe_cuda(cudaSetDevice(device));
bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr);
// Set the device ID and copy them to the device
out_gpair->SetDevice(device);
info.labels_.SetDevice(device);
preds.SetDevice(device);
info.weights_.SetDevice(device);
out_gpair->Resize(preds.Size());
auto d_preds = preds.ConstDevicePointer();
auto d_gpair = out_gpair->DevicePointer();
auto d_labels = info.labels_.ConstDevicePointer();
SortedLabelList slist(param_);
// Sort the labels within the groups on the device
slist.Sort(info.labels_, gptr);
// Initialize the gradients next
out_gpair->Fill(GradientPair(0.0f, 0.0f));
// Finally, compute the gradients
slist.ComputeGradients<LambdaWeightComputerT>
(d_preds, d_labels, info.weights_, iter, d_gpair, weight_normalization_factor);
}
#endif
LambdaRankParam param_;
};
#if !defined(GTEST_TEST)
// register the objective functions
DMLC_REGISTER_PARAMETER(LambdaRankParam);
XGBOOST_REGISTER_OBJECTIVE(PairwiseRankObj, PairwiseLambdaWeightComputer::Name())
.describe("Pairwise rank objective.")
.set_body([]() { return new LambdaRankObj<PairwiseLambdaWeightComputer>(); });
XGBOOST_REGISTER_OBJECTIVE(LambdaRankNDCG, NDCGLambdaWeightComputer::Name())
.describe("LambdaRank with NDCG as objective.")
.set_body([]() { return new LambdaRankObj<NDCGLambdaWeightComputer>(); });
XGBOOST_REGISTER_OBJECTIVE(LambdaRankObjMAP, MAPLambdaWeightComputer::Name())
.describe("LambdaRank with MAP as objective.")
.set_body([]() { return new LambdaRankObj<MAPLambdaWeightComputer>(); });
#endif
} // namespace obj
} // namespace xgboost
|
f4a59ee769cafa1f1993b011ccc853cadc3d959f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include "native_kernel.h"
int main(int argc, char **argv)
{
std::ifstream values;
values.open("./values.txt");
int D,N;
int niter = atoi(argv[1]);
float learn = atof(argv[2]);
float *xvalues,*y_actual,*real_weights,*weights;
values>>D>>N;
printf("N = %d D = %d",N,D);
xvalues = new float [D*N];
for(int i = 0 ; i<N;i++)
{
for (int j = 0; j<D;j++)
{
values>>xvalues[j*N + i];
}
}
y_actual = new float[N];
for(int i = 0; i<N;i++)
{
values>>y_actual[i];
}
real_weights = new float[D];
for(int i = 0; i<D;i++)
{
values>>real_weights[i];
}
weights = new float[D];
for(int i = 0; i<D;i++)
{
values>>weights[i];
}
// printf("Done Reading Data\n");
float *d_xval, *d_yval,* d_weights, *d_error,*h_error,*tempa,*d_gradient;
hipMalloc((void**)&d_xval,sizeof(float)*D*N);
hipMalloc((void**)&tempa,sizeof(float)*D*N);
hipMalloc((void**)&d_weights,sizeof(float)*D);
hipMalloc((void**)&d_gradient,sizeof(float)*D);
hipMalloc((void**)&d_yval,sizeof(float)*N);
hipMalloc((void**)&d_error,sizeof(float)*N);
hipMemcpy(d_xval,xvalues,sizeof(float)*D*N,hipMemcpyHostToDevice);
hipMemcpy(d_yval,(void *)y_actual,sizeof(float)*N,hipMemcpyHostToDevice);
hipMemcpy(d_weights,(void *)weights,sizeof(float)*D,hipMemcpyHostToDevice);
int count = 0;
// hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// hipEventRecord(start);
// printf("Loop Start\n");
while(count<niter)
{
hipMemcpyToSymbol(c_weights,weights,sizeof(float)*D,0,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( getdotError), dim3(iDivUp(N,1024)),dim3(1024), 0, 0, N,D,d_xval,d_yval,d_error);
// multiply<<<iDivUp(N*D,1024),1024>>>(N,D,d_xval,d_error,tempa);
for(int i = 0; i<D;i++)
hipLaunchKernelGGL(( better_reduce_kernel<256>), dim3(20),dim3(256),256*sizeof(float), 0, d_xval + i*N,d_error,d_gradient+i,N,D);
// hipMemcpy(gradient,d_weights,sizeof(float)*D,hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( update_weights), dim3(iDivUp(D,128)),dim3(128), 0, 0, d_weights,d_gradient,learn);
printf("Error = \n");
count++;
}
// hipEventRecord(stop);
// hipEventSynchronize(stop);
// float time_in_ms;
// hipEventElapsedTime(&time_in_ms,start,stop);
h_error = new float[N];
hipMemcpy(h_error,d_error,sizeof(float)*N,hipMemcpyDeviceToHost);
// for(int i = 0; i<100;i++)
// {
// printf("%f ",h_error[i]);
// if(i%10==0)
// printf("\n");
// }
// printf("Compute Time = %f\n",time_in_ms);
// sdiff = sqrt(sdiff/D);
// printf("Final Error = %f\n",sdiff);
}
f4a59ee769cafa1f1993b011ccc853cadc3d959f.cu | #include <fstream>
#include <cstdio>
#include <cstdlib>
#include "native_kernel.h"
int main(int argc, char **argv)
{
std::ifstream values;
values.open("./values.txt");
int D,N;
int niter = atoi(argv[1]);
float learn = atof(argv[2]);
float *xvalues,*y_actual,*real_weights,*weights;
values>>D>>N;
printf("N = %d D = %d",N,D);
xvalues = new float [D*N];
for(int i = 0 ; i<N;i++)
{
for (int j = 0; j<D;j++)
{
values>>xvalues[j*N + i];
}
}
y_actual = new float[N];
for(int i = 0; i<N;i++)
{
values>>y_actual[i];
}
real_weights = new float[D];
for(int i = 0; i<D;i++)
{
values>>real_weights[i];
}
weights = new float[D];
for(int i = 0; i<D;i++)
{
values>>weights[i];
}
// printf("Done Reading Data\n");
float *d_xval, *d_yval,* d_weights, *d_error,*h_error,*tempa,*d_gradient;
cudaMalloc((void**)&d_xval,sizeof(float)*D*N);
cudaMalloc((void**)&tempa,sizeof(float)*D*N);
cudaMalloc((void**)&d_weights,sizeof(float)*D);
cudaMalloc((void**)&d_gradient,sizeof(float)*D);
cudaMalloc((void**)&d_yval,sizeof(float)*N);
cudaMalloc((void**)&d_error,sizeof(float)*N);
cudaMemcpy(d_xval,xvalues,sizeof(float)*D*N,cudaMemcpyHostToDevice);
cudaMemcpy(d_yval,(void *)y_actual,sizeof(float)*N,cudaMemcpyHostToDevice);
cudaMemcpy(d_weights,(void *)weights,sizeof(float)*D,cudaMemcpyHostToDevice);
int count = 0;
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// cudaEventRecord(start);
// printf("Loop Start\n");
while(count<niter)
{
cudaMemcpyToSymbol(c_weights,weights,sizeof(float)*D,0,cudaMemcpyHostToDevice);
getdotError<<<iDivUp(N,1024),1024>>>(N,D,d_xval,d_yval,d_error);
// multiply<<<iDivUp(N*D,1024),1024>>>(N,D,d_xval,d_error,tempa);
for(int i = 0; i<D;i++)
better_reduce_kernel<256><<<20,256,256*sizeof(float)>>>(d_xval + i*N,d_error,d_gradient+i,N,D);
// cudaMemcpy(gradient,d_weights,sizeof(float)*D,cudaMemcpyDeviceToHost);
update_weights<<<iDivUp(D,128),128>>> (d_weights,d_gradient,learn);
printf("Error = \n");
count++;
}
// cudaEventRecord(stop);
// cudaEventSynchronize(stop);
// float time_in_ms;
// cudaEventElapsedTime(&time_in_ms,start,stop);
h_error = new float[N];
cudaMemcpy(h_error,d_error,sizeof(float)*N,cudaMemcpyDeviceToHost);
// for(int i = 0; i<100;i++)
// {
// printf("%f ",h_error[i]);
// if(i%10==0)
// printf("\n");
// }
// printf("Compute Time = %f\n",time_in_ms);
// sdiff = sqrt(sdiff/D);
// printf("Final Error = %f\n",sdiff);
}
|
cbf1bdc75104ef4799a091459928daaa79a58212.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! Sum <T = float32, Device = CUDA> */
template <typename T>
__global__ void _Sum(
const int count,
const int axis_dim,
const int inner_dim,
const T* x,
float* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
T sum_val = (T)0;
const int offset = (idx / inner_dim * axis_dim)
* inner_dim + idx % inner_dim;
for (int j = 0; j < axis_dim; j++)
sum_val += x[offset + j * inner_dim];
y[idx] = sum_val;
}
}
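// Example of the indexing above: with axis_dim = 3 and inner_dim = 4, output element
// idx = 5 (outer index 1, inner index 1) has offset = (5 / 4 * 3) * 4 + 5 % 4 = 13 and
// accumulates x[13], x[17] and x[21], i.e. consecutive j steps move by inner_dim
// along the reduced axis.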
template<> void Sum<float, CUDAContext>(
const int count,
const int axis_dim,
const int inner_dim,
const float* x,
float* y,
CUDAContext* ctx) {
_Sum<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, axis_dim, inner_dim, x, y);
}
/*! SumGrad <T = float32, Device = CUDA> */
template <typename T>
__global__ void _SumGrad(
const int count,
const int axis_dim,
const int inner_dim,
const T coeff,
const T* dy,
float* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int offset = (idx / inner_dim * axis_dim)
* inner_dim + idx % inner_dim;
for (int j = 0; j < axis_dim; j++)
dx[offset + j * inner_dim] = dy[idx] * coeff;
}
}
template<> void SumGrad<float, CUDAContext>(
const int count,
const int axis_dim,
const int inner_dim,
const float coeff,
const float* dy,
float* dx,
CUDAContext* ctx) {
_SumGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, axis_dim, inner_dim, coeff, dy, dx);
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA | cbf1bdc75104ef4799a091459928daaa79a58212.cu | #ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! Sum <T = float32, Device = CUDA> */
template <typename T>
__global__ void _Sum(
const int count,
const int axis_dim,
const int inner_dim,
const T* x,
float* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
T sum_val = (T)0;
const int offset = (idx / inner_dim * axis_dim)
* inner_dim + idx % inner_dim;
for (int j = 0; j < axis_dim; j++)
sum_val += x[offset + j * inner_dim];
y[idx] = sum_val;
}
}
template<> void Sum<float, CUDAContext>(
const int count,
const int axis_dim,
const int inner_dim,
const float* x,
float* y,
CUDAContext* ctx) {
_Sum<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, axis_dim, inner_dim, x, y);
}
/*! SumGrad <T = float32, Device = CUDA> */
template <typename T>
__global__ void _SumGrad(
const int count,
const int axis_dim,
const int inner_dim,
const T coeff,
const T* dy,
float* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int offset = (idx / inner_dim * axis_dim)
* inner_dim + idx % inner_dim;
for (int j = 0; j < axis_dim; j++)
dx[offset + j * inner_dim] = dy[idx] * coeff;
}
}
template<> void SumGrad<float, CUDAContext>(
const int count,
const int axis_dim,
const int inner_dim,
const float coeff,
const float* dy,
float* dx,
CUDAContext* ctx) {
_SumGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, axis_dim, inner_dim, coeff, dy, dx);
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA |
9f7c1f5a2c45658fcb0ea4a5f9b888a08f5c74ab.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "SetupPoissKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
hiprandState_t *curand_state = NULL;
hipMalloc(&curand_state, XSIZE*YSIZE*sizeof(hiprandState_t));
uint64_t n_dir_conn = 1;
unsigned long long seed = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((SetupPoissKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, curand_state, n_dir_conn, seed);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((SetupPoissKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, curand_state, n_dir_conn, seed);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((SetupPoissKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, curand_state, n_dir_conn, seed);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9f7c1f5a2c45658fcb0ea4a5f9b888a08f5c74ab.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "SetupPoissKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
curandState *curand_state = NULL;
cudaMalloc(&curand_state, XSIZE*YSIZE);
uint64_t n_dir_conn = 1;
unsigned long long seed = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
SetupPoissKernel<<<gridBlock,threadBlock>>>(curand_state,n_dir_conn,seed);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
SetupPoissKernel<<<gridBlock,threadBlock>>>(curand_state,n_dir_conn,seed);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
SetupPoissKernel<<<gridBlock,threadBlock>>>(curand_state,n_dir_conn,seed);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
dd93bc193c0922d8dfcc28f4eccbd05bb064e504.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <algorithm>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAError() \
do \
{ \
if(hipPeekAtLastError() != hipSuccess) \
{ \
std::cerr << hipGetErrorString(hipPeekAtLastError()) << __FILE__ << __LINE__ << "\n"; \
exit(-1); \
} \
} while(0) \
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
//number of neighboring grid cells to check
//LOOK-2.1
//having to check 27 grid cells would require more checks in the loop, so it
//would be less efficient than checking 8 grid cells
#define CHECK_8 1
#if CHECK_8
constexpr int NEIGHBORS_TO_CHECK = 8;
constexpr int NEIGHBORS_TO_CHECK_WIDTH = 2;
#else
constexpr int NEIGHBORS_TO_CHECK = 27;
constexpr int NEIGHBORS_TO_CHECK_WIDTH = 3;
#endif
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
//pos and vel sorted by array indices
glm::vec3* dev_vel_sorted;
glm::vec3* dev_pos_sorted;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash(static_cast<int>(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3(static_cast<float>(unitDistrib(rng)), static_cast<float>(unitDistrib(rng)), static_cast<float>(unitDistrib(rng)));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
const int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
const glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc(reinterpret_cast<void**>(&dev_pos), N * sizeof(glm::vec3));
checkCUDAError();
hipMalloc(reinterpret_cast<void**>(&dev_vel1), N * sizeof(glm::vec3));
checkCUDAError();
hipMalloc(reinterpret_cast<void**>(&dev_vel2), N * sizeof(glm::vec3));
checkCUDAError();
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAError();
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = static_cast<int>(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
std::cout << gridCellCount << "-" << gridCellWidth << "-" << gridMinimum.x << "-" << gridMinimum.y << "-" << gridMinimum.z << "\n";
//10648-10--110--110--110
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc(reinterpret_cast<void**>(&dev_particleArrayIndices), N * sizeof(int));
checkCUDAError();
hipMalloc(reinterpret_cast<void**>(&dev_particleGridIndices), N * sizeof(int));
checkCUDAError();
hipMalloc(reinterpret_cast<void**>(&dev_gridCellStartIndices), gridCellCount * sizeof(int));
checkCUDAError();
hipMalloc(reinterpret_cast<void**>(&dev_gridCellEndIndices), gridCellCount * sizeof(int));
checkCUDAError();
//part 2.3 allocate memory for position and velocity struct
hipMalloc(reinterpret_cast<void**>(&dev_vel_sorted), N * sizeof(*dev_vel_sorted));
checkCUDAError();
hipMalloc(reinterpret_cast<void**>(&dev_pos_sorted), N * sizeof(*dev_pos_sorted));
checkCUDAError();
hipDeviceSynchronize();
checkCUDAError();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAError();
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
//swap pointers (helps with ping pong)
template<typename T, typename std::enable_if<std::is_pointer<T>::value>::type* = nullptr>
void swap_pointers(T& p1, T& p2)
{
T temp = p1;
p1 = p2;
p2 = temp;
}
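//typical use: after a kernel has written the new velocities into dev_vel2,
//swap_pointers(dev_vel1, dev_vel2) makes dev_vel1 hold the freshest data for the
//next simulation step without copying any device memory.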
//clamps the vec3 by normalizing it
template<typename T>
__device__ glm::vec3 clamp_vec3(T&& vec)
{
if(glm::length(vec) > maxSpeed)
{
return glm::normalize(std::forward<T>(vec)) * maxSpeed;
}
return vec;
}
//helper function to check rule
template<typename CheckSuccessCallback>
__device__ void check_rule(float rule_distance, int this_boid, int other_boid, const glm::vec3* pos, CheckSuccessCallback check_success_callback)
{
const auto& this_boid_pos = pos[this_boid];
const auto& other_boid_pos = pos[other_boid];
if (this_boid != other_boid && glm::distance(this_boid_pos, other_boid_pos) < rule_distance)
{
check_success_callback();
}
}
//The following 3 functions checks two
//boids against each rule
__device__ void check_rule1(int this_boid, int other_boid, const glm::vec3* pos, glm::vec3& perceived_center, int& num_neighbors)
{
check_rule(rule1Distance, this_boid, other_boid, pos,
[&]()
{
const auto& other_boid_pos = pos[other_boid];
perceived_center += other_boid_pos;
num_neighbors++;
});
}
__device__ void check_rule2(int this_boid, int other_boid, const glm::vec3* pos, glm::vec3& c)
{
check_rule(rule2Distance, this_boid, other_boid, pos,
[&]()
{
const auto& this_boid_pos = pos[this_boid];
const auto& other_boid_pos = pos[other_boid];
c -= (other_boid_pos - this_boid_pos);
});
}
__device__ void check_rule3(int this_boid, int other_boid, const glm::vec3* pos, glm::vec3& perceived_velocity, int& num_neighbors,
const glm::vec3* vel)
{
check_rule(rule3Distance, this_boid, other_boid, pos,
[&]()
{
perceived_velocity += vel[other_boid];
num_neighbors++;
});
}
//The following 3 functions computes the
//rule velocity after all the boids in the
//area have been iterated through
__device__ glm::vec3 finish_rule1(const glm::vec3& this_boid_pos, glm::vec3& perceived_center, int& num_neighbors)
{
if(num_neighbors)
{
perceived_center /= num_neighbors;
return (perceived_center - this_boid_pos) * rule1Scale;
}
return {};
}
__device__ glm::vec3 finish_rule2(const glm::vec3& c)
{
return c * rule2Scale;
}
__device__ glm::vec3 finish_rule3(glm::vec3& perceived_velocity, int& num_neighbors)
{
if (num_neighbors)
{
perceived_velocity /= num_neighbors;
return perceived_velocity * rule3Scale;
}
return {};
}
//The following 3 functions computes each rule naively (iterate through all boids)
__device__ glm::vec3 compute_rule1_naive(int N, int this_boid, const glm::vec3 *pos, const glm::vec3 *vel)
{
glm::vec3 perceived_center{};
auto num_neighbors = 0;
for (auto other_boid = 0; other_boid < N; other_boid++)
{
check_rule1(this_boid, other_boid, pos, perceived_center, num_neighbors);
}
const auto& this_boid_pos = pos[this_boid];
return finish_rule1(this_boid_pos, perceived_center, num_neighbors);
}
__device__ glm::vec3 compute_rule2_naive(int N, int this_boid, const glm::vec3 *pos, const glm::vec3 *vel)
{
glm::vec3 c{};
for (auto other_boid = 0; other_boid < N; other_boid++)
{
check_rule2(this_boid, other_boid, pos, c);
}
return finish_rule2(c);
}
__device__ glm::vec3 compute_rule3_naive(int N, int this_boid, const glm::vec3 *pos, const glm::vec3 *vel)
{
glm::vec3 perceived_velocity{};
auto num_neighbors = 0;
for (auto other_boid = 0; other_boid < N; other_boid++)
{
check_rule3(this_boid, other_boid, pos, perceived_velocity, num_neighbors, vel);
}
return finish_rule3(perceived_velocity, num_neighbors);
}
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
//add the result of all the rules
auto result = compute_rule1_naive(N, iSelf, pos, vel) +
compute_rule2_naive(N, iSelf, pos, vel) +
compute_rule3_naive(N, iSelf, pos, vel);
return result;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
// If vel1 is updated, then another GPU thread can update the vel1
// and that would cause inconsistency.
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
const auto vel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
vel2[index] = clamp_vec3(vel);
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
//probably
//for(z)
// for(y)
// for(x)
//because x is constantly changing, while y and z
//are less frequently changing in the loop
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
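//e.g. a neighbor sweep ordered so that consecutive iterations touch consecutive 1D cells:
// for (int z = zmin; z <= zmax; ++z)
//   for (int y = ymin; y <= ymax; ++y)
//     for (int x = xmin; x <= xmax; ++x)
//       int cell = gridIndex3Dto1D(x, y, z, gridResolution); // x varies fastest -> stride-1 access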
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
//set the indices of the boid to match the thread index (same index as the position and velocity indices)
indices[index] = index;
//pos[index] - gridMin is the boid's position, with instead of
//offsetting at [-100:100], it's offseted at [0:200]
const auto b_pos = pos[index] - gridMin;
//get boid's position index or "grid cube" that the boid is in (truncated to an int)
const glm::ivec3 b_pos_index = b_pos * inverseCellWidth;
//the position is converted to a 1d int (more efficient instead of holding the entire vec3)
gridIndices[index] = gridIndex3Dto1D(b_pos_index.x, b_pos_index.y, b_pos_index.z, gridResolution);
}
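//e.g. with the parameters computed in initSimulation (cell width 10, grid minimum -110,
//gridResolution 22): a boid at (-105, 0, 63) has b_pos = (5, 110, 173), falls in cell
//(0, 11, 17) and gets gridIndices[index] = 0 + 11*22 + 17*22*22 = 8470.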
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
//how to expand variadic templates in c++11
//https://stackoverflow.com/questions/41623422/c-expand-variadic-template-arguments-into-a-statement
template<typename Type = int, typename... Ts>
__device__ auto truncate_to(Ts*... ts)
{
(void)std::initializer_list<int>{(*ts = static_cast<Type>(*ts), 0)...};
}
//truncate glm vec to int
template<typename Vec, typename ConvertToType = int>
__device__ void truncate_glm_vec(Vec& vec)
{
truncate_to<ConvertToType>(&vec.x, &vec.y, &vec.z);
}
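//e.g. truncation is toward zero, component by component:
// glm::vec3 v(3.7f, -2.2f, 9.9f);
// truncate_glm_vec(v); // v becomes (3.0f, -2.0f, 9.0f)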
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
//get this boid's sorted grid value, and the previous one only when it exists
//(guarding the read avoids indexing particleGridIndices[-1] for the first element)
const auto grid_cell_value = particleGridIndices[index];
const auto previous_grid_cell_value = index > 0 ? particleGridIndices[index - 1] : grid_cell_value;
//if we are at the first grid index,
//we know that the grid index must be in a start index
if(!index)
{
gridCellStartIndices[grid_cell_value] = index;
}
//get the previous element, and check if they belong to the same grid
//if not, then we know that the grid_cell_value must be the
//start of a new grid index while previous_grid_cell_value must be
//the last element to the previous grid cell group
//c++17 if initializers: (VS15 doesn't have that support =/)
//if(const auto previous_grid_cell = ...; previous_grid_cell != grid_cell)
else
{
if (previous_grid_cell_value != grid_cell_value)
{
gridCellStartIndices[grid_cell_value] = index;
gridCellEndIndices[previous_grid_cell_value] = index;
}
//the last sorted element always closes its own cell; store an exclusive
//end index (index + 1) so the start < end iteration in the neighbor search
//does not skip the final boid
if (index == N - 1)
{
gridCellEndIndices[grid_cell_value] = index + 1;
}
}
}
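//e.g. for sorted particleGridIndices = [4, 4, 7, 7, 7, 9] (N = 6) this yields
//start[4] = 0, end[4] = 2, start[7] = 2, end[7] = 5, start[9] = 5, end[9] = 6,
//with every end index exclusive so that end - start is the number of boids in the cell.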
template<int neighbor_max_size = NEIGHBORS_TO_CHECK, int neighbor_width = NEIGHBORS_TO_CHECK_WIDTH, typename CheckBoidFunc>
__device__ void do_something_with_boid_neighbors(const glm::vec3 b_pos, int grid_resolution, float cell_width, const glm::vec3& grid_min, int* grid_cell_start_index, int* grid_cell_end_index, CheckBoidFunc check_boid_func)
{
//offset boid position to [0:200]
const glm::vec3 b_pos_offset = b_pos - grid_min;
  //this is the boid grid cell index (calculated by dividing by cell width)
  const glm::ivec3 b_cell_index = b_pos_offset / cell_width;
  //position of the boid inside its own cell, in [0:cell_width); this decides
  //which half of the cell (and therefore which neighboring cells) to search
  const glm::vec3 b_pos_inside_cell_width = b_pos_offset - glm::vec3(b_cell_index) * cell_width;
//find all the neighbors and populate neighbor array
//which side is the grid on?
enum class GridCellSide : uint8_t
{
Left,
Right
};
  //find which side we are on (point is one of x, y, or z)
auto find_grid_cell_side = [&](float point)
{
return point < (cell_width / 2) ? GridCellSide::Left : GridCellSide::Right;
};
//find the side for x, y, z
const GridCellSide x_side = find_grid_cell_side(b_pos_inside_cell_width.x);
const GridCellSide y_side = find_grid_cell_side(b_pos_inside_cell_width.y);
const GridCellSide z_side = find_grid_cell_side(b_pos_inside_cell_width.z);
//find which side to iterate to (either -1 or 0)
int x_offset = -neighbor_width + 1;
int y_offset = -neighbor_width + 1;
int z_offset = -neighbor_width + 1;
if(x_side == GridCellSide::Right)
{
x_offset = 0;
}
if(y_side == GridCellSide::Right)
{
y_offset = 0;
}
if(z_side == GridCellSide::Right)
{
z_offset = 0;
}
//iterate x (either from -1 ... 0 or 0 ... 1)
for(int i = x_offset; i < x_offset + neighbor_width; ++i)
{
const int x = b_cell_index.x + i;
//check if out of bounds
if(x < 0 || x >= grid_resolution)
{
continue;
}
//iterate y
for(int k = y_offset; k < y_offset + neighbor_width; ++k)
{
const int y = b_cell_index.y + k;
//check if out of bounds
if (y < 0 || y >= grid_resolution)
{
continue;
}
//iterate z
for(int l = z_offset; l < z_offset + neighbor_width; ++l)
{
const int z = b_cell_index.z + l;
//check if out of bounds
if (z < 0 || z >= grid_resolution)
{
continue;
}
        //get the index into the grid_cell_start_index/grid_cell_end_index arrays
const int index_into_grid_cell_pointer_index = gridIndex3Dto1D(x, y, z, grid_resolution);
//compute the start and end indices for the grid cell index
auto grid_start_index = grid_cell_start_index[index_into_grid_cell_pointer_index];
const auto grid_end_index = grid_cell_end_index[index_into_grid_cell_pointer_index];
//iterate through the boids in the grid cell
for (; grid_start_index < grid_end_index; ++grid_start_index)
{
//pass in the boid array index
check_boid_func(grid_start_index);
}
}
}
}
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
//get the boid index
const auto this_boid_index = particleArrayIndices[index];
//get location of boid (float)
const auto this_boid_pos = pos[this_boid_index];
//do rule1, rule2, rule3
int rule1_num_neighbors = 0;
int rule3_num_neighbors = 0;
glm::vec3 perceived_center = {};
glm::vec3 c = {};
glm::vec3 perceived_velocity = {};
//for each neighbor, compute the rules and add them to perceived_center, c, perceived_velocity (rules 1, 2, 3)
do_something_with_boid_neighbors(this_boid_pos,
gridResolution, cellWidth,
gridMin, gridCellStartIndices,
gridCellEndIndices,
[&](const int index_into_boid_array)
{
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
const int other_boid_index = particleArrayIndices[index_into_boid_array];
check_rule1(this_boid_index, other_boid_index, pos,
perceived_center, rule1_num_neighbors);
check_rule2(this_boid_index, other_boid_index, pos, c);
check_rule3(this_boid_index, other_boid_index, pos,
perceived_velocity, rule3_num_neighbors, vel1);
}
);
// - Clamp the speed change before putting the new speed in vel2
//compute each rule
const auto rule_1_result = finish_rule1(this_boid_pos, perceived_center, rule1_num_neighbors);
const auto rule_2_result = finish_rule2(c);
const auto rule_3_result = finish_rule3(perceived_velocity, rule3_num_neighbors);
//add to velocity
const auto vel = vel1[this_boid_index] + rule_1_result + rule_2_result + rule_3_result;
vel2[this_boid_index] = clamp_vec3(vel);
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
//get the boid index
const auto this_boid_index = index;
//get location of boid (float)
const auto this_boid_pos = pos[this_boid_index];
//do rule1, rule2, rule3
int rule1_num_neighbors = 0;
int rule3_num_neighbors = 0;
glm::vec3 perceived_center = {};
glm::vec3 c = {};
glm::vec3 perceived_velocity = {};
//for each neighbor, compute the rules and add them to perceived_center, c, perceived_velocity (rules 1, 2, 3)
do_something_with_boid_neighbors(this_boid_pos,
gridResolution, cellWidth,
gridMin, gridCellStartIndices,
gridCellEndIndices,
[&](const int index_into_boid_array)
{
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
const int other_boid_index = index_into_boid_array;
check_rule1(this_boid_index, other_boid_index, pos,
perceived_center, rule1_num_neighbors);
check_rule2(this_boid_index, other_boid_index, pos, c);
check_rule3(this_boid_index, other_boid_index, pos,
perceived_velocity, rule3_num_neighbors, vel1);
}
);
// - Clamp the speed change before putting the new speed in vel2
//compute each rule
const auto rule_1_result = finish_rule1(this_boid_pos, perceived_center, rule1_num_neighbors);
const auto rule_2_result = finish_rule2(c);
const auto rule_3_result = finish_rule3(perceived_velocity, rule3_num_neighbors);
//add to velocity
const auto vel = vel1[this_boid_index] + rule_1_result + rule_2_result + rule_3_result;
vel2[this_boid_index] = clamp_vec3(vel);
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAError();
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel1);
checkCUDAError();
swap_pointers(dev_vel2, dev_vel1);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 cellPerGrid((gridCellCount + blockSize - 1) / blockSize);
//reset the grid cell start / end
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(cellPerGrid), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellStartIndices, std::numeric_limits<int>::min());
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(cellPerGrid), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellEndIndices, std::numeric_limits<int>::min());
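  //cells that never receive a boid keep INT_MIN in both start and end, so
  //start < end is false and the neighbor search simply skips them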
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
//compute the indices
hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAError();
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
//sort with thrust, so we have <key = grid index, value = array_index>
thrust::device_ptr<int> thrust_grid_indices(dev_particleGridIndices);
thrust::device_ptr<int> thrust_array_indices(dev_particleArrayIndices);
thrust::sort_by_key(thrust_grid_indices, thrust_grid_indices + numObjects, thrust_array_indices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
//make sure the start and end indices are mapped to the correct grid indices
hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAError();
// - Perform velocity updates using neighbor search
  //search the neighboring grid cells for nearby boids and accumulate the velocity change for each boid
hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
checkCUDAError();
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel1);
checkCUDAError();
swap_pointers(dev_vel2, dev_vel1);
}
__global__ void kernSortPosAndVelByArrayIndicies(int N, glm::vec3* result_pos, glm::vec3* result_vel,
glm::vec3* pos, glm::vec3* vel, int* grid_array)
{
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
//fetch the pos and vel associated with the grid cell array
const int pos_and_vel_index = grid_array[index];
//copy over the pos/vel into the index that the grid array was in
result_pos[index] = pos[pos_and_vel_index];
result_vel[index] = vel[pos_and_vel_index];
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 cellPerGrid((gridCellCount + blockSize - 1) / blockSize);
//reset the grid cell start / end
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(cellPerGrid), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellStartIndices, std::numeric_limits<int>::min());
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(cellPerGrid), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellEndIndices, std::numeric_limits<int>::min());
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
//compute the indices
hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAError();
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
//sort with thrust, so we have <key = grid index, value = array_index>
thrust::device_ptr<int> thrust_grid_indices(dev_particleGridIndices);
thrust::device_ptr<int> thrust_array_indices(dev_particleArrayIndices);
thrust::sort_by_key(thrust_grid_indices, thrust_grid_indices + numObjects, thrust_array_indices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
  //make sure the start and end indices are mapped to the correct grid indices
hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAError();
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
  //reorder pos and vel so each grid cell's boids are contiguous (sorted by the boid array indices)
hipLaunchKernelGGL(( kernSortPosAndVelByArrayIndicies), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos_sorted, dev_vel_sorted, dev_pos, dev_vel1, dev_particleArrayIndices);
checkCUDAError();
// - Perform velocity updates using neighbor search
  //search the neighboring grid cells for nearby boids and accumulate the velocity change for each boid
hipLaunchKernelGGL(( kernUpdateVelNeighborSearchCoherent), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos_sorted, dev_vel_sorted, dev_vel2);
checkCUDAError();
  //update positions based on the sorted pos/vel buffers
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos_sorted, dev_vel_sorted);
checkCUDAError();
//ping pong
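  //note: the three velocity swaps below are equivalent to a single
  //swap(dev_vel1, dev_vel2); afterwards dev_vel1 holds the new velocities in
  //sorted order, matching dev_pos after the pos/pos_sorted swap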
swap_pointers(dev_vel2, dev_vel_sorted);
swap_pointers(dev_vel2, dev_vel1);
swap_pointers(dev_vel_sorted, dev_vel1);
swap_pointers(dev_pos, dev_pos_sorted);
}
void Boids::endSimulation() {
hipFree(dev_vel1);
checkCUDAError();
hipFree(dev_vel2);
checkCUDAError();
hipFree(dev_pos);
checkCUDAError();
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
hipFree(dev_particleArrayIndices);
checkCUDAError();
hipFree(dev_particleGridIndices);
checkCUDAError();
hipFree(dev_gridCellStartIndices);
checkCUDAError();
hipFree(dev_gridCellEndIndices);
checkCUDAError();
//2.3 cleanup pos and vel struct
hipFree(dev_vel_sorted);
checkCUDAError();
hipFree(dev_pos_sorted);
checkCUDAError();
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAError();
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAError();
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
checkCUDAError();
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
checkCUDAError();
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAError();
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAError();
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
checkCUDAError();
hipFree(dev_intValues);
checkCUDAError();
return;
}
| dd93bc193c0922d8dfcc28f4eccbd05bb064e504.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <algorithm>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAError() \
do \
{ \
if(cudaPeekAtLastError() != cudaSuccess) \
{ \
std::cerr << cudaGetErrorString(cudaPeekAtLastError()) << __FILE__ << __LINE__ << "\n"; \
exit(-1); \
} \
} while(0) \
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
//number of neighboring grid cells to check
//LOOK-2.1
//having to check 27 grid cells would require more checks in the loop, so it
//would be less efficient than checking 8 grid cells
//set CHECK_8 to 0 to search the full 3x3x3 (27-cell) neighborhood instead
#define CHECK_8 1
#if CHECK_8
constexpr int NEIGHBORS_TO_CHECK = 8;
constexpr int NEIGHBORS_TO_CHECK_WIDTH = 2;
#else
constexpr int NEIGHBORS_TO_CHECK = 27;
constexpr int NEIGHBORS_TO_CHECK_WIDTH = 3;
#endif
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
//pos and vel sorted by array indices
glm::vec3* dev_vel_sorted;
glm::vec3* dev_pos_sorted;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
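  // (appears to be a Jenkins-style 32-bit integer mix; it decorrelates the
  // per-boid RNG seeds used below)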
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash(static_cast<int>(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3(static_cast<float>(unitDistrib(rng)), static_cast<float>(unitDistrib(rng)), static_cast<float>(unitDistrib(rng)));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
const int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
const glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc(reinterpret_cast<void**>(&dev_pos), N * sizeof(glm::vec3));
checkCUDAError();
cudaMalloc(reinterpret_cast<void**>(&dev_vel1), N * sizeof(glm::vec3));
checkCUDAError();
cudaMalloc(reinterpret_cast<void**>(&dev_vel2), N * sizeof(glm::vec3));
checkCUDAError();
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAError();
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = static_cast<int>(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
std::cout << gridCellCount << "-" << gridCellWidth << "-" << gridMinimum.x << "-" << gridMinimum.y << "-" << gridMinimum.z << "\n";
//10648-10--110--110--110
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc(reinterpret_cast<void**>(&dev_particleArrayIndices), N * sizeof(int));
checkCUDAError();
cudaMalloc(reinterpret_cast<void**>(&dev_particleGridIndices), N * sizeof(int));
checkCUDAError();
cudaMalloc(reinterpret_cast<void**>(&dev_gridCellStartIndices), gridCellCount * sizeof(int));
checkCUDAError();
cudaMalloc(reinterpret_cast<void**>(&dev_gridCellEndIndices), gridCellCount * sizeof(int));
checkCUDAError();
//part 2.3 allocate memory for position and velocity struct
cudaMalloc(reinterpret_cast<void**>(&dev_vel_sorted), N * sizeof(*dev_vel_sorted));
checkCUDAError();
cudaMalloc(reinterpret_cast<void**>(&dev_pos_sorted), N * sizeof(*dev_pos_sorted));
checkCUDAError();
cudaDeviceSynchronize();
checkCUDAError();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAError();
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
//swap pointers (helps with ping pong)
template<typename T, typename std::enable_if<std::is_pointer<T>::value>::type* = nullptr>
void swap_pointers(T& p1, T& p2)
{
T temp = p1;
p1 = p2;
p2 = temp;
}
//clamps the vec3 by normalizing it
template<typename T>
__device__ glm::vec3 clamp_vec3(T&& vec)
{
if(glm::length(vec) > maxSpeed)
{
return glm::normalize(std::forward<T>(vec)) * maxSpeed;
}
return vec;
}
//helper function to check rule
template<typename CheckSuccessCallback>
__device__ void check_rule(float rule_distance, int this_boid, int other_boid, const glm::vec3* pos, CheckSuccessCallback check_success_callback)
{
const auto& this_boid_pos = pos[this_boid];
const auto& other_boid_pos = pos[other_boid];
if (this_boid != other_boid && glm::distance(this_boid_pos, other_boid_pos) < rule_distance)
{
check_success_callback();
}
}
//The following 3 functions checks two
//boids against each rule
__device__ void check_rule1(int this_boid, int other_boid, const glm::vec3* pos, glm::vec3& perceived_center, int& num_neighbors)
{
check_rule(rule1Distance, this_boid, other_boid, pos,
[&]()
{
const auto& other_boid_pos = pos[other_boid];
perceived_center += other_boid_pos;
num_neighbors++;
});
}
__device__ void check_rule2(int this_boid, int other_boid, const glm::vec3* pos, glm::vec3& c)
{
check_rule(rule2Distance, this_boid, other_boid, pos,
[&]()
{
const auto& this_boid_pos = pos[this_boid];
const auto& other_boid_pos = pos[other_boid];
c -= (other_boid_pos - this_boid_pos);
});
}
__device__ void check_rule3(int this_boid, int other_boid, const glm::vec3* pos, glm::vec3& perceived_velocity, int& num_neighbors,
const glm::vec3* vel)
{
check_rule(rule3Distance, this_boid, other_boid, pos,
[&]()
{
perceived_velocity += vel[other_boid];
num_neighbors++;
});
}
//The following 3 functions computes the
//rule velocity after all the boids in the
//area have been iterated through
__device__ glm::vec3 finish_rule1(const glm::vec3& this_boid_pos, glm::vec3& perceived_center, int& num_neighbors)
{
if(num_neighbors)
{
perceived_center /= num_neighbors;
return (perceived_center - this_boid_pos) * rule1Scale;
}
return {};
}
__device__ glm::vec3 finish_rule2(const glm::vec3& c)
{
return c * rule2Scale;
}
__device__ glm::vec3 finish_rule3(glm::vec3& perceived_velocity, int& num_neighbors)
{
if (num_neighbors)
{
perceived_velocity /= num_neighbors;
return perceived_velocity * rule3Scale;
}
return {};
}
//The following 3 functions computes each rule naively (iterate through all boids)
__device__ glm::vec3 compute_rule1_naive(int N, int this_boid, const glm::vec3 *pos, const glm::vec3 *vel)
{
glm::vec3 perceived_center{};
auto num_neighbors = 0;
for (auto other_boid = 0; other_boid < N; other_boid++)
{
check_rule1(this_boid, other_boid, pos, perceived_center, num_neighbors);
}
const auto& this_boid_pos = pos[this_boid];
return finish_rule1(this_boid_pos, perceived_center, num_neighbors);
}
__device__ glm::vec3 compute_rule2_naive(int N, int this_boid, const glm::vec3 *pos, const glm::vec3 *vel)
{
glm::vec3 c{};
for (auto other_boid = 0; other_boid < N; other_boid++)
{
check_rule2(this_boid, other_boid, pos, c);
}
return finish_rule2(c);
}
__device__ glm::vec3 compute_rule3_naive(int N, int this_boid, const glm::vec3 *pos, const glm::vec3 *vel)
{
glm::vec3 perceived_velocity{};
auto num_neighbors = 0;
for (auto other_boid = 0; other_boid < N; other_boid++)
{
check_rule3(this_boid, other_boid, pos, perceived_velocity, num_neighbors, vel);
}
return finish_rule3(perceived_velocity, num_neighbors);
}
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
//add the result of all the rules
auto result = compute_rule1_naive(N, iSelf, pos, vel) +
compute_rule2_naive(N, iSelf, pos, vel) +
compute_rule3_naive(N, iSelf, pos, vel);
return result;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
  // If we wrote into vel1 directly, other threads still reading vel1 for
  // their own neighbor checks could observe a mix of old and new values,
  // which would make the results inconsistent.
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
const auto vel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
vel2[index] = clamp_vec3(vel);
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
//probably
//for(z)
//  for(y)
//    for(x)
//because x is the fastest-varying term in the 1D index below, so keeping it
//in the innermost loop touches consecutive memory locations, while y and z
//change far less frequently
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
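//e.g. with gridResolution = 22 (22^3 = 10648 cells, as printed in
//initSimulation), cell (x, y, z) = (1, 2, 3) maps to 1 + 2 * 22 + 3 * 484 = 1497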
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
//set the indices of the boid to match the thread index (same index as the position and velocity indices)
indices[index] = index;
  //pos[index] - gridMin shifts the boid's position from the
  //[-100:100] simulation range into the [0:200] grid range
const auto b_pos = pos[index] - gridMin;
//get boid's position index or "grid cube" that the boid is in (truncated to an int)
const glm::ivec3 b_pos_index = b_pos * inverseCellWidth;
//the position is converted to a 1d int (more efficient instead of holding the entire vec3)
gridIndices[index] = gridIndex3Dto1D(b_pos_index.x, b_pos_index.y, b_pos_index.z, gridResolution);
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
//how to expand variadic templates in c++11
//https://stackoverflow.com/questions/41623422/c-expand-variadic-template-arguments-into-a-statement
template<typename Type = int, typename... Ts>
__device__ auto truncate_to(Ts*... ts)
{
(void)std::initializer_list<int>{(*ts = static_cast<Type>(*ts), 0)...};
}
//truncate glm vec to int
template<typename Vec, typename ConvertToType = int>
__device__ void truncate_glm_vec(Vec& vec)
{
truncate_to<ConvertToType>(&vec.x, &vec.y, &vec.z);
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
  //get the sorted boid grid value
  const auto grid_cell_value = particleGridIndices[index];
  //the first sorted boid always starts a new cell
  //(only read the previous value when index > 0, so we never touch
  //particleGridIndices[-1])
  if (index == 0)
  {
    gridCellStartIndices[grid_cell_value] = index;
  }
  //otherwise compare against the previous element; if they belong to
  //different cells, this index starts a new cell and is also the
  //exclusive end of the previous cell's group
  //c++17 if initializers: (VS15 doesn't have that support =/)
  //if(const auto previous_grid_cell = ...; previous_grid_cell != grid_cell)
  else
  {
    const auto previous_grid_cell_value = particleGridIndices[index - 1];
    if (previous_grid_cell_value != grid_cell_value)
    {
      gridCellStartIndices[grid_cell_value] = index;
      gridCellEndIndices[previous_grid_cell_value] = index;
    }
  }
  //the last sorted boid always closes its cell; keep the end exclusive so
  //the neighbor-search loop (which runs while start < end) also visits it
  if (index == N - 1)
  {
    gridCellEndIndices[grid_cell_value] = index + 1;
  }
}
template<int neighbor_max_size = NEIGHBORS_TO_CHECK, int neighbor_width = NEIGHBORS_TO_CHECK_WIDTH, typename CheckBoidFunc>
__device__ void do_something_with_boid_neighbors(const glm::vec3 b_pos, int grid_resolution, float cell_width, const glm::vec3& grid_min, int* grid_cell_start_index, int* grid_cell_end_index, CheckBoidFunc check_boid_func)
{
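  //NOTE: cell_width is 2x the largest rule distance, so a boid can only
  //interact with boids in the 2x2x2 block of cells on the side of its own
  //cell it sits closest to; the Left/Right test below picks that side per axis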
//offset boid position to [0:200]
const glm::vec3 b_pos_offset = b_pos - grid_min;
  //this is the boid grid cell index (calculated by dividing by cell width)
  const glm::ivec3 b_cell_index = b_pos_offset / cell_width;
  //position of the boid inside its own cell, in [0:cell_width); this decides
  //which half of the cell (and therefore which neighboring cells) to search
  const glm::vec3 b_pos_inside_cell_width = b_pos_offset - glm::vec3(b_cell_index) * cell_width;
//find all the neighbors and populate neighbor array
//which side is the grid on?
enum class GridCellSide : uint8_t
{
Left,
Right
};
  //find which side we are on (point is one of x, y, or z)
auto find_grid_cell_side = [&](float point)
{
return point < (cell_width / 2) ? GridCellSide::Left : GridCellSide::Right;
};
//find the side for x, y, z
const GridCellSide x_side = find_grid_cell_side(b_pos_inside_cell_width.x);
const GridCellSide y_side = find_grid_cell_side(b_pos_inside_cell_width.y);
const GridCellSide z_side = find_grid_cell_side(b_pos_inside_cell_width.z);
//find which side to iterate to (either -1 or 0)
int x_offset = -neighbor_width + 1;
int y_offset = -neighbor_width + 1;
int z_offset = -neighbor_width + 1;
if(x_side == GridCellSide::Right)
{
x_offset = 0;
}
if(y_side == GridCellSide::Right)
{
y_offset = 0;
}
if(z_side == GridCellSide::Right)
{
z_offset = 0;
}
//iterate x (either from -1 ... 0 or 0 ... 1)
for(int i = x_offset; i < x_offset + neighbor_width; ++i)
{
const int x = b_cell_index.x + i;
//check if out of bounds
if(x < 0 || x >= grid_resolution)
{
continue;
}
//iterate y
for(int k = y_offset; k < y_offset + neighbor_width; ++k)
{
const int y = b_cell_index.y + k;
//check if out of bounds
if (y < 0 || y >= grid_resolution)
{
continue;
}
//iterate z
for(int l = z_offset; l < z_offset + neighbor_width; ++l)
{
const int z = b_cell_index.z + l;
//check if out of bounds
if (z < 0 || z >= grid_resolution)
{
continue;
}
        //get the index into the grid_cell_start_index/grid_cell_end_index arrays
const int index_into_grid_cell_pointer_index = gridIndex3Dto1D(x, y, z, grid_resolution);
//compute the start and end indices for the grid cell index
auto grid_start_index = grid_cell_start_index[index_into_grid_cell_pointer_index];
const auto grid_end_index = grid_cell_end_index[index_into_grid_cell_pointer_index];
//iterate through the boids in the grid cell
for (; grid_start_index < grid_end_index; ++grid_start_index)
{
//pass in the boid array index
check_boid_func(grid_start_index);
}
}
}
}
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
//get the boid index
const auto this_boid_index = particleArrayIndices[index];
//get location of boid (float)
const auto this_boid_pos = pos[this_boid_index];
//do rule1, rule2, rule3
int rule1_num_neighbors = 0;
int rule3_num_neighbors = 0;
glm::vec3 perceived_center = {};
glm::vec3 c = {};
glm::vec3 perceived_velocity = {};
//for each neighbor, compute the rules and add them to perceived_center, c, perceived_velocity (rules 1, 2, 3)
do_something_with_boid_neighbors(this_boid_pos,
gridResolution, cellWidth,
gridMin, gridCellStartIndices,
gridCellEndIndices,
[&](const int index_into_boid_array)
{
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
const int other_boid_index = particleArrayIndices[index_into_boid_array];
check_rule1(this_boid_index, other_boid_index, pos,
perceived_center, rule1_num_neighbors);
check_rule2(this_boid_index, other_boid_index, pos, c);
check_rule3(this_boid_index, other_boid_index, pos,
perceived_velocity, rule3_num_neighbors, vel1);
}
);
// - Clamp the speed change before putting the new speed in vel2
//compute each rule
const auto rule_1_result = finish_rule1(this_boid_pos, perceived_center, rule1_num_neighbors);
const auto rule_2_result = finish_rule2(c);
const auto rule_3_result = finish_rule3(perceived_velocity, rule3_num_neighbors);
//add to velocity
const auto vel = vel1[this_boid_index] + rule_1_result + rule_2_result + rule_3_result;
vel2[this_boid_index] = clamp_vec3(vel);
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
//get the boid index
const auto this_boid_index = index;
//get location of boid (float)
const auto this_boid_pos = pos[this_boid_index];
//do rule1, rule2, rule3
int rule1_num_neighbors = 0;
int rule3_num_neighbors = 0;
glm::vec3 perceived_center = {};
glm::vec3 c = {};
glm::vec3 perceived_velocity = {};
//for each neighbor, compute the rules and add them to perceived_center, c, perceived_velocity (rules 1, 2, 3)
do_something_with_boid_neighbors(this_boid_pos,
gridResolution, cellWidth,
gridMin, gridCellStartIndices,
gridCellEndIndices,
[&](const int index_into_boid_array)
{
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
const int other_boid_index = index_into_boid_array;
check_rule1(this_boid_index, other_boid_index, pos,
perceived_center, rule1_num_neighbors);
check_rule2(this_boid_index, other_boid_index, pos, c);
check_rule3(this_boid_index, other_boid_index, pos,
perceived_velocity, rule3_num_neighbors, vel1);
}
);
// - Clamp the speed change before putting the new speed in vel2
//compute each rule
const auto rule_1_result = finish_rule1(this_boid_pos, perceived_center, rule1_num_neighbors);
const auto rule_2_result = finish_rule2(c);
const auto rule_3_result = finish_rule3(perceived_velocity, rule3_num_neighbors);
//add to velocity
const auto vel = vel1[this_boid_index] + rule_1_result + rule_2_result + rule_3_result;
vel2[this_boid_index] = clamp_vec3(vel);
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAError();
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel1);
checkCUDAError();
swap_pointers(dev_vel2, dev_vel1);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 cellPerGrid((gridCellCount + blockSize - 1) / blockSize);
//reset the grid cell start / end
kernResetIntBuffer<<<cellPerGrid, blockSize>>>(gridCellCount, dev_gridCellStartIndices, std::numeric_limits<int>::min());
kernResetIntBuffer<<<cellPerGrid, blockSize>>>(gridCellCount, dev_gridCellEndIndices, std::numeric_limits<int>::min());
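  //cells that never receive a boid keep INT_MIN in both start and end, so
  //start < end is false and the neighbor search simply skips them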
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
//compute the indices
kernComputeIndices<<<fullBlocksPerGrid, blockSize>>>(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAError();
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
//sort with thrust, so we have <key = grid index, value = array_index>
thrust::device_ptr<int> thrust_grid_indices(dev_particleGridIndices);
thrust::device_ptr<int> thrust_array_indices(dev_particleArrayIndices);
thrust::sort_by_key(thrust_grid_indices, thrust_grid_indices + numObjects, thrust_array_indices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
//make sure the start and end indices are mapped to the correct grid indices
kernIdentifyCellStartEnd<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAError();
// - Perform velocity updates using neighbor search
  //search the neighboring grid cells for nearby boids and accumulate the velocity change for each boid
kernUpdateVelNeighborSearchScattered<<<fullBlocksPerGrid, blockSize>>>(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
checkCUDAError();
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel1);
checkCUDAError();
swap_pointers(dev_vel2, dev_vel1);
}
__global__ void kernSortPosAndVelByArrayIndicies(int N, glm::vec3* result_pos, glm::vec3* result_vel,
glm::vec3* pos, glm::vec3* vel, int* grid_array)
{
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
//fetch the pos and vel associated with the grid cell array
const int pos_and_vel_index = grid_array[index];
//copy over the pos/vel into the index that the grid array was in
result_pos[index] = pos[pos_and_vel_index];
result_vel[index] = vel[pos_and_vel_index];
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 cellPerGrid((gridCellCount + blockSize - 1) / blockSize);
//reset the grid cell start / end
kernResetIntBuffer<<<cellPerGrid, blockSize>>>(gridCellCount, dev_gridCellStartIndices, std::numeric_limits<int>::min());
kernResetIntBuffer<<<cellPerGrid, blockSize>>>(gridCellCount, dev_gridCellEndIndices, std::numeric_limits<int>::min());
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
//compute the indices
kernComputeIndices<<<fullBlocksPerGrid, blockSize>>>(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAError();
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
//sort with thrust, so we have <key = grid index, value = array_index>
thrust::device_ptr<int> thrust_grid_indices(dev_particleGridIndices);
thrust::device_ptr<int> thrust_array_indices(dev_particleArrayIndices);
thrust::sort_by_key(thrust_grid_indices, thrust_grid_indices + numObjects, thrust_array_indices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
  //make sure the start and end indices are mapped to the correct grid indices
kernIdentifyCellStartEnd<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAError();
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
  //reorder pos and vel so each grid cell's boids are contiguous (sorted by the boid array indices)
kernSortPosAndVelByArrayIndicies<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos_sorted, dev_vel_sorted, dev_pos, dev_vel1, dev_particleArrayIndices);
checkCUDAError();
// - Perform velocity updates using neighbor search
  //search the neighboring grid cells for nearby boids and accumulate the velocity change for each boid
kernUpdateVelNeighborSearchCoherent<<<fullBlocksPerGrid, blockSize>>>(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos_sorted, dev_vel_sorted, dev_vel2);
checkCUDAError();
  //update positions based on the sorted pos/vel buffers
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos_sorted, dev_vel_sorted);
checkCUDAError();
//ping pong
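  //note: the three velocity swaps below are equivalent to a single
  //swap(dev_vel1, dev_vel2); afterwards dev_vel1 holds the new velocities in
  //sorted order, matching dev_pos after the pos/pos_sorted swap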
swap_pointers(dev_vel2, dev_vel_sorted);
swap_pointers(dev_vel2, dev_vel1);
swap_pointers(dev_vel_sorted, dev_vel1);
swap_pointers(dev_pos, dev_pos_sorted);
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
checkCUDAError();
cudaFree(dev_vel2);
checkCUDAError();
cudaFree(dev_pos);
checkCUDAError();
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
cudaFree(dev_particleArrayIndices);
checkCUDAError();
cudaFree(dev_particleGridIndices);
checkCUDAError();
cudaFree(dev_gridCellStartIndices);
checkCUDAError();
cudaFree(dev_gridCellEndIndices);
checkCUDAError();
//2.3 cleanup pos and vel struct
cudaFree(dev_vel_sorted);
checkCUDAError();
cudaFree(dev_pos_sorted);
checkCUDAError();
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAError();
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAError();
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
checkCUDAError();
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
checkCUDAError();
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAError();
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAError();
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
checkCUDAError();
cudaFree(dev_intValues);
checkCUDAError();
return;
}
|
7f6f02d28afd9701fa838b26c864674add70fce7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// require T <= Tmax, T % 4 == 0, B % BF == 0, B % BB == 0 (Tmax and BF and BB are passed by compiler)
#define F4(A, B) ((float4 *)(A))[(B) >> 2]
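// F4(A, B): view A as an array of float4 and access the 4 consecutive floats
// starting at element B (B must be a multiple of 4 and A 16-byte aligned)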
template <typename F>
__global__ void kernel_forward(const F *__restrict__ const __w, const F *__restrict__ const __k, F *__restrict__ const x,
const F eps, const int B, const int C, const int T) {
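    // threadIdx.x covers 4 consecutive time steps (t..t+3) as one float4;
    // blockIdx.y picks a row i, and the j-loop reuses the shared weight row
    // w[i % C] for BF rows of k spaced (B*C)/BF apart (these share a channel
    // because B % BF == 0). Each output works out to
    //   x[row][p] = eps + sum_{u <= p} ww[(T-1) - (p - u)] * k[row][u]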
const int i = blockIdx.y;
const int ij = (B * C) / BF;
const int t = threadIdx.x << 2;
__shared__ F ww[Tmax];
__shared__ F kk[Tmax * BF];
F4(ww, t) = F4(__w, t + T * (i % C));
#pragma unroll
for (int j = 0; j < BF; j++) {
F4(kk, t + Tmax * j) = F4(__k, t + T * (i + ij * j));
}
__syncthreads();
float4 s[BF];
#pragma unroll
for (int j = 0; j < BF; j++) {
s[j] = {eps, eps, eps, eps};
}
const F *__restrict__ const w = ww + T - t - 4;
for (int u = 0; u <= t; u++) {
#pragma unroll
for (int j = 0; j < BF; j++) {
const F x = kk[u + Tmax * j];
s[j].x += w[u + 3] * x;
s[j].y += w[u + 2] * x;
s[j].z += w[u + 1] * x;
s[j].w += w[u + 0] * x;
}
}
#pragma unroll
for (int j = 0; j < BF; j++) {
const F *__restrict__ const k = kk + Tmax * j;
s[j].y += w[t + 3] * k[t + 1];
s[j].z += w[t + 2] * k[t + 1];
s[j].z += w[t + 3] * k[t + 2];
s[j].w += w[t + 1] * k[t + 1];
s[j].w += w[t + 2] * k[t + 2];
s[j].w += w[t + 3] * k[t + 3];
F4(x, t + T * (i + ij * j)) = s[j];
}
}
template <typename F>
__global__ void kernel_backward_W(const F *__restrict__ const __w, const F *__restrict__ const __k, const F *__restrict__ const __gwk,
F *__restrict__ const gw, F *__restrict__ const gk,
const int B, const int C, const int T) {
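    // single-row variant of the dL/dw pass (one (batch, channel) row per
    // block, no BB batching); not referenced by the host wrappers in this file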
const int i = blockIdx.y;
const int t = threadIdx.x << 2;
__shared__ F k[Tmax];
__shared__ F gg[Tmax];
F4(k, t) = F4(__k, t + T * i);
F4(gg, t) = F4(__gwk, t + T * i);
__syncthreads();
float4 s = {0, 0, 0, 0};
const F *__restrict__ const g = gg + T - t - 4;
for (int u = 0; u <= t; u++) {
F x = k[u];
s.x += g[u + 3] * x;
s.y += g[u + 2] * x;
s.z += g[u + 1] * x;
s.w += g[u + 0] * x;
}
s.y += g[t + 3] * k[t + 1];
s.z += g[t + 2] * k[t + 1];
s.z += g[t + 3] * k[t + 2];
s.w += g[t + 1] * k[t + 1];
s.w += g[t + 2] * k[t + 2];
s.w += g[t + 3] * k[t + 3];
F4(gw, t + T * i) = s;
}
void cuda_forward(const float *w, const float *k, float *x, float eps, int B, int C, int T) {
dim3 gridDim(1, B * C / BF);
dim3 blockDim(T >> 2);
hipLaunchKernelGGL(( kernel_forward), dim3(gridDim), dim3(blockDim), 0, 0, w, k, x, eps, B, C, T);
}
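// A minimal host-side sketch (hypothetical buffer names), assuming w, k and x
// are device buffers laid out row-major as w:[C, T] and k, x:[B*C, T], with
// T <= Tmax, T % 4 == 0 and B % BF == 0:
// cuda_forward(d_w, d_k, d_x, /*eps=*/0.0f, B, C, T);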
template <typename F>
__global__ void kernel_backward(const F *__restrict__ const __w, const F *__restrict__ const __k, const F *__restrict__ const __gwk,
F *__restrict__ const gw, F *__restrict__ const gk,
const int B, const int C, const int T) {
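    // gwk appears to be the incoming gradient w.r.t. the forward output.
    // The first pass below accumulates dL/dw into gw (correlating gwk with k,
    // mirroring the forward kernel); the second pass accumulates dL/dk into gk
    // (correlating gwk with w in the opposite time direction).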
const int i = blockIdx.y;
const int ij = (B * C) / BB;
const int t = threadIdx.x << 2;
__shared__ F w[Tmax];
__shared__ F kk[Tmax * BB];
__shared__ F gg[Tmax * BB];
F4(w, t) = F4(__w, t + T * (i % C));
#pragma unroll
for (int j = 0; j < BB; j++) {
F4(kk, t + Tmax * j) = F4(__k, t + T * (i + ij * j));
F4(gg, t + Tmax * j) = F4(__gwk, t + T * (i + ij * j));
}
__syncthreads();
float4 s[BB];
#pragma unroll
for (int j = 0; j < BB; j++) {
s[j] = {0, 0, 0, 0};
}
for (int u = 0; u <= t; u++) {
#pragma unroll
for (int j = 0; j < BB; j++) {
const F *__restrict__ const g = gg + Tmax * j + T - t - 4;
F x = kk[u + Tmax * j];
s[j].x += g[u + 3] * x;
s[j].y += g[u + 2] * x;
s[j].z += g[u + 1] * x;
s[j].w += g[u + 0] * x;
}
}
#pragma unroll
for (int j = 0; j < BB; j++) {
const F *__restrict__ const k = kk + Tmax * j;
const F *__restrict__ const g = gg + Tmax * j + T - t - 4;
s[j].y += g[t + 3] * k[t + 1];
s[j].z += g[t + 2] * k[t + 1];
s[j].z += g[t + 3] * k[t + 2];
s[j].w += g[t + 1] * k[t + 1];
s[j].w += g[t + 2] * k[t + 2];
s[j].w += g[t + 3] * k[t + 3];
F4(gw, t + T * (i + ij * j)) = s[j];
}
#pragma unroll
for (int j = 0; j < BB; j++) {
s[j] = {0, 0, 0, 0};
}
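// second pass: accumulate the gradient w.r.t. k (adjoint of the forward
// correlation); the fix-up block after this loop supplies the w[t+0 .. t+2]
// terms that the u >= t+3 loop misses for the earlier lanes of the float4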
for (int u = t + 3; u < T; u++) {
F x = w[u];
#pragma unroll
for (int j = 0; j < BB; j++) {
const F *__restrict__ const g = gg + Tmax * j + T + t - 3;
s[j].x += g[2 - u] * x;
s[j].y += g[3 - u] * x;
s[j].z += g[4 - u] * x;
s[j].w += g[5 - u] * x;
}
}
#pragma unroll
for (int j = 0; j < BB; j++) {
const F *__restrict__ const g = gg + Tmax * j + T + t - 3;
s[j].x += g[2 - t] * w[t + 0];
s[j].x += g[1 - t] * w[t + 1];
s[j].x += g[0 - t] * w[t + 2];
s[j].y += g[2 - t] * w[t + 1];
s[j].y += g[1 - t] * w[t + 2];
s[j].z += g[2 - t] * w[t + 2];
F4(gk, t + T * (i + ij * j)) = s[j];
}
}
void cuda_backward(const float *w, const float *k, const float *gwk, float *gw, float *gk, int B, int C, int T) {
dim3 gridDim(1, B * C / BB);
dim3 blockDim(T >> 2);
hipLaunchKernelGGL(( kernel_backward), dim3(gridDim), dim3(blockDim), 0, 0, w, k, gwk, gw, gk, B, C, T);
}
| 7f6f02d28afd9701fa838b26c864674add70fce7.cu | #include <stdio.h>
// require T <= Tmax, T % 4 == 0, B % BF == 0, B % BB == 0 (Tmax, BF and BB are passed by the compiler)
#define F4(A, B) ((float4 *)(A))[(B) >> 2]
template <typename F>
__global__ void kernel_forward(const F *__restrict__ const __w, const F *__restrict__ const __k, F *__restrict__ const x,
const F eps, const int B, const int C, const int T) {
const int i = blockIdx.y;
const int ij = (B * C) / BF;
const int t = threadIdx.x << 2;
__shared__ F ww[Tmax];
__shared__ F kk[Tmax * BF];
F4(ww, t) = F4(__w, t + T * (i % C));
#pragma unroll
for (int j = 0; j < BF; j++) {
F4(kk, t + Tmax * j) = F4(__k, t + T * (i + ij * j));
}
__syncthreads();
float4 s[BF];
#pragma unroll
for (int j = 0; j < BF; j++) {
s[j] = {eps, eps, eps, eps};
}
const F *__restrict__ const w = ww + T - t - 4;
for (int u = 0; u <= t; u++) {
#pragma unroll
for (int j = 0; j < BF; j++) {
const F x = kk[u + Tmax * j];
s[j].x += w[u + 3] * x;
s[j].y += w[u + 2] * x;
s[j].z += w[u + 1] * x;
s[j].w += w[u + 0] * x;
}
}
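// the loop above only covers u <= t; the block below adds the remaining
// causal terms (u = t+1 .. t+3) needed by the later outputs of this float4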
#pragma unroll
for (int j = 0; j < BF; j++) {
const F *__restrict__ const k = kk + Tmax * j;
s[j].y += w[t + 3] * k[t + 1];
s[j].z += w[t + 2] * k[t + 1];
s[j].z += w[t + 3] * k[t + 2];
s[j].w += w[t + 1] * k[t + 1];
s[j].w += w[t + 2] * k[t + 2];
s[j].w += w[t + 3] * k[t + 3];
F4(x, t + T * (i + ij * j)) = s[j];
}
}
template <typename F>
__global__ void kernel_backward_W(const F *__restrict__ const __w, const F *__restrict__ const __k, const F *__restrict__ const __gwk,
F *__restrict__ const gw, F *__restrict__ const gk,
const int B, const int C, const int T) {
const int i = blockIdx.y;
const int t = threadIdx.x << 2;
__shared__ F k[Tmax];
__shared__ F gg[Tmax];
F4(k, t) = F4(__k, t + T * i);
F4(gg, t) = F4(__gwk, t + T * i);
__syncthreads();
float4 s = {0, 0, 0, 0};
const F *__restrict__ const g = gg + T - t - 4;
for (int u = 0; u <= t; u++) {
F x = k[u];
s.x += g[u + 3] * x;
s.y += g[u + 2] * x;
s.z += g[u + 1] * x;
s.w += g[u + 0] * x;
}
s.y += g[t + 3] * k[t + 1];
s.z += g[t + 2] * k[t + 1];
s.z += g[t + 3] * k[t + 2];
s.w += g[t + 1] * k[t + 1];
s.w += g[t + 2] * k[t + 2];
s.w += g[t + 3] * k[t + 3];
F4(gw, t + T * i) = s;
}
void cuda_forward(const float *w, const float *k, float *x, float eps, int B, int C, int T) {
dim3 gridDim(1, B * C / BF);
dim3 blockDim(T >> 2);
kernel_forward<<<gridDim, blockDim>>>(w, k, x, eps, B, C, T);
}
template <typename F>
__global__ void kernel_backward(const F *__restrict__ const __w, const F *__restrict__ const __k, const F *__restrict__ const __gwk,
F *__restrict__ const gw, F *__restrict__ const gk,
const int B, const int C, const int T) {
const int i = blockIdx.y;
const int ij = (B * C) / BB;
const int t = threadIdx.x << 2;
__shared__ F w[Tmax];
__shared__ F kk[Tmax * BB];
__shared__ F gg[Tmax * BB];
F4(w, t) = F4(__w, t + T * (i % C));
#pragma unroll
for (int j = 0; j < BB; j++) {
F4(kk, t + Tmax * j) = F4(__k, t + T * (i + ij * j));
F4(gg, t + Tmax * j) = F4(__gwk, t + T * (i + ij * j));
}
__syncthreads();
float4 s[BB];
#pragma unroll
for (int j = 0; j < BB; j++) {
s[j] = {0, 0, 0, 0};
}
for (int u = 0; u <= t; u++) {
#pragma unroll
for (int j = 0; j < BB; j++) {
const F *__restrict__ const g = gg + Tmax * j + T - t - 4;
F x = kk[u + Tmax * j];
s[j].x += g[u + 3] * x;
s[j].y += g[u + 2] * x;
s[j].z += g[u + 1] * x;
s[j].w += g[u + 0] * x;
}
}
#pragma unroll
for (int j = 0; j < BB; j++) {
const F *__restrict__ const k = kk + Tmax * j;
const F *__restrict__ const g = gg + Tmax * j + T - t - 4;
s[j].y += g[t + 3] * k[t + 1];
s[j].z += g[t + 2] * k[t + 1];
s[j].z += g[t + 3] * k[t + 2];
s[j].w += g[t + 1] * k[t + 1];
s[j].w += g[t + 2] * k[t + 2];
s[j].w += g[t + 3] * k[t + 3];
F4(gw, t + T * (i + ij * j)) = s[j];
}
#pragma unroll
for (int j = 0; j < BB; j++) {
s[j] = {0, 0, 0, 0};
}
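// second pass: accumulate the gradient w.r.t. k (adjoint of the forward
// correlation); the fix-up block after this loop supplies the w[t+0 .. t+2]
// terms that the u >= t+3 loop misses for the earlier lanes of the float4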
for (int u = t + 3; u < T; u++) {
F x = w[u];
#pragma unroll
for (int j = 0; j < BB; j++) {
const F *__restrict__ const g = gg + Tmax * j + T + t - 3;
s[j].x += g[2 - u] * x;
s[j].y += g[3 - u] * x;
s[j].z += g[4 - u] * x;
s[j].w += g[5 - u] * x;
}
}
#pragma unroll
for (int j = 0; j < BB; j++) {
const F *__restrict__ const g = gg + Tmax * j + T + t - 3;
s[j].x += g[2 - t] * w[t + 0];
s[j].x += g[1 - t] * w[t + 1];
s[j].x += g[0 - t] * w[t + 2];
s[j].y += g[2 - t] * w[t + 1];
s[j].y += g[1 - t] * w[t + 2];
s[j].z += g[2 - t] * w[t + 2];
F4(gk, t + T * (i + ij * j)) = s[j];
}
}
void cuda_backward(const float *w, const float *k, const float *gwk, float *gw, float *gk, int B, int C, int T) {
dim3 gridDim(1, B * C / BB);
dim3 blockDim(T >> 2);
kernel_backward<<<gridDim, blockDim>>>(w, k, gwk, gw, gk, B, C, T);
}
|
a167732505e5c7f75269c89f1d577e824ab59690.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
using namespace std;
__global__ void func(int *dev_arr1, int *dev_arr2, int *dev_dot){
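// assumes a single block with (at least) 3 threads: each thread writes one
// elementwise product into shared memory and thread 0 sums the three entries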
__shared__ int temp[3];
int index = threadIdx.x;
if (index < 3){
temp[index] = dev_arr1[index] * dev_arr2[index];
}
__syncthreads();
if (index == 0){
*dev_dot = temp[0] + temp[1] + temp[2];
}
}
int main(){
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int *arr1, *arr2, *dot;
int *dev_arr1, *dev_arr2, *dev_dot;
int size = 3 * sizeof(int); // i j k
arr1 = (int*) malloc(size);
arr2 = (int*) malloc(size);
dot = (int*) malloc(sizeof(int));
hipMalloc(&dev_arr1, size);
hipMalloc(&dev_arr2, size);
hipMalloc(&dev_dot, sizeof(int));
cout << "Enter values of x, y, z. Vector1: xi + yj + zk: ";
cin>>arr1[0]>>arr1[1]>>arr1[2];
cout << "Enter values of x, y, z. Vector2: xi + yj + zk: ";
cin>>arr2[0]>>arr2[1]>>arr2[2];
hipMemcpy(dev_arr1, arr1, size, hipMemcpyHostToDevice);
hipMemcpy(dev_arr2, arr2, size, hipMemcpyHostToDevice);
hipMemcpy(dev_dot, dot, sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(start);
hipLaunchKernelGGL(( func), dim3(1),dim3(3), 0, 0, dev_arr1, dev_arr2, dev_dot);
hipEventRecord(stop);
hipMemcpy(dot, dev_dot, sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
cout << "Dot product is: " << *dot << endl;
float millis = 0;
hipEventElapsedTime(&millis, start, stop);
cout << "Elasped Time: " << millis << endl;
return 0;
} | a167732505e5c7f75269c89f1d577e824ab59690.cu | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
__global__ void func(int *dev_arr1, int *dev_arr2, int *dev_dot){
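// assumes a single block with (at least) 3 threads: each thread writes one
// elementwise product into shared memory and thread 0 sums the three entries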
__shared__ int temp[3];
int index = threadIdx.x;
if (index < 3){
temp[index] = dev_arr1[index] * dev_arr2[index];
}
__syncthreads();
if (index == 0){
*dev_dot = temp[0] + temp[1] + temp[2];
}
}
int main(){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int *arr1, *arr2, *dot;
int *dev_arr1, *dev_arr2, *dev_dot;
int size = 3 * sizeof(int); // i j k
arr1 = (int*) malloc(size);
arr2 = (int*) malloc(size);
dot = (int*) malloc(sizeof(int));
cudaMalloc(&dev_arr1, size);
cudaMalloc(&dev_arr2, size);
cudaMalloc(&dev_dot, sizeof(int));
cout << "Enter values of x, y, z. Vector1: xi + yj + zk: ";
cin>>arr1[0]>>arr1[1]>>arr1[2];
cout << "Enter values of x, y, z. Vector2: xi + yj + zk: ";
cin>>arr2[0]>>arr2[1]>>arr2[2];
cudaMemcpy(dev_arr1, arr1, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_arr2, arr2, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_dot, dot, sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
func<<<1,3>>>(dev_arr1, dev_arr2, dev_dot);
cudaEventRecord(stop);
cudaMemcpy(dot, dev_dot, sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cout << "Dot product is: " << *dot << endl;
float millis = 0;
cudaEventElapsedTime(&millis, start, stop);
cout << "Elasped Time: " << millis << endl;
return 0;
} |
21807bbdcd718860a4393d1c835930537f4e7d05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2015 Adam Sierakowski, The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_quadrature.h"
__device__ void rtp2xyz(real r, real theta, real phi, real *x, real *y, real *z)
{
*x = r * sin(theta) * cos(phi);
*y = r * sin(theta) * sin(phi);
*z = r * cos(theta);
}
__device__ void cart2sphere(real u, real v, real w, real theta, real phi,
real *ur, real *ut, real *up)
{
real st = sin(theta);
real ct = cos(theta);
real sp = sin(phi);
real cp = cos(phi);
*ur = st * (u * cp + v * sp) + w * ct;
*ut = ct * (u * cp + v * sp) - w * st;
*up = -u * sp + v * cp;
}
__global__ void check_nodes(int nparts, part_struct *parts, dom_struct *dom,
real *theta, real *phi, int nnodes, BC bc)
{
int node = threadIdx.x;
int part = blockIdx.x;
// convert node (r, theta, phi) to (x, y, z)
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp);
// shift from particle center
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
// start off with all -1's
parts[part].nodes[node] = -1;
// check if the node is interfered with by a wall
// compute distance between node and walls
// set equal to some number to identify which wall is interfering
if(x - dom->xs < 0) {
if(bc.uW == DIRICHLET || bc.vW == DIRICHLET || bc.wW == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -10;
} if(x - dom->xe > 0) {
if(bc.uE == DIRICHLET || bc.vE == DIRICHLET || bc.wE == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -11;
} if(y - dom->ys < 0) {
if(bc.uS == DIRICHLET || bc.vS == DIRICHLET || bc.wS == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -12;
} if(y - dom->ye > 0) {
if(bc.uN == DIRICHLET || bc.vN == DIRICHLET || bc.wN == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -13;
} if(z - dom->zs < 0) {
if(bc.uB == DIRICHLET || bc.vB == DIRICHLET || bc.wB == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -14;
} if(z - dom->ze > 0) {
if(bc.uT == DIRICHLET || bc.vT == DIRICHLET || bc.wT == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -15;
}
}
__global__ void interpolate_nodes(real *p0, real *p, real *u, real *v, real *w,
real rho_f, real nu, gradP_struct gradP,
part_struct *parts, dom_struct *dom, real *theta, real *phi, int nnodes,
real *pp, real *ur, real *ut, real *up, real dt0, real dt, BC bc)
{
int node = threadIdx.x;
int part = blockIdx.x;
// the node number of the intersecting node
int intnode = parts[part].nodes[node];
if(intnode < 0) intnode = part;
real ddx = 1. / dom->dx;
real ddy = 1. / dom->dy;
real ddz = 1. / dom->dz;
real ox = parts[part].ox;
real oy = parts[part].oy;
real oz = parts[part].oz;
real oxdot = parts[part].oxdot;
real oydot = parts[part].oydot;
real ozdot = parts[part].ozdot;
real udot = parts[part].udot;
real vdot = parts[part].vdot;
real wdot = parts[part].wdot;
real uu, vv, ww; // temporary nodes for Cartesian result of interpolation
real uuwall, vvwall, wwwall;
// convert node (r, theta, phi) to (x, y, z)
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp);
// shift from particle center
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
if(x < dom->xs && bc.uW == PERIODIC) x = x + dom->xl;
else if(x > dom->xe && bc.uE == PERIODIC) x = x - dom->xl;
if(y < dom->ys && bc.vS == PERIODIC) y = y + dom->yl;
else if(y > dom->ye && bc.vN == PERIODIC) y = y - dom->yl;
if(z < dom->zs && bc.wB == PERIODIC) z = z + dom->zl;
else if(z > dom->ze && bc.wT == PERIODIC) z = z - dom->zl;
__syncthreads();
// find index of cell containing node
int i = floor((x - dom->xs) * ddx) + DOM_BUF;
int j = floor((y - dom->ys) * ddy) + DOM_BUF;
int k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gcc.is) i = dom->Gcc.is;
if(j < dom->Gcc.js) j = dom->Gcc.js;
if(k < dom->Gcc.ks) k = dom->Gcc.ks;
if(i > dom->Gcc.ie-1) i = dom->Gcc.ie-1;
if(j > dom->Gcc.je-1) j = dom->Gcc.je-1;
if(k > dom->Gcc.ke-1) k = dom->Gcc.ke-1;
int C = i + j*dom->Gcc.s1b + k*dom->Gcc.s2b;
// Cartesian location of center of cell
real xx = (i-0.5) * dom->dx + dom->xs;
real yy = (j-0.5) * dom->dy + dom->ys;
real zz = (k-0.5) * dom->dz + dom->zs;
// interpolate pressure
real pc = p[C];
real pw = p[C-1];
real pe = p[C+1];
real ps = p[C-dom->Gcc.s1b];
real pn = p[C+dom->Gcc.s1b];
real pb = p[C-dom->Gcc.s2b];
real pt = p[C+dom->Gcc.s2b];
real dpdx = 0.5*(pe - pw) * ddx;
real dpdy = 0.5*(pn - ps) * ddy;
real dpdz = 0.5*(pt - pb) * ddz;
pp[node+nnodes*part] = pc + dpdx*(x-xx) + dpdy*(y-yy) + dpdz*(z-zz);
// switch to particle rest frame
real ocrossr2 = (oy*zp - oz*yp) * (oy*zp - oz*yp);
ocrossr2 += (ox*zp - oz*xp) * (ox*zp - oz*xp);
ocrossr2 += (ox*yp - oy*xp) * (ox*yp - oy*xp);
real rhoV = rho_f;
real accdotr = (-gradP.x/rhoV - udot)*xp + (-gradP.y/rhoV - vdot)*yp
+ (-gradP.z/rhoV - wdot)*zp;
pp[node+nnodes*part] -= 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
// zero if this node intersects wall
pp[node+nnodes*part] = (parts[part].nodes[node]==-1)*pp[node+part*nnodes];
// interpolate velocities
// don't work with cell-center anymore;
// find closest cell face in x-direction
// interpolate u-velocity
i = round((x - dom->xs) * ddx - 0.5) + DOM_BUF;
j = floor((y - dom->ys) * ddy) + DOM_BUF;
k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gfx.is) i = dom->Gfx.is;
if(j < dom->Gfx.js) j = dom->Gfx.js;
if(k < dom->Gfx.ks) k = dom->Gfx.ks;
if(i > dom->Gfx.ie-1) i = dom->Gfx.ie-1;
if(j > dom->Gfx.je-1) j = dom->Gfx.je-1;
if(k > dom->Gfx.ke-1) k = dom->Gfx.ke-1;
xx = (i-DOM_BUF) * dom->dx + dom->xs;
yy = (j-0.5) * dom->dy + dom->ys;
zz = (k-0.5) * dom->dz + dom->zs;
C = i + j*dom->Gfx.s1b + k*dom->Gfx.s2b;
real dudx = 0.5*(u[C+1] - u[C-1]) * ddx;
real dudy = 0.5*(u[C+dom->Gfx.s1b] - u[C-dom->Gfx.s1b]) * ddy;
real dudz = 0.5*(u[C+dom->Gfx.s2b] - u[C-dom->Gfx.s2b]) * ddz;
uu = u[C] + dudx * (x - xx) + dudy * (y - yy) + dudz * (z - zz);
// set uuwall equal to interfering wall u-velocity
uuwall = (parts[part].nodes[node] == -10)*bc.uWD
+ (parts[part].nodes[node] == -11)*bc.uED
+ (parts[part].nodes[node] == -12)*bc.uSD
+ (parts[part].nodes[node] == -13)*bc.uND
+ (parts[part].nodes[node] == -14)*bc.uBD
+ (parts[part].nodes[node] == -15)*bc.uTD;
// switch to particle rest frame
real rs3 = parts[part].rs*parts[part].rs*parts[part].rs;
real rs5 = rs3*parts[part].rs*parts[part].rs;
real a5 = parts[part].r*parts[part].r*parts[part].r*parts[part].r*parts[part].r;
real ocrossr_x = oy*zp - oz*yp;
real odotcrossr_x = oydot*zp - ozdot*yp;
uu -= parts[part].u + ocrossr_x;
uu -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_x;
uuwall -= parts[part].u + ocrossr_x;
uuwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_x;
// set actual node value based on whether it is interfered with
uu = (parts[part].nodes[node]==-1)*uu
+ (parts[part].nodes[node]<-1)*uuwall;
//printf("uu = %f uuwall = %f\n", uu + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x, uuwall + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x);
// interpolate v-velocity
i = floor((x - dom->xs) * ddx) + DOM_BUF;
j = round((y - dom->ys) * ddy - 0.5) + DOM_BUF;
k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gfy.is) i = dom->Gfy.is;
if(j < dom->Gfy.js) j = dom->Gfy.js;
if(k < dom->Gfy.ks) k = dom->Gfy.ks;
if(i > dom->Gfy.ie-1) i = dom->Gfy.ie-1;
if(j > dom->Gfy.je-1) j = dom->Gfy.je-1;
if(k > dom->Gfy.ke-1) k = dom->Gfy.ke-1;
xx = (i-0.5) * dom->dx + dom->xs;
yy = (j-DOM_BUF) * dom->dy + dom->ys;
zz = (k-0.5) * dom->dz + dom->zs;
C = i + j*dom->Gfy.s1b + k*dom->Gfy.s2b;
real dvdx = 0.5*(v[C+1] - v[C-1]) * ddx;
real dvdy = 0.5*(v[C+dom->Gfy.s1b] - v[C-dom->Gfy.s1b]) * ddy;
real dvdz = 0.5*(v[C+dom->Gfy.s2b] - v[C-dom->Gfy.s2b]) * ddz;
vv = v[C] + dvdx * (x - xx) + dvdy * (y - yy) + dvdz * (z - zz);
// set vvwall equal to interfering wall v-velocity
vvwall = (parts[part].nodes[node] == -10)*bc.vWD
+ (parts[part].nodes[node] == -11)*bc.vED
+ (parts[part].nodes[node] == -12)*bc.vSD
+ (parts[part].nodes[node] == -13)*bc.vND
+ (parts[part].nodes[node] == -14)*bc.vBD
+ (parts[part].nodes[node] == -15)*bc.vTD;
// switch to particle rest frame
real ocrossr_y = -(ox*zp - oz*xp);
real odotcrossr_y = -(oxdot*zp - ozdot*xp);
vv -= parts[part].v + ocrossr_y;
vv -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_y;
vvwall -= parts[part].v + ocrossr_y;
vvwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_y;
// set actual node value based on whether it is interfered with
vv = (parts[part].nodes[node]==-1)*vv
+ (parts[part].nodes[node]<-1)*vvwall;
// interpolate w-velocity
i = floor((x - dom->xs) * ddx) + DOM_BUF;
j = floor((y - dom->ys) * ddy) + DOM_BUF;
k = round((z - dom->zs) * ddz - 0.5) + DOM_BUF;
if(i < dom->Gfz.is) i = dom->Gfz.is;
if(j < dom->Gfz.js) j = dom->Gfz.js;
if(k < dom->Gfz.ks) k = dom->Gfz.ks;
if(i > dom->Gfz.ie-1) i = dom->Gfz.ie-1;
if(j > dom->Gfz.je-1) j = dom->Gfz.je-1;
if(k > dom->Gfz.ke-1) k = dom->Gfz.ke-1;
xx = (i-0.5) * dom->dx + dom->xs;
yy = (j-0.5) * dom->dy + dom->ys;
zz = (k-DOM_BUF) * dom->dz + dom->zs;
C = i + j*dom->Gfz.s1b + k*dom->Gfz.s2b;
real dwdx = 0.5*(w[C+1] - w[C-1]) * ddx;
real dwdy = 0.5*(w[C+dom->Gfz.s1b] - w[C-dom->Gfz.s1b]) * ddy;
real dwdz = 0.5*(w[C+dom->Gfz.s2b] - w[C-dom->Gfz.s2b]) * ddz;
ww = w[C] + dwdx * (x - xx) + dwdy * (y - yy) + dwdz * (z - zz);
// set wwwall equal to interfering wall w-velocity
wwwall = (parts[part].nodes[node] == -10)*bc.wWD
+ (parts[part].nodes[node] == -11)*bc.wED
+ (parts[part].nodes[node] == -12)*bc.wSD
+ (parts[part].nodes[node] == -13)*bc.wND
+ (parts[part].nodes[node] == -14)*bc.wBD
+ (parts[part].nodes[node] == -15)*bc.wTD;
// switch to particle rest frame
real ocrossr_z = ox*yp - oy*xp;
real odotcrossr_z = oxdot*yp - oydot*xp;
ww -= parts[part].w + ocrossr_z;
ww -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_z;
wwwall -= parts[part].w + ocrossr_z;
wwwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_z;
// set actual node value based on whether it is interfered with
ww = (parts[part].nodes[node]==-1)*ww
+ (parts[part].nodes[node]<-1)*wwwall;
// convert (uu, vv, ww) to (u_r, u_theta, u_phi) and write to node arrays
cart2sphere(uu, vv, ww, theta[node], phi[node],
&ur[node+part*nnodes], &ut[node+part*nnodes], &up[node+part*nnodes]);
}
__device__ real nnm(int n, int m)
{
real fact_top = 1;
real fact_bot = 1;
for(int i = 1; i <= (n-m); i++) fact_top *= (real)i;
for(int i = 1; i <= (n+m); i++) fact_bot *= (real)i;
return sqrt((2.*n+1.) / 4. / PI * fact_top / fact_bot);
}
__device__ real pnm(int n, int m, real theta)
{
real x = cos(theta);
real y = sin(theta);
switch(n) {
case 0: return 1;
case 1:
switch(m) {
//case -1: return -0.5*y;
case 0: return x;
case 1: return -y;
}
case 2:
switch(m) {
//case -2: return 0.125*y*y;
//case -1: return -0.5*x*y;
case 0: return 0.5*(3.*x*x - 1.);
case 1: return -3.*x*y;
case 2: return 3.*y*y;
}
case 3:
switch(m) {
//case -3: return -0.02083333333333*y*y*y;
//case -2: return 0.125*x*y*y;
//case -1: return -0.125*(1. - 5.*x*x)*y;
case 0: return 0.5*x*(5.*x*x - 3.);
case 1: return -1.5*(5.*x*x - 1.)*y;
case 2: return 15.*x*y*y;
case 3: return -15.*y*y*y;
}
case 4:
switch(m) {
//case -4: return .002604166666667*y*y*y*y;
//case -3: return -0.02083333333333*x*y*y*y*y;
//case -2: return 0.02083333333333*(7.*x*x - 1.)*y*y;
//case -1: return -0.125*x*(3. - 7.*x*x)*y;
case 0: return 0.125*(35.*x*x*x*x - 30.*x*x + 3.);
case 1: return -2.5*(7.*x*x - 3.)*x*y;
case 2: return 7.5*(7.*x*x - 1.)*y*y;
case 3: return -105.*x*y*y*y;
case 4: return 105.*y*y*y*y;
}
case 5:
switch(m) {
//case -5: return -0.000260416666667*y*y*y*y*y;
//case -4: return 0.002604166666667*x*y*y*y*y;
//case -3: return -0.002604166666667*y*y*y*(9.*x*x - 1.);
//case -2: return 0.0625*x*y*y*(3.*x*x - 1.);
//case -1: return -0.0625*(21.*x*x*x*x - 14.*x*x + 1.);
case 0: return 0.125*x*(63.*x*x*x*x - 70.*x*x + 15.);
case 1: return -1.875*y*(21.*x*x*x*x - 14.*x*x + 1.);
case 2: return 52.5*x*y*y*(3.*x*x - 1.);
case 3: return -52.5*y*y*y*(9.*x*x - 1.);
case 4: return 945.*x*y*y*y*y;
case 5: return -945.*y*y*y*y*y;
}
}
return 0; // this should never be reached
}
__global__ void cuda_get_coeffs(part_struct *parts,
int *nn, int *mm, real *node_t, real *node_p,
real *pp, real *ur, real *ut, real *up, real mu, real nu,
int stride, real *pnm_re, real *pnm_im,
real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im,
int nnodes, int ncoeffs, real A1, real A2, real A3, real B,
real *pnm_re0, real *pnm_im0,
real *phinm_re0, real *phinm_im0,
real *chinm_re0, real *chinm_im0,
real lambrelax)
{
int node = threadIdx.x;
int part = blockIdx.x;
int coeff = blockIdx.y;
real ars = parts[part].r / parts[part].rs;
real rsa = parts[part].rs / parts[part].r;
int i; // iterator
if(coeff < parts[part].ncoeff) {
// calculate integrand at each node
int j = part*nnodes*ncoeffs + coeff*nnodes + node;
int n = nn[coeff];
int m = mm[coeff];
real theta = node_t[node];
real phi = node_p[node];
real N_nm = nnm(n,m);
real P_nm = pnm(n,m,theta);
real P_n1m = pnm(n+1.,m,theta);
real dPdt = (n-m+1.)*P_n1m-(n+1.)*cos(theta)*P_nm;
real dPdp = m*P_nm;
int_Yp_re[j] = N_nm*P_nm*pp[node+part*nnodes]*cos(m*phi);
int_Yp_im[j] = -N_nm*P_nm*pp[node+part*nnodes]*sin(m*phi);
int_rDYu_re[j] = N_nm/sin(theta)*(dPdt*ut[node+part*nnodes]*cos(m*phi)
- dPdp*up[node+part*nnodes]*sin(m*phi));
int_rDYu_im[j] = N_nm/sin(theta)*(-dPdt*ut[node+part*nnodes]*sin(m*phi)
- dPdp*up[node+part*nnodes]*cos(m*phi));
int_xXDYu_re[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*sin(m*phi)
+ dPdt*up[node+part*nnodes]*cos(m*phi));
int_xXDYu_im[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*cos(m*phi)
- dPdt*up[node+part*nnodes]*sin(m*phi));
__syncthreads();
// compute scalar products
// put sum into first node position for each coeff for each particle
if(node == 0) {
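// node 0 reduces over the 26 active quadrature nodes: weight A1 for nodes
// 0-5, A2 for nodes 6-17, A3 for nodes 18-25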
int_Yp_re[j] *= A1;
int_Yp_im[j] *= A1;
int_rDYu_re[j] *= A1;
int_rDYu_im[j] *= A1;
int_xXDYu_re[j] *= A1;
int_xXDYu_im[j] *= A1;
for(i = 1; i < 6; i++) {
int_Yp_re[j] += A1 * int_Yp_re[j+i];
int_Yp_im[j] += A1 * int_Yp_im[j+i];
int_rDYu_re[j] += A1 * int_rDYu_re[j+i];
int_rDYu_im[j] += A1 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A1 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A1 * int_xXDYu_im[j+i];
}
for(i = 6; i < 18; i++) {
int_Yp_re[j] += A2 * int_Yp_re[j+i];
int_Yp_im[j] += A2 * int_Yp_im[j+i];
int_rDYu_re[j] += A2 * int_rDYu_re[j+i];
int_rDYu_im[j] += A2 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A2 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A2 * int_xXDYu_im[j+i];
}
for(i = 18; i < 26; i++) {
int_Yp_re[j] += A3 * int_Yp_re[j+i];
int_Yp_im[j] += A3 * int_Yp_im[j+i];
int_rDYu_re[j] += A3 * int_rDYu_re[j+i];
int_rDYu_im[j] += A3 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A3 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A3 * int_xXDYu_im[j+i];
}
/*for(i = 26; i < 50; i++) {
int_Yp_re[j] += B * int_Yp_re[j+i];
int_Yp_im[j] += B * int_Yp_im[j+i];
int_rDYu_re[j] += B * int_rDYu_re[j+i];
int_rDYu_im[j] += B * int_rDYu_im[j+i];
int_xXDYu_re[j] += B * int_xXDYu_re[j+i];
int_xXDYu_im[j] += B * int_xXDYu_im[j+i];
}
*/
#ifdef TEST
real relax = 1.0;
#else
real relax = lambrelax;
#endif
if(n == 0) {
pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff]
+ relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*pow(ars,n)
- pnm_re0[stride*part+coeff]);
pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff]
+ relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*pow(ars,n)
- pnm_im0[stride*part+coeff]);
phinm_re[stride*part+coeff] = 0.;
phinm_im[stride*part+coeff] = 0.;
chinm_re[stride*part+coeff] = 0.;
chinm_im[stride*part+coeff] = 0.;
} else {
// calculate p_nm and phi_nm
real A = (1.-0.5*n*(2.*n-1.)/(n+1.)*pow(ars,2.*n+1.))*pow(rsa,n);
real B = n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ars,n+1.);
real C = 0.25*n*(2.*(n+3.)/(2.*n+3.)
+ (n-2.-n*(2.*n+1.)/(2.*n+3.)*ars*ars)*pow(ars,2.*n+1.))*pow(rsa,n+1);
real D = n*(n+1.+0.5*((n-2.)*(2.*n+1.)*rsa*rsa
- n*(2.*n-1.))*pow(ars,2.*n+1.))*pow(rsa,n-1.);
pnm_re[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu
*int_Yp_re[j]*D + parts[part].r/nu*int_rDYu_re[j]*B) / (A*D+B*C);
pnm_im[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu
*int_Yp_im[j]*D + parts[part].r/nu*int_rDYu_im[j]*B) / (A*D+B*C);
phinm_re[stride*part+coeff] = (parts[part].r/nu*int_rDYu_re[j]*A
- parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*C) / (A*D+B*C);
phinm_im[stride*part+coeff] = (parts[part].r/nu*int_rDYu_im[j]*A
- parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*C) / (A*D+B*C);
// calculate chi_nm
real E = n*(n+1.)*(pow(ars,2.*n+1.)-1.)*pow(rsa, n);
chinm_re[stride*part+coeff] = parts[part].r/nu*int_xXDYu_re[j] / E;
chinm_im[stride*part+coeff] = parts[part].r/nu*int_xXDYu_im[j] / E;
// apply underrelaxation
pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff]*(1.-relax)
+ relax*pnm_re[stride*part+coeff];
pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff]*(1.-relax)
+ relax*pnm_im[stride*part+coeff];
phinm_re[stride*part+coeff] = phinm_re0[stride*part+coeff]*(1.-relax)
+ relax*phinm_re[stride*part+coeff];
phinm_im[stride*part+coeff] = phinm_im0[stride*part+coeff]*(1.-relax)
+ relax*phinm_im[stride*part+coeff];
chinm_re[stride*part+coeff] = chinm_re0[stride*part+coeff]*(1.-relax)
+ relax*chinm_re[stride*part+coeff];
chinm_im[stride*part+coeff] = chinm_im0[stride*part+coeff]*(1.-relax)
+ relax*chinm_im[stride*part+coeff];
}
}
}
}
__global__ void cuda_calc_forces(dom_struct *dom, part_struct *parts,
int nparts, gradP_struct gradP,
real rho_f, real mu, real nu, int stride,
real *pnm_re, real *pnm_im,
real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number
if(pp < nparts) {
real vol = 4./3. * PI * parts[pp].r*parts[pp].r*parts[pp].r;
real N10 = sqrt(3./4./PI);
real N11 = sqrt(3./8./PI);
parts[pp].Fx = rho_f * vol * (parts[pp].udot + gradP.x)
- PI * mu * nu * 2.*N11 * (pnm_re[stride*pp + 2]
+ 6.*phinm_re[stride*pp + 2]);
parts[pp].Fy = rho_f * vol * (parts[pp].vdot + gradP.y)
+ PI * mu * nu * 2.*N11 * (pnm_im[stride*pp + 2]
+ 6.*phinm_im[stride*pp + 2]);
parts[pp].Fz = rho_f * vol * (parts[pp].wdot + gradP.z)
+ PI * mu * nu * N10 * (pnm_re[stride*pp + 1]
+ 6.*phinm_re[stride*pp + 1]);
parts[pp].Lx = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oxdot
- 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_re[stride*pp + 2];
parts[pp].Ly = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oydot
+ 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_im[stride*pp + 2];
parts[pp].Lz = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].ozdot
+ 8. * PI * mu * nu * N10 * parts[pp].r * chinm_re[stride*pp + 1];
}
}
__global__ void compute_error(real lamb_cut, int stride, int nparts,
real *pnm_re, real *pnm_re0, real *pnm_im, real *pnm_im0,
real *phinm_re, real *phinm_re0, real *phinm_im, real *phinm_im0,
real *chinm_re, real *chinm_re0, real *chinm_im, real *chinm_im0,
real *coeffs, real *errors, real *part_errors, dom_struct *dom, real nu)
{
int part = blockIdx.x;
int i,j;
real tmp = FLT_MIN;
int loc = 0;
real avg = 0;
real div = 0;
// create shared memory space
__shared__ real s_coeffs[6*21]; // ** have to hard-code this length **
__shared__ real s_coeffs0[6*21]; // ** have to hard-code this length **
// using 6 coefficient sets, each holding
// a maximum of 21 coefficients (5th-order
// truncation)
// copy coeffs for this particle into shared memory
for(i = 0; i < stride; i++) {
s_coeffs[i] = pnm_re[part*stride+i];
s_coeffs[i+1*stride] = pnm_im[part*stride+i];
s_coeffs[i+2*stride] = phinm_re[part*stride+i];
s_coeffs[i+3*stride] = phinm_im[part*stride+i];
s_coeffs[i+4*stride] = chinm_re[part*stride+i];
s_coeffs[i+5*stride] = chinm_im[part*stride+i];
s_coeffs0[i] = pnm_re0[part*stride+i];
s_coeffs0[i+1*stride] = pnm_im0[part*stride+i];
s_coeffs0[i+2*stride] = phinm_re0[part*stride+i];
s_coeffs0[i+3*stride] = phinm_im0[part*stride+i];
s_coeffs0[i+4*stride] = chinm_re0[part*stride+i];
s_coeffs0[i+5*stride] = chinm_im0[part*stride+i];
}
// compute the average of the coefficients
for(i = 0; i < stride*6; i++) {
avg += s_coeffs[i]*s_coeffs[i];
}
avg = avg / (stride*6.);
// sort the coefficients in shared memory and calculate errors along the way
for(i = 0; i < 6*stride; i++) {
// search for the largest magnitude value in shared and store its location
tmp = FLT_MIN;
for(j = 0; j < 6*stride; j++) {
if(s_coeffs[j]*s_coeffs[j] > tmp) {
tmp = s_coeffs[j]*s_coeffs[j];
loc = j;
}
}
// move the largest value into sorted list
coeffs[part*stride+i] = s_coeffs[loc];
// if its corresponding coefficient has large enough magnitude,
// compute error for this coefficient
if(fabs(s_coeffs[loc]) > lamb_cut*fabs(coeffs[part*stride+0])) {
div = fabs(s_coeffs[loc]);// + fabs(avg)*1e-4;
if(div < 1e-16) div = 1e-16;
errors[part*stride+i] = fabs((s_coeffs[loc] - s_coeffs0[loc]) / div);
} else errors[part*stride+i] = 0.;
// discard this value since we've used it once
s_coeffs[loc] = 0.;
}
// find the largest error for each particle
tmp = FLT_MIN;
for(i = 0; i < 6*stride; i++) {
if(errors[part*stride+i] > tmp) tmp = errors[part*stride+i];
}
// write error to return for each particle
part_errors[part] = tmp;
}
| 21807bbdcd718860a4393d1c835930537f4e7d05.cu | /*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2015 Adam Sierakowski, The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_quadrature.h"
__device__ void rtp2xyz(real r, real theta, real phi, real *x, real *y, real *z)
{
*x = r * sin(theta) * cos(phi);
*y = r * sin(theta) * sin(phi);
*z = r * cos(theta);
}
__device__ void cart2sphere(real u, real v, real w, real theta, real phi,
real *ur, real *ut, real *up)
{
real st = sin(theta);
real ct = cos(theta);
real sp = sin(phi);
real cp = cos(phi);
*ur = st * (u * cp + v * sp) + w * ct;
*ut = ct * (u * cp + v * sp) - w * st;
*up = -u * sp + v * cp;
}
__global__ void check_nodes(int nparts, part_struct *parts, dom_struct *dom,
real *theta, real *phi, int nnodes, BC bc)
{
int node = threadIdx.x;
int part = blockIdx.x;
// convert node (r, theta, phi) to (x, y, z)
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp);
// shift from particle center
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
// start off with all -1's
parts[part].nodes[node] = -1;
// check if the node is interfered with by a wall
// compute distance between node and walls
// set equal to some number to identify which wall is interfering
if(x - dom->xs < 0) {
if(bc.uW == DIRICHLET || bc.vW == DIRICHLET || bc.wW == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -10;
} if(x - dom->xe > 0) {
if(bc.uE == DIRICHLET || bc.vE == DIRICHLET || bc.wE == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -11;
} if(y - dom->ys < 0) {
if(bc.uS == DIRICHLET || bc.vS == DIRICHLET || bc.wS == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -12;
} if(y - dom->ye > 0) {
if(bc.uN == DIRICHLET || bc.vN == DIRICHLET || bc.wN == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -13;
} if(z - dom->zs < 0) {
if(bc.uB == DIRICHLET || bc.vB == DIRICHLET || bc.wB == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -14;
} if(z - dom->ze > 0) {
if(bc.uT == DIRICHLET || bc.vT == DIRICHLET || bc.wT == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -15;
}
}
__global__ void interpolate_nodes(real *p0, real *p, real *u, real *v, real *w,
real rho_f, real nu, gradP_struct gradP,
part_struct *parts, dom_struct *dom, real *theta, real *phi, int nnodes,
real *pp, real *ur, real *ut, real *up, real dt0, real dt, BC bc)
{
int node = threadIdx.x;
int part = blockIdx.x;
// the node number of the intersecting node
int intnode = parts[part].nodes[node];
if(intnode < 0) intnode = part;
real ddx = 1. / dom->dx;
real ddy = 1. / dom->dy;
real ddz = 1. / dom->dz;
real ox = parts[part].ox;
real oy = parts[part].oy;
real oz = parts[part].oz;
real oxdot = parts[part].oxdot;
real oydot = parts[part].oydot;
real ozdot = parts[part].ozdot;
real udot = parts[part].udot;
real vdot = parts[part].vdot;
real wdot = parts[part].wdot;
real uu, vv, ww; // temporary nodes for Cartesian result of interpolation
real uuwall, vvwall, wwwall;
// convert node (r, theta, phi) to (x, y, z)
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp);
// shift from particle center
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
if(x < dom->xs && bc.uW == PERIODIC) x = x + dom->xl;
else if(x > dom->xe && bc.uE == PERIODIC) x = x - dom->xl;
if(y < dom->ys && bc.vS == PERIODIC) y = y + dom->yl;
else if(y > dom->ye && bc.vN == PERIODIC) y = y - dom->yl;
if(z < dom->zs && bc.wB == PERIODIC) z = z + dom->zl;
else if(z > dom->ze && bc.wT == PERIODIC) z = z - dom->zl;
__syncthreads();
// find index of cell containing node
int i = floor((x - dom->xs) * ddx) + DOM_BUF;
int j = floor((y - dom->ys) * ddy) + DOM_BUF;
int k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gcc.is) i = dom->Gcc.is;
if(j < dom->Gcc.js) j = dom->Gcc.js;
if(k < dom->Gcc.ks) k = dom->Gcc.ks;
if(i > dom->Gcc.ie-1) i = dom->Gcc.ie-1;
if(j > dom->Gcc.je-1) j = dom->Gcc.je-1;
if(k > dom->Gcc.ke-1) k = dom->Gcc.ke-1;
int C = i + j*dom->Gcc.s1b + k*dom->Gcc.s2b;
// Cartesian location of center of cell
real xx = (i-0.5) * dom->dx + dom->xs;
real yy = (j-0.5) * dom->dy + dom->ys;
real zz = (k-0.5) * dom->dz + dom->zs;
// interpolate pressure
real pc = p[C];
real pw = p[C-1];
real pe = p[C+1];
real ps = p[C-dom->Gcc.s1b];
real pn = p[C+dom->Gcc.s1b];
real pb = p[C-dom->Gcc.s2b];
real pt = p[C+dom->Gcc.s2b];
real dpdx = 0.5*(pe - pw) * ddx;
real dpdy = 0.5*(pn - ps) * ddy;
real dpdz = 0.5*(pt - pb) * ddz;
pp[node+nnodes*part] = pc + dpdx*(x-xx) + dpdy*(y-yy) + dpdz*(z-zz);
// switch to particle rest frame
real ocrossr2 = (oy*zp - oz*yp) * (oy*zp - oz*yp);
ocrossr2 += (ox*zp - oz*xp) * (ox*zp - oz*xp);
ocrossr2 += (ox*yp - oy*xp) * (ox*yp - oy*xp);
real rhoV = rho_f;
real accdotr = (-gradP.x/rhoV - udot)*xp + (-gradP.y/rhoV - vdot)*yp
+ (-gradP.z/rhoV - wdot)*zp;
pp[node+nnodes*part] -= 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
// zero if this node intersects wall
pp[node+nnodes*part] = (parts[part].nodes[node]==-1)*pp[node+part*nnodes];
// interpolate velocities
// don't work with cell-center anymore;
// find closest cell face in x-direction
// interpolate u-velocity
i = round((x - dom->xs) * ddx - 0.5) + DOM_BUF;
j = floor((y - dom->ys) * ddy) + DOM_BUF;
k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gfx.is) i = dom->Gfx.is;
if(j < dom->Gfx.js) j = dom->Gfx.js;
if(k < dom->Gfx.ks) k = dom->Gfx.ks;
if(i > dom->Gfx.ie-1) i = dom->Gfx.ie-1;
if(j > dom->Gfx.je-1) j = dom->Gfx.je-1;
if(k > dom->Gfx.ke-1) k = dom->Gfx.ke-1;
xx = (i-DOM_BUF) * dom->dx + dom->xs;
yy = (j-0.5) * dom->dy + dom->ys;
zz = (k-0.5) * dom->dz + dom->zs;
C = i + j*dom->Gfx.s1b + k*dom->Gfx.s2b;
real dudx = 0.5*(u[C+1] - u[C-1]) * ddx;
real dudy = 0.5*(u[C+dom->Gfx.s1b] - u[C-dom->Gfx.s1b]) * ddy;
real dudz = 0.5*(u[C+dom->Gfx.s2b] - u[C-dom->Gfx.s2b]) * ddz;
uu = u[C] + dudx * (x - xx) + dudy * (y - yy) + dudz * (z - zz);
// set uuwall equal to interfering wall u-velocity
uuwall = (parts[part].nodes[node] == -10)*bc.uWD
+ (parts[part].nodes[node] == -11)*bc.uED
+ (parts[part].nodes[node] == -12)*bc.uSD
+ (parts[part].nodes[node] == -13)*bc.uND
+ (parts[part].nodes[node] == -14)*bc.uBD
+ (parts[part].nodes[node] == -15)*bc.uTD;
// switch to particle rest frame
real rs3 = parts[part].rs*parts[part].rs*parts[part].rs;
real rs5 = rs3*parts[part].rs*parts[part].rs;
real a5 = parts[part].r*parts[part].r*parts[part].r*parts[part].r*parts[part].r;
real ocrossr_x = oy*zp - oz*yp;
real odotcrossr_x = oydot*zp - ozdot*yp;
uu -= parts[part].u + ocrossr_x;
uu -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_x;
uuwall -= parts[part].u + ocrossr_x;
uuwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_x;
// set actual node value based on whether it is interfered with
uu = (parts[part].nodes[node]==-1)*uu
+ (parts[part].nodes[node]<-1)*uuwall;
//printf("uu = %f uuwall = %f\n", uu + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x, uuwall + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x);
// interpolate v-velocity
i = floor((x - dom->xs) * ddx) + DOM_BUF;
j = round((y - dom->ys) * ddy - 0.5) + DOM_BUF;
k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gfy.is) i = dom->Gfy.is;
if(j < dom->Gfy.js) j = dom->Gfy.js;
if(k < dom->Gfy.ks) k = dom->Gfy.ks;
if(i > dom->Gfy.ie-1) i = dom->Gfy.ie-1;
if(j > dom->Gfy.je-1) j = dom->Gfy.je-1;
if(k > dom->Gfy.ke-1) k = dom->Gfy.ke-1;
xx = (i-0.5) * dom->dx + dom->xs;
yy = (j-DOM_BUF) * dom->dy + dom->ys;
zz = (k-0.5) * dom->dz + dom->zs;
C = i + j*dom->Gfy.s1b + k*dom->Gfy.s2b;
real dvdx = 0.5*(v[C+1] - v[C-1]) * ddx;
real dvdy = 0.5*(v[C+dom->Gfy.s1b] - v[C-dom->Gfy.s1b]) * ddy;
real dvdz = 0.5*(v[C+dom->Gfy.s2b] - v[C-dom->Gfy.s2b]) * ddz;
vv = v[C] + dvdx * (x - xx) + dvdy * (y - yy) + dvdz * (z - zz);
// set vvwall equal to interfering wall v-velocity
vvwall = (parts[part].nodes[node] == -10)*bc.vWD
+ (parts[part].nodes[node] == -11)*bc.vED
+ (parts[part].nodes[node] == -12)*bc.vSD
+ (parts[part].nodes[node] == -13)*bc.vND
+ (parts[part].nodes[node] == -14)*bc.vBD
+ (parts[part].nodes[node] == -15)*bc.vTD;
// switch to particle rest frame
real ocrossr_y = -(ox*zp - oz*xp);
real odotcrossr_y = -(oxdot*zp - ozdot*xp);
vv -= parts[part].v + ocrossr_y;
vv -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_y;
vvwall -= parts[part].v + ocrossr_y;
vvwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_y;
// set actual node value based on whether it is interfered with
vv = (parts[part].nodes[node]==-1)*vv
+ (parts[part].nodes[node]<-1)*vvwall;
// interpolate w-velocity
i = floor((x - dom->xs) * ddx) + DOM_BUF;
j = floor((y - dom->ys) * ddy) + DOM_BUF;
k = round((z - dom->zs) * ddz - 0.5) + DOM_BUF;
if(i < dom->Gfz.is) i = dom->Gfz.is;
if(j < dom->Gfz.js) j = dom->Gfz.js;
if(k < dom->Gfz.ks) k = dom->Gfz.ks;
if(i > dom->Gfz.ie-1) i = dom->Gfz.ie-1;
if(j > dom->Gfz.je-1) j = dom->Gfz.je-1;
if(k > dom->Gfz.ke-1) k = dom->Gfz.ke-1;
xx = (i-0.5) * dom->dx + dom->xs;
yy = (j-0.5) * dom->dy + dom->ys;
zz = (k-DOM_BUF) * dom->dz + dom->zs;
C = i + j*dom->Gfz.s1b + k*dom->Gfz.s2b;
real dwdx = 0.5*(w[C+1] - w[C-1]) * ddx;
real dwdy = 0.5*(w[C+dom->Gfz.s1b] - w[C-dom->Gfz.s1b]) * ddy;
real dwdz = 0.5*(w[C+dom->Gfz.s2b] - w[C-dom->Gfz.s2b]) * ddz;
ww = w[C] + dwdx * (x - xx) + dwdy * (y - yy) + dwdz * (z - zz);
// set wwwall equal to interfering wall w-velocity
wwwall = (parts[part].nodes[node] == -10)*bc.wWD
+ (parts[part].nodes[node] == -11)*bc.wED
+ (parts[part].nodes[node] == -12)*bc.wSD
+ (parts[part].nodes[node] == -13)*bc.wND
+ (parts[part].nodes[node] == -14)*bc.wBD
+ (parts[part].nodes[node] == -15)*bc.wTD;
// switch to particle rest frame
real ocrossr_z = ox*yp - oy*xp;
real odotcrossr_z = oxdot*yp - oydot*xp;
ww -= parts[part].w + ocrossr_z;
ww -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_z;
wwwall -= parts[part].w + ocrossr_z;
wwwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_z;
// set actual node value based on whether it is interfered with
ww = (parts[part].nodes[node]==-1)*ww
+ (parts[part].nodes[node]<-1)*wwwall;
// convert (uu, vv, ww) to (u_r, u_theta, u_phi) and write to node arrays
cart2sphere(uu, vv, ww, theta[node], phi[node],
&ur[node+part*nnodes], &ut[node+part*nnodes], &up[node+part*nnodes]);
}
__device__ real nnm(int n, int m)
{
real fact_top = 1;
real fact_bot = 1;
for(int i = 1; i <= (n-m); i++) fact_top *= (real)i;
for(int i = 1; i <= (n+m); i++) fact_bot *= (real)i;
return sqrt((2.*n+1.) / 4. / PI * fact_top / fact_bot);
}
__device__ real pnm(int n, int m, real theta)
{
real x = cos(theta);
real y = sin(theta);
switch(n) {
case 0: return 1;
case 1:
switch(m) {
//case -1: return -0.5*y;
case 0: return x;
case 1: return -y;
}
case 2:
switch(m) {
//case -2: return 0.125*y*y;
//case -1: return -0.5*x*y;
case 0: return 0.5*(3.*x*x - 1.);
case 1: return -3.*x*y;
case 2: return 3.*y*y;
}
case 3:
switch(m) {
//case -3: return -0.02083333333333*y*y*y;
//case -2: return 0.125*x*y*y;
//case -1: return -0.125*(1. - 5.*x*x)*y;
case 0: return 0.5*x*(5.*x*x - 3.);
case 1: return -1.5*(5.*x*x - 1.)*y;
case 2: return 15.*x*y*y;
case 3: return -15.*y*y*y;
}
case 4:
switch(m) {
//case -4: return .002604166666667*y*y*y*y;
//case -3: return -0.02083333333333*x*y*y*y*y;
//case -2: return 0.02083333333333*(7.*x*x - 1.)*y*y;
//case -1: return -0.125*x*(3. - 7.*x*x)*y;
case 0: return 0.125*(35.*x*x*x*x - 30.*x*x + 3.);
case 1: return -2.5*(7.*x*x - 3.)*x*y;
case 2: return 7.5*(7.*x*x - 1.)*y*y;
case 3: return -105.*x*y*y*y;
case 4: return 105.*y*y*y*y;
}
case 5:
switch(m) {
//case -5: return -0.000260416666667*y*y*y*y*y;
//case -4: return 0.002604166666667*x*y*y*y*y;
//case -3: return -0.002604166666667*y*y*y*(9.*x*x - 1.);
//case -2: return 0.0625*x*y*y*(3.*x*x - 1.);
//case -1: return -0.0625*(21.*x*x*x*x - 14.*x*x + 1.);
case 0: return 0.125*x*(63.*x*x*x*x - 70.*x*x + 15.);
case 1: return -1.875*y*(21.*x*x*x*x - 14.*x*x + 1.);
case 2: return 52.5*x*y*y*(3.*x*x - 1.);
case 3: return -52.5*y*y*y*(9.*x*x - 1.);
case 4: return 945.*x*y*y*y*y;
case 5: return -945.*y*y*y*y*y;
}
}
return 0; // this should never be reached
}
__global__ void cuda_get_coeffs(part_struct *parts,
int *nn, int *mm, real *node_t, real *node_p,
real *pp, real *ur, real *ut, real *up, real mu, real nu,
int stride, real *pnm_re, real *pnm_im,
real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im,
int nnodes, int ncoeffs, real A1, real A2, real A3, real B,
real *pnm_re0, real *pnm_im0,
real *phinm_re0, real *phinm_im0,
real *chinm_re0, real *chinm_im0,
real lambrelax)
{
int node = threadIdx.x;
int part = blockIdx.x;
int coeff = blockIdx.y;
real ars = parts[part].r / parts[part].rs;
real rsa = parts[part].rs / parts[part].r;
int i; // iterator
if(coeff < parts[part].ncoeff) {
// calculate integrand at each node
int j = part*nnodes*ncoeffs + coeff*nnodes + node;
int n = nn[coeff];
int m = mm[coeff];
real theta = node_t[node];
real phi = node_p[node];
real N_nm = nnm(n,m);
real P_nm = pnm(n,m,theta);
real P_n1m = pnm(n+1.,m,theta);
real dPdt = (n-m+1.)*P_n1m-(n+1.)*cos(theta)*P_nm;
real dPdp = m*P_nm;
int_Yp_re[j] = N_nm*P_nm*pp[node+part*nnodes]*cos(m*phi);
int_Yp_im[j] = -N_nm*P_nm*pp[node+part*nnodes]*sin(m*phi);
int_rDYu_re[j] = N_nm/sin(theta)*(dPdt*ut[node+part*nnodes]*cos(m*phi)
- dPdp*up[node+part*nnodes]*sin(m*phi));
int_rDYu_im[j] = N_nm/sin(theta)*(-dPdt*ut[node+part*nnodes]*sin(m*phi)
- dPdp*up[node+part*nnodes]*cos(m*phi));
int_xXDYu_re[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*sin(m*phi)
+ dPdt*up[node+part*nnodes]*cos(m*phi));
int_xXDYu_im[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*cos(m*phi)
- dPdt*up[node+part*nnodes]*sin(m*phi));
__syncthreads();
// compute scalar products
// put sum into first node position for each coeff for each particle
if(node == 0) {
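// node 0 reduces over the 26 active quadrature nodes: weight A1 for nodes
// 0-5, A2 for nodes 6-17, A3 for nodes 18-25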
int_Yp_re[j] *= A1;
int_Yp_im[j] *= A1;
int_rDYu_re[j] *= A1;
int_rDYu_im[j] *= A1;
int_xXDYu_re[j] *= A1;
int_xXDYu_im[j] *= A1;
for(i = 1; i < 6; i++) {
int_Yp_re[j] += A1 * int_Yp_re[j+i];
int_Yp_im[j] += A1 * int_Yp_im[j+i];
int_rDYu_re[j] += A1 * int_rDYu_re[j+i];
int_rDYu_im[j] += A1 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A1 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A1 * int_xXDYu_im[j+i];
}
for(i = 6; i < 18; i++) {
int_Yp_re[j] += A2 * int_Yp_re[j+i];
int_Yp_im[j] += A2 * int_Yp_im[j+i];
int_rDYu_re[j] += A2 * int_rDYu_re[j+i];
int_rDYu_im[j] += A2 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A2 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A2 * int_xXDYu_im[j+i];
}
for(i = 18; i < 26; i++) {
int_Yp_re[j] += A3 * int_Yp_re[j+i];
int_Yp_im[j] += A3 * int_Yp_im[j+i];
int_rDYu_re[j] += A3 * int_rDYu_re[j+i];
int_rDYu_im[j] += A3 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A3 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A3 * int_xXDYu_im[j+i];
}
/*for(i = 26; i < 50; i++) {
int_Yp_re[j] += B * int_Yp_re[j+i];
int_Yp_im[j] += B * int_Yp_im[j+i];
int_rDYu_re[j] += B * int_rDYu_re[j+i];
int_rDYu_im[j] += B * int_rDYu_im[j+i];
int_xXDYu_re[j] += B * int_xXDYu_re[j+i];
int_xXDYu_im[j] += B * int_xXDYu_im[j+i];
}
*/
#ifdef TEST
real relax = 1.0;
#else
real relax = lambrelax;
#endif
if(n == 0) {
pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff]
+ relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*pow(ars,n)
- pnm_re0[stride*part+coeff]);
pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff]
+ relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*pow(ars,n)
- pnm_im0[stride*part+coeff]);
phinm_re[stride*part+coeff] = 0.;
phinm_im[stride*part+coeff] = 0.;
chinm_re[stride*part+coeff] = 0.;
chinm_im[stride*part+coeff] = 0.;
} else {
// calculate p_nm and phi_nm
real A = (1.-0.5*n*(2.*n-1.)/(n+1.)*pow(ars,2.*n+1.))*pow(rsa,n);
real B = n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ars,n+1.);
real C = 0.25*n*(2.*(n+3.)/(2.*n+3.)
+ (n-2.-n*(2.*n+1.)/(2.*n+3.)*ars*ars)*pow(ars,2.*n+1.))*pow(rsa,n+1);
real D = n*(n+1.+0.5*((n-2.)*(2.*n+1.)*rsa*rsa
- n*(2.*n-1.))*pow(ars,2.*n+1.))*pow(rsa,n-1.);
pnm_re[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu
*int_Yp_re[j]*D + parts[part].r/nu*int_rDYu_re[j]*B) / (A*D+B*C);
pnm_im[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu
*int_Yp_im[j]*D + parts[part].r/nu*int_rDYu_im[j]*B) / (A*D+B*C);
phinm_re[stride*part+coeff] = (parts[part].r/nu*int_rDYu_re[j]*A
- parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*C) / (A*D+B*C);
phinm_im[stride*part+coeff] = (parts[part].r/nu*int_rDYu_im[j]*A
- parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*C) / (A*D+B*C);
// calculate chi_nm
real E = n*(n+1.)*(pow(ars,2.*n+1.)-1.)*pow(rsa, n);
chinm_re[stride*part+coeff] = parts[part].r/nu*int_xXDYu_re[j] / E;
chinm_im[stride*part+coeff] = parts[part].r/nu*int_xXDYu_im[j] / E;
// apply underrelaxation
pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff]*(1.-relax)
+ relax*pnm_re[stride*part+coeff];
pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff]*(1.-relax)
+ relax*pnm_im[stride*part+coeff];
phinm_re[stride*part+coeff] = phinm_re0[stride*part+coeff]*(1.-relax)
+ relax*phinm_re[stride*part+coeff];
phinm_im[stride*part+coeff] = phinm_im0[stride*part+coeff]*(1.-relax)
+ relax*phinm_im[stride*part+coeff];
chinm_re[stride*part+coeff] = chinm_re0[stride*part+coeff]*(1.-relax)
+ relax*chinm_re[stride*part+coeff];
chinm_im[stride*part+coeff] = chinm_im0[stride*part+coeff]*(1.-relax)
+ relax*chinm_im[stride*part+coeff];
}
}
}
}
__global__ void cuda_calc_forces(dom_struct *dom, part_struct *parts,
int nparts, gradP_struct gradP,
real rho_f, real mu, real nu, int stride,
real *pnm_re, real *pnm_im,
real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number
if(pp < nparts) {
real vol = 4./3. * PI * parts[pp].r*parts[pp].r*parts[pp].r;
real N10 = sqrt(3./4./PI);
real N11 = sqrt(3./8./PI);
parts[pp].Fx = rho_f * vol * (parts[pp].udot + gradP.x)
- PI * mu * nu * 2.*N11 * (pnm_re[stride*pp + 2]
+ 6.*phinm_re[stride*pp + 2]);
parts[pp].Fy = rho_f * vol * (parts[pp].vdot + gradP.y)
+ PI * mu * nu * 2.*N11 * (pnm_im[stride*pp + 2]
+ 6.*phinm_im[stride*pp + 2]);
parts[pp].Fz = rho_f * vol * (parts[pp].wdot + gradP.z)
+ PI * mu * nu * N10 * (pnm_re[stride*pp + 1]
+ 6.*phinm_re[stride*pp + 1]);
parts[pp].Lx = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oxdot
- 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_re[stride*pp + 2];
parts[pp].Ly = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oydot
+ 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_im[stride*pp + 2];
parts[pp].Lz = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].ozdot
+ 8. * PI * mu * nu * N10 * parts[pp].r * chinm_re[stride*pp + 1];
}
}
__global__ void compute_error(real lamb_cut, int stride, int nparts,
real *pnm_re, real *pnm_re0, real *pnm_im, real *pnm_im0,
real *phinm_re, real *phinm_re0, real *phinm_im, real *phinm_im0,
real *chinm_re, real *chinm_re0, real *chinm_im, real *chinm_im0,
real *coeffs, real *errors, real *part_errors, dom_struct *dom, real nu)
{
int part = blockIdx.x;
int i,j;
real tmp = FLT_MIN;
int loc = 0;
real avg = 0;
real div = 0;
// create shared memory space
__shared__ real s_coeffs[6*21]; // ** have to hard-code this length **
__shared__ real s_coeffs0[6*21]; // ** have to hard-code this length **
// using 6 coefficient sets, each holding
// a maximum of 21 coefficients (5th-order
// truncation)
// copy coeffs for this particle into shared memory
for(i = 0; i < stride; i++) {
s_coeffs[i] = pnm_re[part*stride+i];
s_coeffs[i+1*stride] = pnm_im[part*stride+i];
s_coeffs[i+2*stride] = phinm_re[part*stride+i];
s_coeffs[i+3*stride] = phinm_im[part*stride+i];
s_coeffs[i+4*stride] = chinm_re[part*stride+i];
s_coeffs[i+5*stride] = chinm_im[part*stride+i];
s_coeffs0[i] = pnm_re0[part*stride+i];
s_coeffs0[i+1*stride] = pnm_im0[part*stride+i];
s_coeffs0[i+2*stride] = phinm_re0[part*stride+i];
s_coeffs0[i+3*stride] = phinm_im0[part*stride+i];
s_coeffs0[i+4*stride] = chinm_re0[part*stride+i];
s_coeffs0[i+5*stride] = chinm_im0[part*stride+i];
}
// compute the average of the coefficients
for(i = 0; i < stride*6; i++) {
avg += s_coeffs[i]*s_coeffs[i];
}
avg = avg / (stride*6.);
// sort the coefficients in shared memory and calculate errors along the way
for(i = 0; i < 6*stride; i++) {
// search for the largest magnitude value in shared and store its location
tmp = FLT_MIN;
for(j = 0; j < 6*stride; j++) {
if(s_coeffs[j]*s_coeffs[j] > tmp) {
tmp = s_coeffs[j]*s_coeffs[j];
loc = j;
}
}
// move the largest value into sorted list
coeffs[part*stride+i] = s_coeffs[loc];
// if its corresponding coefficient has large enough magnitude,
// compute error for this coefficient
if(fabs(s_coeffs[loc]) > lamb_cut*fabs(coeffs[part*stride+0])) {
div = fabs(s_coeffs[loc]);// + fabs(avg)*1e-4;
if(div < 1e-16) div = 1e-16;
errors[part*stride+i] = fabs((s_coeffs[loc] - s_coeffs0[loc]) / div);
} else errors[part*stride+i] = 0.;
// discard this value since we've used it once
s_coeffs[loc] = 0.;
}
// find the largest error for each particle
tmp = FLT_MIN;
for(i = 0; i < 6*stride; i++) {
if(errors[part*stride+i] > tmp) tmp = errors[part*stride+i];
}
// write error to return for each particle
part_errors[part] = tmp;
}
|
9cb63e78201cb15956bbe283d58cc9c178292b3b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* dgemm_gpu_shared.cu
*
* compile with: make dgemm_gpu_shared_uvm
*
* Matrices are stored as arrays in row-major order:
* A[row][col] = A[row * N + col]
*
* Use shared memory to speed up the matrix multiplication. We can reuse
* the memory if we load a block of the matrix and have a thread block
* calculate a sub matrix.
*/
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
// Thread block size: BLOCK_SIZE * BLOCK_SIZE
#define BLOCK_SIZE 16
// Declaration of helper functions (see bottom of file for details)
void checkError (const char* action);
float getGflops (int, float);
/*
* Matrix multiplication kernel called by matrixMulOnDevice()
*/
__global__ void dgemm_gpu_shared(double* a, double* b, double* c, int n){
// Allocate shared memory for the two blocks aSub and bSub.
// Use two-dimensional matrices of size BLOCK_SIZE * BLOCK_SIZE
__shared__ double aSub[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double bSub[BLOCK_SIZE][BLOCK_SIZE];
// TODO: Calculate global thread index
int idxX = blockDim.x * blockIdx.x + threadIdx.x;
int idxY = blockDim.y * blockIdx.y + threadIdx.y;
// For the matrix multiplication, we need to multiply all the elements of
// the idxYth row of a with all the elements of the idXth column of b and
// sum up the results.
double sum = 0;
// Calculate global offset of upper left corner of thread block.
int blockaY = blockIdx.y * BLOCK_SIZE;
int blockbX = blockIdx.x * BLOCK_SIZE;
for (int block = 0; block < gridDim.x; ++block){
// Get the two sub matrices
int blockaX = block * (BLOCK_SIZE);
int blockbY = block * (BLOCK_SIZE);
if (((blockaY + threadIdx.y) < n) && (blockaX + threadIdx.x) < n) {
// TODO: Copy block into shared memory
aSub[threadIdx.y][threadIdx.x] = a[(blockaY + threadIdx.y) * n + blockaX + threadIdx.x];
} else {
aSub[threadIdx.y][threadIdx.x] = 0;
}
if (((blockbY + threadIdx.y) < n) && (blockbX + threadIdx.x) < n) {
bSub[threadIdx.y][threadIdx.x] = b[(blockbY + threadIdx.y) * n + blockbX + threadIdx.x];
} else {
bSub[threadIdx.y][threadIdx.x] = 0;
}
__syncthreads(); // Make sure that all threads had time to read the sub matrix.
if ((idxX < n) && (idxY < n))
{
for (int i=0; i < blockDim.x; ++i){ //assumes that we use square blocks
sum += aSub[threadIdx.y][i] * bSub[i][threadIdx.x];
}
}
__syncthreads();
}
if ((idxX < n) && (idxY < n)){
c[idxY * n + idxX] = sum;
}
}
/*
* Matrix multiplication host function called by main()
*/
void matrixMulOnDevice(double* a, double* b, double* c, int n)
{
int xGrid, yGrid;
float time;
// Define events for timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// First calculate grid size by dividing n by BLOCK_SIZE = 16
xGrid = (n % BLOCK_SIZE == 0) ? (n / BLOCK_SIZE) : (n / BLOCK_SIZE + 1);
yGrid = (n % BLOCK_SIZE == 0) ? (n / BLOCK_SIZE) : (n / BLOCK_SIZE + 1);
dim3 gridDim(xGrid, yGrid);
dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE);
printf("Grid: %d, %d; block:%d, %d\n", xGrid , yGrid , BLOCK_SIZE, BLOCK_SIZE);
// Invoke kernel and measure execution time
hipEventRecord( start, 0 );
// TODO: Call the kernel
hipLaunchKernelGGL(( dgemm_gpu_shared), dim3(gridDim), dim3(blockDim), 0, 0, a, b, c, n);
hipDeviceSynchronize();
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
checkError("executing Kernel");
// Get elapsed time for kernel execution
hipEventElapsedTime( &time, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
printf ("\nKernel Execution Time: %f ms (dim C: %d * %d)", time, n, n);
printf ("\nThis corresponds to: %4.4f GFLOPS", getGflops(n, time));
}
int main(int argc, char** argv)
{
int n = 1024;
double *a, *b, *c;
int row, col;
double absError, maxAbsError = 0.0, sumAbsError = 0.0;
size_t size;
if (argc > 1) {
n = atoi(argv[1]);
}
// show banner
printf ("\n\n Matrix-Multiplication \n");
printf ( " ==========================================\n");
printf ( "\n Simple DGEMM implemantation on GPU");
// echo device data
int idevice = 0;
hipSetDevice(idevice);
hipDeviceProp_t dprops;
hipGetDeviceProperties( &dprops, idevice );
printf ("\n Device name = %s, with compute capability %d.%d \n",
dprops.name, dprops.major, dprops.minor);
printf ( "\n Matrix size %d x %d", n, n);
// Allocate memory for matrices (that can be accessed from host and device)
size = n * n * sizeof(double);
hipMallocManaged((void**)&a, size);
checkError("allocating UVM memory for A");
hipMallocManaged((void**)&b, size);
checkError("allocating UVM memory for B");
hipMallocManaged((void**)&c, size);
checkError("allocating UVM memory for C");
// Init matrices A and B: A = E so result will be B
#pragma omp parallel for private(row, col)
for (row = 0; row < n; ++row){
for (col = 0; col < n; col++){
a[row * n + col] = (row == col) ? 1.0 : 0.0;
b[row * n + col] = row * n + col;
}
}
// do matrix multiplication on device
matrixMulOnDevice(a, b, c, n);
// Compare results
for ( row = 0; row < n; ++row){
for ( col = 0; col < n; ++col) {
absError = fabs ( c[row * n + col] - b[row * n + col]);
sumAbsError += absError;
if (absError > maxAbsError)
maxAbsError = absError;
}
}
// Free memory
hipFree(a);
checkError("Freeing a");
hipFree(b);
checkError("Freeing b");
hipFree(c);
checkError("Freeing c");
printf ("\nmaxAbsError: %4.4f, sumAbsError: %4.4f", maxAbsError, sumAbsError);
if (maxAbsError < 2.0e-5)
printf ("\n\nProgram terminated SUCCESSFULLY.\n\n");
return 0;
}
/*
* Some helper functions
*/
// get compute performance
float getGflops (int n, float time) {
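	// Editorial comment (added): a dense n x n matrix multiply performs 2*n^3
	// floating-point operations (one multiply and one add per term); time is in
	// milliseconds, hence the 2.0e-6 factor to obtain GFLOPS.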
float gf = (2.0e-6 * n * n* n / time);
return gf;
}
// Simple error checking function for CUDA actions
void checkError (const char* action) {
hipError_t error;
error = hipGetLastError();
if (error != hipSuccess) {
printf ("\nError while '%s': %s\nprogram terminated ...\n\n", action, hipGetErrorString(error));
exit (EXIT_FAILURE);
}
}
| 9cb63e78201cb15956bbe283d58cc9c178292b3b.cu | /*
* dgemm_gpu_shared.cu
*
* compile with: make dgemm_gpu_shared_uvm
*
* Matrices are stored as array in row-major order:
* A[row][col] = A[row * N + col]
*
* Use shared memory to speed up the matrix multiplication. We can reuse
* the memory if we load a block of the matrix and have a thread block
* calculate a sub matrix.
*/
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
// Thread block size: BLOCK_SIZE * BLOCK_SIZE
#define BLOCK_SIZE 16
// Declaration of helper functions (see bottom of file for details)
void checkError (const char* action);
float getGflops (int, float);
/*
* Matrix multiplication kernel called by matrixMulOnDevice()
*/
__global__ void dgemm_gpu_shared(double* a, double* b, double* c, int n){
// Allocate shared memory for the two blocks aSub and bSub.
// Use two-dimensional matrices of size BLOCK_SIZE * BLOCK_SIZE
__shared__ double aSub[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double bSub[BLOCK_SIZE][BLOCK_SIZE];
// TODO: Calculate global thread index
int idxX = blockDim.x * blockIdx.x + threadIdx.x;
int idxY = blockDim.y * blockIdx.y + threadIdx.y;
// For the matrix multiplication, we need to multiply all the elements of
// the idxYth row of a with all the elements of the idXth column of b and
// sum up the results.
double sum = 0;
// Calculate global offset of upper left corner of thread block.
int blockaY = blockIdx.y * BLOCK_SIZE;
int blockbX = blockIdx.x * BLOCK_SIZE;
for (int block = 0; block < gridDim.x; ++block){
// Get the two sub matrices
int blockaX = block * (BLOCK_SIZE);
int blockbY = block * (BLOCK_SIZE);
if (((blockaY + threadIdx.y) < n) && (blockaX + threadIdx.x) < n) {
// TODO: Copy block into shared memory
aSub[threadIdx.y][threadIdx.x] = a[(blockaY + threadIdx.y) * n + blockaX + threadIdx.x];
} else {
aSub[threadIdx.y][threadIdx.x] = 0;
}
if (((blockbY + threadIdx.y) < n) && (blockbX + threadIdx.x) < n) {
bSub[threadIdx.y][threadIdx.x] = b[(blockbY + threadIdx.y) * n + blockbX + threadIdx.x];
} else {
bSub[threadIdx.y][threadIdx.x] = 0;
}
__syncthreads(); // Make sure that all threads had time to read the sub matrix.
if ((idxX < n) && (idxY < n))
{
for (int i=0; i < blockDim.x; ++i){ //assumes that we use square blocks
sum += aSub[threadIdx.y][i] * bSub[i][threadIdx.x];
}
}
__syncthreads();
}
if ((idxX < n) && (idxY < n)){
c[idxY * n + idxX] = sum;
}
}
/*
* Matrix multiplication host function called by main()
*/
void matrixMulOnDevice(double* a, double* b, double* c, int n)
{
int xGrid, yGrid;
float time;
// Define events for timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// First calculate grid size by dividing n by BLOCK_SIZE = 16
xGrid = (n % BLOCK_SIZE == 0) ? (n / BLOCK_SIZE) : (n / BLOCK_SIZE + 1);
yGrid = (n % BLOCK_SIZE == 0) ? (n / BLOCK_SIZE) : (n / BLOCK_SIZE + 1);
dim3 gridDim(xGrid, yGrid);
dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE);
printf("Grid: %d, %d; block:%d, %d\n", xGrid , yGrid , BLOCK_SIZE, BLOCK_SIZE);
// Invoke kernel and measure execution time
cudaEventRecord( start, 0 );
// TODO: Call the kernel
dgemm_gpu_shared<<<gridDim, blockDim>>>(a, b, c, n);
cudaDeviceSynchronize();
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
checkError("executing Kernel");
// Get elapsed time for kernel execution
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
printf ("\nKernel Execution Time: %f ms (dim C: %d * %d)", time, n, n);
printf ("\nThis corresponds to: %4.4f GFLOPS", getGflops(n, time));
}
int main(int argc, char** argv)
{
int n = 1024;
double *a, *b, *c;
int row, col;
double absError, maxAbsError = 0.0, sumAbsError = 0.0;
size_t size;
if (argc > 1) {
n = atoi(argv[1]);
}
// show banner
printf ("\n\n Matrix-Multiplication \n");
printf ( " ==========================================\n");
printf ( "\n Simple DGEMM implemantation on GPU");
// echo device data
int idevice = 0;
cudaSetDevice(idevice);
cudaDeviceProp dprops;
cudaGetDeviceProperties( &dprops, idevice );
printf ("\n Device name = %s, with compute capability %d.%d \n",
dprops.name, dprops.major, dprops.minor);
printf ( "\n Matrix size %d x %d", n, n);
// Allocate memory for matrices (that can be accessed from host and device)
size = n * n * sizeof(double);
cudaMallocManaged((void**)&a, size);
checkError("allocating UVM memory for A");
cudaMallocManaged((void**)&b, size);
checkError("allocating UVM memory for B");
cudaMallocManaged((void**)&c, size);
checkError("allocating UVM memory for C");
// Init matrices A and B: A = E so result will be B
#pragma omp parallel for private(row, col)
for (row = 0; row < n; ++row){
for (col = 0; col < n; col++){
a[row * n + col] = (row == col) ? 1.0 : 0.0;
b[row * n + col] = row * n + col;
}
}
// do matrix multiplication on device
matrixMulOnDevice(a, b, c, n);
// Compare results
for ( row = 0; row < n; ++row){
for ( col = 0; col < n; ++col) {
absError = fabs ( c[row * n + col] - b[row * n + col]);
sumAbsError += absError;
if (absError > maxAbsError)
maxAbsError = absError;
}
}
// Free memory
cudaFree(a);
checkError("Freeing a");
cudaFree(b);
checkError("Freeing b");
cudaFree(c);
checkError("Freeing c");
printf ("\nmaxAbsError: %4.4f, sumAbsError: %4.4f", maxAbsError, sumAbsError);
if (maxAbsError < 2.0e-5)
printf ("\n\nProgram terminated SUCCESSFULLY.\n\n");
return 0;
}
/*
* Some helper functions
*/
// get compute performance
float getGflops (int n, float time) {
float gf = (2.0e-6 * n * n* n / time);
return gf;
}
// Simple error checking function for CUDA actions
void checkError (const char* action) {
cudaError_t error;
error = cudaGetLastError();
if (error != cudaSuccess) {
printf ("\nError while '%s': %s\nprogram terminated ...\n\n", action, cudaGetErrorString(error));
exit (EXIT_FAILURE);
}
}
|
b2e0a60020eaca78c46e1b3c60f7d3006a3a4ede.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduceUnrolling.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_idata = NULL;
hipMalloc(&g_idata, XSIZE*YSIZE);
int *g_odata = NULL;
hipMalloc(&g_odata, XSIZE*YSIZE);
unsigned int n = 1;
unsigned int q = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
			hipFree(0);
			hipLaunchKernelGGL((reduceUnrolling), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n,q);
hipDeviceSynchronize();
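			// Editorial comment (added): 10 untimed warm-up launches below, then 1000 timed launches.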
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
				hipLaunchKernelGGL((reduceUnrolling), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n,q);
			}
auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				hipLaunchKernelGGL((reduceUnrolling), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,n,q);
			}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b2e0a60020eaca78c46e1b3c60f7d3006a3a4ede.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduceUnrolling.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_idata = NULL;
cudaMalloc(&g_idata, XSIZE*YSIZE);
int *g_odata = NULL;
cudaMalloc(&g_odata, XSIZE*YSIZE);
unsigned int n = 1;
unsigned int q = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reduceUnrolling<<<gridBlock,threadBlock>>>(g_idata,g_odata,n,q);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reduceUnrolling<<<gridBlock,threadBlock>>>(g_idata,g_odata,n,q);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reduceUnrolling<<<gridBlock,threadBlock>>>(g_idata,g_odata,n,q);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
890979112031f751b0a41c2286d45d5a86ef4118.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<stdio.h>
#include<chrono>
using namespace std;
using namespace std::chrono;
/*__global__ void addition(int *a, int *b, int *c, int n) {
int large_id = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = large_id; i < min(large_id + 256, n); i++) {
c[i] = a[i] + b[i];
printf("Test 1 2 3");
}
}*/
__global__ void addition(int *a, int *b, int *c, int n) {
int large_id = blockIdx.x * blockDim.x + threadIdx.x;
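	// Editorial comment (added): grid-stride loop -- each thread processes elements
	// large_id, large_id + blockDim.x*gridDim.x, ... until the end of the arrays.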
while (large_id < n) {
//if(large_id < n) {
c[large_id] = a[large_id] + b[large_id];
large_id += blockDim.x*gridDim.x;
}
}
void addition_cpu(int *a, int *b, int *c, int n) {
for(int i = 0; i < n; i++) {
c[i] = a[i] + b[i];
}
}
int main(void) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int n;
cin>>n;
cout<<n;
a = (int *) malloc(n * sizeof(int));
b = (int *) malloc(n * sizeof(int));
c = (int *) malloc(n * sizeof(int));
for(int i = 0; i < n; i++) {
a[i] = i + 1;
b[i] = i + 1;
c[i] = 0;
}
int size = n * sizeof(int);
hipMalloc(&dev_a, size);
hipMalloc(&dev_b, size);
hipMalloc(&dev_c, size);
hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice);
int blocks, threads;
blocks = threads = ceil(n * 1.0f/256.0f);
auto start = high_resolution_clock::now();
hipLaunchKernelGGL(( addition) , dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, dev_c, n);
auto stop = high_resolution_clock::now();
hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost);
cout<<"\nFor GPU:"<<endl;
/*for(int i = 0; i < n; i++) {
cout<<a[i]<<"+"<<b[i]<<"="<<c[i]<<endl;
}*/
//verify that gpu did work
int count = 0;
bool success = true;
for(int i = 0; i < n; i++) {
if((a[i] + b[i]) != c[i]) {
cout<<"Error in "<<a[i]<<"+"<<b[i]<<"="<<c[i]<<endl;
success = false;
count++;
}
}
if (success) cout<<"We did it"<<endl;
cout<<"Number of errors: "<<count<<endl;
cout<<"\nTime taken for parallel processing: "<<duration_cast <microseconds> (stop - start).count()<<endl;
for(int i =0; i < n; i++) {
c[i] = 0;
}
start = high_resolution_clock::now();
addition_cpu(a, b, c, n);
stop = high_resolution_clock::now();
cout<<"For CPU: "<<endl;
cout<<"\nTime taken for serial processing"<<duration_cast <microseconds> (stop - start).count() <<endl;
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
} | 890979112031f751b0a41c2286d45d5a86ef4118.cu | #include<iostream>
#include<stdio.h>
#include<chrono>
using namespace std;
using namespace std::chrono;
/*__global__ void addition(int *a, int *b, int *c, int n) {
int large_id = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = large_id; i < min(large_id + 256, n); i++) {
c[i] = a[i] + b[i];
printf("Test 1 2 3");
}
}*/
__global__ void addition(int *a, int *b, int *c, int n) {
int large_id = blockIdx.x * blockDim.x + threadIdx.x;
while (large_id < n) {
//if(large_id < n) {
c[large_id] = a[large_id] + b[large_id];
large_id += blockDim.x*gridDim.x;
}
}
void addition_cpu(int *a, int *b, int *c, int n) {
for(int i = 0; i < n; i++) {
c[i] = a[i] + b[i];
}
}
int main(void) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int n;
cin>>n;
cout<<n;
a = (int *) malloc(n * sizeof(int));
b = (int *) malloc(n * sizeof(int));
c = (int *) malloc(n * sizeof(int));
for(int i = 0; i < n; i++) {
a[i] = i + 1;
b[i] = i + 1;
c[i] = 0;
}
int size = n * sizeof(int);
cudaMalloc(&dev_a, size);
cudaMalloc(&dev_b, size);
cudaMalloc(&dev_c, size);
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
int blocks, threads;
blocks = threads = ceil(n * 1.0f/256.0f);
auto start = high_resolution_clock::now();
addition <<<blocks, threads>>> (dev_a, dev_b, dev_c, n);
auto stop = high_resolution_clock::now();
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
cout<<"\nFor GPU:"<<endl;
/*for(int i = 0; i < n; i++) {
cout<<a[i]<<"+"<<b[i]<<"="<<c[i]<<endl;
}*/
//verify that gpu did work
int count = 0;
bool success = true;
for(int i = 0; i < n; i++) {
if((a[i] + b[i]) != c[i]) {
cout<<"Error in "<<a[i]<<"+"<<b[i]<<"="<<c[i]<<endl;
success = false;
count++;
}
}
if (success) cout<<"We did it"<<endl;
cout<<"Number of errors: "<<count<<endl;
cout<<"\nTime taken for parallel processing: "<<duration_cast <microseconds> (stop - start).count()<<endl;
for(int i =0; i < n; i++) {
c[i] = 0;
}
start = high_resolution_clock::now();
addition_cpu(a, b, c, n);
stop = high_resolution_clock::now();
cout<<"For CPU: "<<endl;
cout<<"\nTime taken for serial processing"<<duration_cast <microseconds> (stop - start).count() <<endl;
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
} |
8e6287c2efc2deae2444a2ddf983a2cbe753b89a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define d_vx(z,x) d_vx[(x)*(nz)+(z)]
#define d_vy(z,x) d_vy[(x)*(nz)+(z)]
#define d_vz(z,x) d_vz[(x)*(nz)+(z)]
#define d_szz(z,x) d_szz[(x)*(nz)+(z)] // Pressure
#define d_mem_dszz_dz(z,x) d_mem_dszz_dz[(x)*(nz)+(z)]
#define d_mem_dsxx_dx(z,x) d_mem_dsxx_dx[(x)*(nz)+(z)]
#define d_mem_dvz_dz(z,x) d_mem_dvz_dz[(x)*(nz)+(z)]
#define d_mem_dvx_dx(z,x) d_mem_dvx_dx[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Den(z,x) d_Den[(x)*(nz)+(z)]
#define d_ave_Byc_a(z,x) d_ave_Byc_a[(x)*(nz)+(z)]
#define d_ave_Byc_b(z,x) d_ave_Byc_b[(x)*(nz)+(z)]
#define d_mat_dvz_dz(z,x) d_mat_dvz_dz[(x)*(nz)+(z)]
#define d_mat_dvx_dx(z,x) d_mat_dvx_dx[(x)*(nz)+(z)]
#define d_Cp(z,x) d_Cp[(x)*(nz)+(z)]
#define d_CpGrad(z,x) d_CpGrad[(x)*(nz)+(z)]
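// Editorial comment (added): the macros above flatten 2-D (z,x) indices into the
// 1-D arrays, which are stored with nz contiguous z-samples per x-column
// (index = x*nz + z).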
__global__ void ac_pressure_adj(float *d_vz, float *d_vx, float *d_szz, \
float *d_mem_dvz_dz, float *d_mem_dvx_dx, float *d_mem_dszz_dz, float *d_mem_dsxx_dx, \
float *d_Lambda, float *d_Den, float *d_ave_Byc_a, float *d_ave_Byc_b,\
float *d_K_z_half, float *d_a_z_half, float *d_b_z_half, \
float *d_K_x_half, float *d_a_x_half, float *d_b_x_half, \
float *d_K_z, float *d_a_z, float *d_b_z, \
float *d_K_x, float *d_a_x, float *d_b_x, \
int nz, int nx, float dt, float dz, float dx, int nPml, int nPad, \
float *d_Cp, float *d_mat_dvz_dz, float *d_mat_dvx_dx, float * d_CpGrad){
int gidz = blockIdx.x*blockDim.x + threadIdx.x;
int gidx = blockIdx.y*blockDim.y + threadIdx.y;
float dvz_dz = 0.0;
float dvx_dx = 0.0;
float dphiz_dz = 0.0;
float dphix_dx = 0.0;
float c1 = 9.0/8.0;
float c2 = 1.0/24.0;
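	// Editorial comment (added): c1 = 9/8 and c2 = 1/24 are the standard coefficients
	// of the 4th-order staggered-grid finite-difference stencil used below.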
if (gidz>=2 && gidz<=nz-nPad-3 && gidx>=2 && gidx<=nx-3) {
// dvz_dz = c1*(d_vz(gidz+1,gidx)-d_vz(gidz,gidx)) - c2*(d_vz(gidz+2,gidx)-d_vz(gidz-1,gidx));
// dvx_dx = c1*(d_vx(gidz,gidx)-d_vx(gidz,gidx-1)) - c2*(d_vx(gidz,gidx+1)-d_vx(gidz,gidx-2));
// dphiz_dz = c1*(d_mem_dszz_dz(gidz+1,gidx)-d_mem_dszz_dz(gidz,gidx)) \
// - c2*(d_mem_dszz_dz(gidz+2,gidx)-d_mem_dszz_dz(gidz-1,gidx));
// dphix_dx = c1*(d_mem_dsxx_dx(gidz,gidx)-d_mem_dsxx_dx(gidz,gidx-1)) \
// - c2*(d_mem_dsxx_dx(gidz,gidx+1)-d_mem_dsxx_dx(gidz,gidx-2));
// // update stress
// d_szz(gidz,gidx) += -1.0 * d_Lambda(gidz,gidx)*dt * (d_a_x_half[gidx]*dphix_dx + d_a_z[gidz]*dphiz_dz \
// + dvx_dx/d_K_x_half[gidx]/dx + dvz_dz/d_K_z[gidz]/dz);
// d_mem_dvx_dx(gidz, gidx) = d_b_x[gidx]*d_mem_dvx_dx(gidz, gidx) + d_szz(gidz, gidx)/dx;
// d_mem_dvz_dz(gidz, gidx) = d_b_z_half[gidz]*d_mem_dvz_dz(gidz, gidx) + d_szz(gidz, gidx)/dz;
// forward difference
// if (gidz == 2) {
// dvz_dz = c1*(d_vz(2,gidx)-d_vz(3,gidx)) + c2*d_vz(4,gidx);
// dphiz_dz = c1*(d_mem_dszz_dz(2,gidx)-d_mem_dszz_dz(3,gidx)) + c2*d_mem_dszz_dz(4,gidx);
// }
// else if (gidz == nz-nPad-3) {
// dvz_dz = c1*d_vz(gidz,gidx) - c2*d_vz(gidz-1,gidx);
// dphiz_dz = c1*d_mem_dszz_dz(gidz,gidx) - c2*d_mem_dszz_dz(gidz-1,gidx);
// }
// else if (gidz == nz-nPad-4) {
// dvz_dz = -c1*(d_vz(gidz+1,gidx)-d_vz(gidz,gidx)) - c2*d_vz(gidz-1,gidx);
// dphiz_dz = -c1*(d_mem_dszz_dz(gidz+1,gidx)-d_mem_dszz_dz(gidz,gidx)) - c2*d_mem_dszz_dz(gidz-1,gidx);
// }
// else {
dvz_dz = (-c1*(d_vz(gidz+1,gidx)-d_vz(gidz,gidx)) + c2*(d_vz(gidz+2,gidx)-d_vz(gidz-1,gidx)))/dz;
dphiz_dz = (-c1*(d_mem_dszz_dz(gidz+1,gidx)-d_mem_dszz_dz(gidz,gidx)) \
+ c2*(d_mem_dszz_dz(gidz+2,gidx)-d_mem_dszz_dz(gidz-1,gidx)))/dz;
// }
// backward difference
// if (gidx == 2) {
// dvx_dx = -c1*d_vx(gidz,gidx) + c2*d_vx(gidz,gidx+1);
// dphix_dx = -c1*d_mem_dsxx_dx(gidz,gidx) + c2*d_mem_dsxx_dx(gidz,gidx+1);
// }
// if (gidx == 3) {
// dvx_dx = -c1*(d_vx(gidz,gidx)-d_vx(gidz,gidx-1)) + c2*d_vx(gidz,gidx+1);
// dphix_dx = -c1*(d_mem_dsxx_dx(gidz,gidx)-d_mem_dsxx_dx(gidz,gidx-1)) \
// + c2*d_mem_dsxx_dx(gidz,gidx+1);
// }
// else if (gidx == nx-3) {
// dvx_dx = c1*(d_vx(gidz,gidx-1)-d_vx(gidz,gidx)) - c2*d_vx(gidz,gidx-2);
// dphix_dx = c1*(d_mem_dsxx_dx(gidz,gidx-1)-d_mem_dsxx_dx(gidz,gidx)) - c2*d_mem_dsxx_dx(gidz,gidx-2);
// }
// else {
dvx_dx = (-c1*(d_vx(gidz,gidx)-d_vx(gidz,gidx-1)) + c2*(d_vx(gidz,gidx+1)-d_vx(gidz,gidx-2)))/dx;
dphix_dx = (-c1*(d_mem_dsxx_dx(gidz,gidx)-d_mem_dsxx_dx(gidz,gidx-1)) \
+ c2*(d_mem_dsxx_dx(gidz,gidx+1)-d_mem_dsxx_dx(gidz,gidx-2)))/dx;
// }
// update stress
d_szz(gidz,gidx) += d_a_x_half[gidx]*dphix_dx + d_a_z[gidz]*dphiz_dz \
+ d_ave_Byc_b(gidz, gidx)*dvx_dx/d_K_x_half[gidx]*dt + d_ave_Byc_a(gidz, gidx)*dvz_dz/d_K_z[gidz]*dt;
if(gidx<=nPml || gidx>=nx-nPml-1){
d_mem_dvx_dx(gidz, gidx) = d_b_x[gidx]*d_mem_dvx_dx(gidz, gidx) + d_Lambda(gidz, gidx)*d_szz(gidz, gidx)*dt;
}
if(gidz<=nPml || (gidz>=nz-nPml-nPad-1)){
d_mem_dvz_dz(gidz, gidx) = d_b_z_half[gidz]*d_mem_dvz_dz(gidz, gidx) + d_Lambda(gidz, gidx)*d_szz(gidz, gidx)*dt;
}
}
else {
return;
}
}
| 8e6287c2efc2deae2444a2ddf983a2cbe753b89a.cu | #define d_vx(z,x) d_vx[(x)*(nz)+(z)]
#define d_vy(z,x) d_vy[(x)*(nz)+(z)]
#define d_vz(z,x) d_vz[(x)*(nz)+(z)]
#define d_szz(z,x) d_szz[(x)*(nz)+(z)] // Pressure
#define d_mem_dszz_dz(z,x) d_mem_dszz_dz[(x)*(nz)+(z)]
#define d_mem_dsxx_dx(z,x) d_mem_dsxx_dx[(x)*(nz)+(z)]
#define d_mem_dvz_dz(z,x) d_mem_dvz_dz[(x)*(nz)+(z)]
#define d_mem_dvx_dx(z,x) d_mem_dvx_dx[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Den(z,x) d_Den[(x)*(nz)+(z)]
#define d_ave_Byc_a(z,x) d_ave_Byc_a[(x)*(nz)+(z)]
#define d_ave_Byc_b(z,x) d_ave_Byc_b[(x)*(nz)+(z)]
#define d_mat_dvz_dz(z,x) d_mat_dvz_dz[(x)*(nz)+(z)]
#define d_mat_dvx_dx(z,x) d_mat_dvx_dx[(x)*(nz)+(z)]
#define d_Cp(z,x) d_Cp[(x)*(nz)+(z)]
#define d_CpGrad(z,x) d_CpGrad[(x)*(nz)+(z)]
__global__ void ac_pressure_adj(float *d_vz, float *d_vx, float *d_szz, \
float *d_mem_dvz_dz, float *d_mem_dvx_dx, float *d_mem_dszz_dz, float *d_mem_dsxx_dx, \
float *d_Lambda, float *d_Den, float *d_ave_Byc_a, float *d_ave_Byc_b,\
float *d_K_z_half, float *d_a_z_half, float *d_b_z_half, \
float *d_K_x_half, float *d_a_x_half, float *d_b_x_half, \
float *d_K_z, float *d_a_z, float *d_b_z, \
float *d_K_x, float *d_a_x, float *d_b_x, \
int nz, int nx, float dt, float dz, float dx, int nPml, int nPad, \
float *d_Cp, float *d_mat_dvz_dz, float *d_mat_dvx_dx, float * d_CpGrad){
int gidz = blockIdx.x*blockDim.x + threadIdx.x;
int gidx = blockIdx.y*blockDim.y + threadIdx.y;
float dvz_dz = 0.0;
float dvx_dx = 0.0;
float dphiz_dz = 0.0;
float dphix_dx = 0.0;
float c1 = 9.0/8.0;
float c2 = 1.0/24.0;
if (gidz>=2 && gidz<=nz-nPad-3 && gidx>=2 && gidx<=nx-3) {
// dvz_dz = c1*(d_vz(gidz+1,gidx)-d_vz(gidz,gidx)) - c2*(d_vz(gidz+2,gidx)-d_vz(gidz-1,gidx));
// dvx_dx = c1*(d_vx(gidz,gidx)-d_vx(gidz,gidx-1)) - c2*(d_vx(gidz,gidx+1)-d_vx(gidz,gidx-2));
// dphiz_dz = c1*(d_mem_dszz_dz(gidz+1,gidx)-d_mem_dszz_dz(gidz,gidx)) \
// - c2*(d_mem_dszz_dz(gidz+2,gidx)-d_mem_dszz_dz(gidz-1,gidx));
// dphix_dx = c1*(d_mem_dsxx_dx(gidz,gidx)-d_mem_dsxx_dx(gidz,gidx-1)) \
// - c2*(d_mem_dsxx_dx(gidz,gidx+1)-d_mem_dsxx_dx(gidz,gidx-2));
// // update stress
// d_szz(gidz,gidx) += -1.0 * d_Lambda(gidz,gidx)*dt * (d_a_x_half[gidx]*dphix_dx + d_a_z[gidz]*dphiz_dz \
// + dvx_dx/d_K_x_half[gidx]/dx + dvz_dz/d_K_z[gidz]/dz);
// d_mem_dvx_dx(gidz, gidx) = d_b_x[gidx]*d_mem_dvx_dx(gidz, gidx) + d_szz(gidz, gidx)/dx;
// d_mem_dvz_dz(gidz, gidx) = d_b_z_half[gidz]*d_mem_dvz_dz(gidz, gidx) + d_szz(gidz, gidx)/dz;
// forward difference
// if (gidz == 2) {
// dvz_dz = c1*(d_vz(2,gidx)-d_vz(3,gidx)) + c2*d_vz(4,gidx);
// dphiz_dz = c1*(d_mem_dszz_dz(2,gidx)-d_mem_dszz_dz(3,gidx)) + c2*d_mem_dszz_dz(4,gidx);
// }
// else if (gidz == nz-nPad-3) {
// dvz_dz = c1*d_vz(gidz,gidx) - c2*d_vz(gidz-1,gidx);
// dphiz_dz = c1*d_mem_dszz_dz(gidz,gidx) - c2*d_mem_dszz_dz(gidz-1,gidx);
// }
// else if (gidz == nz-nPad-4) {
// dvz_dz = -c1*(d_vz(gidz+1,gidx)-d_vz(gidz,gidx)) - c2*d_vz(gidz-1,gidx);
// dphiz_dz = -c1*(d_mem_dszz_dz(gidz+1,gidx)-d_mem_dszz_dz(gidz,gidx)) - c2*d_mem_dszz_dz(gidz-1,gidx);
// }
// else {
dvz_dz = (-c1*(d_vz(gidz+1,gidx)-d_vz(gidz,gidx)) + c2*(d_vz(gidz+2,gidx)-d_vz(gidz-1,gidx)))/dz;
dphiz_dz = (-c1*(d_mem_dszz_dz(gidz+1,gidx)-d_mem_dszz_dz(gidz,gidx)) \
+ c2*(d_mem_dszz_dz(gidz+2,gidx)-d_mem_dszz_dz(gidz-1,gidx)))/dz;
// }
// backward difference
// if (gidx == 2) {
// dvx_dx = -c1*d_vx(gidz,gidx) + c2*d_vx(gidz,gidx+1);
// dphix_dx = -c1*d_mem_dsxx_dx(gidz,gidx) + c2*d_mem_dsxx_dx(gidz,gidx+1);
// }
// if (gidx == 3) {
// dvx_dx = -c1*(d_vx(gidz,gidx)-d_vx(gidz,gidx-1)) + c2*d_vx(gidz,gidx+1);
// dphix_dx = -c1*(d_mem_dsxx_dx(gidz,gidx)-d_mem_dsxx_dx(gidz,gidx-1)) \
// + c2*d_mem_dsxx_dx(gidz,gidx+1);
// }
// else if (gidx == nx-3) {
// dvx_dx = c1*(d_vx(gidz,gidx-1)-d_vx(gidz,gidx)) - c2*d_vx(gidz,gidx-2);
// dphix_dx = c1*(d_mem_dsxx_dx(gidz,gidx-1)-d_mem_dsxx_dx(gidz,gidx)) - c2*d_mem_dsxx_dx(gidz,gidx-2);
// }
// else {
dvx_dx = (-c1*(d_vx(gidz,gidx)-d_vx(gidz,gidx-1)) + c2*(d_vx(gidz,gidx+1)-d_vx(gidz,gidx-2)))/dx;
dphix_dx = (-c1*(d_mem_dsxx_dx(gidz,gidx)-d_mem_dsxx_dx(gidz,gidx-1)) \
+ c2*(d_mem_dsxx_dx(gidz,gidx+1)-d_mem_dsxx_dx(gidz,gidx-2)))/dx;
// }
// update stress
d_szz(gidz,gidx) += d_a_x_half[gidx]*dphix_dx + d_a_z[gidz]*dphiz_dz \
+ d_ave_Byc_b(gidz, gidx)*dvx_dx/d_K_x_half[gidx]*dt + d_ave_Byc_a(gidz, gidx)*dvz_dz/d_K_z[gidz]*dt;
if(gidx<=nPml || gidx>=nx-nPml-1){
d_mem_dvx_dx(gidz, gidx) = d_b_x[gidx]*d_mem_dvx_dx(gidz, gidx) + d_Lambda(gidz, gidx)*d_szz(gidz, gidx)*dt;
}
if(gidz<=nPml || (gidz>=nz-nPml-nPad-1)){
d_mem_dvz_dz(gidz, gidx) = d_b_z_half[gidz]*d_mem_dvz_dz(gidz, gidx) + d_Lambda(gidz, gidx)*d_szz(gidz, gidx)*dt;
}
}
else {
return;
}
}
|
4daa478c7564ab21e56c994467fa8d66127c9565.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace nd4j {
////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ void execFillIsMax(void *vdZ, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) {
auto dz = reinterpret_cast<T*>(vdZ);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x)
dz[shape::getIndexOffset(i, xShapeInfo)] = (i == idx ? (T) 1 : (T) 0);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
__host__ void fillIsMaxGeneric(dim3 &launchDims, hipStream_t *stream, void *dx, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) {
hipLaunchKernelGGL(( execFillIsMax<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, dx, xShapeInfo, length, idx);
nd4j::DebugHelper::checkErrorCode(stream, "fillIsMax(...) failed");
}
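// Hypothetical usage sketch (editorial addition, not from the original source; the
// launch configuration and variable names are illustrative assumptions only):
//   hipStream_t stream; hipStreamCreate(&stream);
//   dim3 launchDims(256, 512, 1024);   // blocks, threads per block, shared-memory bytes
//   fillIsMaxGeneric<float>(launchDims, &stream, dz, zShapeInfo, length, idx);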
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT fillIsMaxGeneric, (dim3& launchDims, hipStream_t *stream, void* dz, Nd4jLong *zShapeInfo, Nd4jLong length, long idx), LIBND4J_TYPES);
} | 4daa478c7564ab21e56c994467fa8d66127c9565.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace nd4j {
////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ void execFillIsMax(void *vdZ, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) {
auto dz = reinterpret_cast<T*>(vdZ);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x)
dz[shape::getIndexOffset(i, xShapeInfo)] = (i == idx ? (T) 1 : (T) 0);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
__host__ void fillIsMaxGeneric(dim3 &launchDims, cudaStream_t *stream, void *dx, Nd4jLong *xShapeInfo, Nd4jLong length, long idx) {
execFillIsMax<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dx, xShapeInfo, length, idx);
nd4j::DebugHelper::checkErrorCode(stream, "fillIsMax(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT fillIsMaxGeneric, (dim3& launchDims, cudaStream_t *stream, void* dz, Nd4jLong *zShapeInfo, Nd4jLong length, long idx), LIBND4J_TYPES);
} |
4ef8aa13369dfb6a89db7e5b3f88eb1627e8fc71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zmdotc.cu, normal z -> c, Mon Jun 25 18:24:26 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define COMPLEX
// dot product for multiple vectors
__global__ void
magma_cmdotc1_kernel_1(
int Gs,
int n,
magmaFloatComplex * v0,
magmaFloatComplex * w0,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
// 1 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_C_ZERO;
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// block reduction for 1 vectors
__global__ void
magma_cmdotc1_kernel_2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx] = MAGMA_C_ZERO;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] +=
( i + (blockSize) < Gs ) ? vtmp[ i + (blockSize) ]
: MAGMA_C_ZERO;
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
/**
Purpose
-------
Computes the scalar product of a set of 1 vectors such that
skp[0] = [ <v_0,w_0> ]
Returns the vector skp.
In case there are less dot products required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloatComplex_ptr
input vector
@param[in]
w0 magmaFloatComplex_ptr
input vector
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
                vector[1] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmdotc1(
magma_int_t n,
magmaFloatComplex_ptr v0,
magmaFloatComplex_ptr w0,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (local_block_size) * sizeof( magmaFloatComplex ); // 1 skp
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
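    // Editorial comment (added): kernel_1 writes one partial sum per block; the loop
    // below then ping-pongs between the d1/d2 workspaces, reducing the number of
    // partial sums until a single value remains.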
hipLaunchKernelGGL(( magma_cmdotc1_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
Gs.x, n, v0, w0, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_cmdotc1_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_cgetvector( 1 , aux1, 1, skp, 1, queue );
return MAGMA_SUCCESS;
}
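// Editorial usage sketch (added; not part of the original MAGMA source, and the
// variable names are illustrative assumptions only):
//   magmaFloatComplex skp_host[1];
//   magma_cmdotc1( n, v, w, d1, d2, skp_host, queue );   // skp_host[0] = <v,w>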
// 2 dot products //
// initialize arrays with zero
__global__ void
magma_cmdotc2_gpumemzero(
magmaFloatComplex * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
for (int j = 0; j < 2; j++)
d[ i+j*n ] = MAGMA_C_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_cmdotc2_kernel_1(
int Gs,
int n,
magmaFloatComplex * v0,
magmaFloatComplex * w0,
magmaFloatComplex * v1,
magmaFloatComplex * w1,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 2 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_C_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_C_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 2 vectors
__global__ void
magma_cmdotc2_kernel_2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of 2 vectors such that
    skp[0,1] = [ <v_0,w_0>, <v_1,w_1> ]
Returns the vector skp.
In case there are less dot products required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloatComplex_ptr
input vector
@param[in]
w0 magmaFloatComplex_ptr
input vector
@param[in]
v1 magmaFloatComplex_ptr
input vector
@param[in]
w1 magmaFloatComplex_ptr
input vector
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
                vector[2] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmdotc2(
magma_int_t n,
magmaFloatComplex_ptr v0,
magmaFloatComplex_ptr w0,
magmaFloatComplex_ptr v1,
magmaFloatComplex_ptr w1,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2 * (local_block_size) * sizeof( magmaFloatComplex ); // 4 skp
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_cmdotc2_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
Gs.x, n, v0, w0, v1, w1, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_cmdotc2_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_cgetvector( 2 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
// 3 dot products //
// initialize arrays with zero
__global__ void
magma_cmdotc3_gpumemzero(
magmaFloatComplex * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
for (int j = 0; j < 3; j++)
d[ i+j*n ] = MAGMA_C_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_cmdotc3_kernel_1(
int Gs,
int n,
magmaFloatComplex * v0,
magmaFloatComplex * w0,
magmaFloatComplex * v1,
magmaFloatComplex * w1,
magmaFloatComplex * v2,
magmaFloatComplex * w2,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 3 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_C_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_C_ZERO;
temp[ Idx + 2*blockDim.x ] = ( i < n ) ?
v2[ i ] * w2[ i ] : MAGMA_C_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<3; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<3; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<3; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<3; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 3 vectors
__global__ void
magma_cmdotc3_kernel_2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<3; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<3; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<3; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<3; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
    Computes the scalar product of a set of 3 vectors such that
    skp[0,1,2] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2> ]
Returns the vector skp.
In case there are less dot products required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloatComplex_ptr
input vector
@param[in]
w0 magmaFloatComplex_ptr
input vector
@param[in]
v1 magmaFloatComplex_ptr
input vector
@param[in]
w1 magmaFloatComplex_ptr
input vector
@param[in]
v2 magmaFloatComplex_ptr
input vector
@param[in]
w2 magmaFloatComplex_ptr
input vector
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[3] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmdotc3(
magma_int_t n,
magmaFloatComplex_ptr v0,
magmaFloatComplex_ptr w0,
magmaFloatComplex_ptr v1,
magmaFloatComplex_ptr w1,
magmaFloatComplex_ptr v2,
magmaFloatComplex_ptr w2,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 3 * (local_block_size) * sizeof( magmaFloatComplex ); // 4 skp
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
// magma_cmdotc3_gpumemzero<<< Gs, Bs, 0, queue->cuda_stream() >>>( d1, n );
hipLaunchKernelGGL(( magma_cmdotc3_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
Gs.x, n, v0, w0, v1, w1, v2, w2, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_cmdotc3_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_cgetvector( 3 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
// 4 dot products //
// initialize arrays with zero
__global__ void
magma_cmdotc4_gpumemzero(
magmaFloatComplex * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
for (int j = 0; j < 4; j++)
d[ i+j*n ] = MAGMA_C_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_cmdotc4_kernel_1(
int Gs,
int n,
magmaFloatComplex * v0,
magmaFloatComplex * w0,
magmaFloatComplex * v1,
magmaFloatComplex * w1,
magmaFloatComplex * v2,
magmaFloatComplex * w2,
magmaFloatComplex * v3,
magmaFloatComplex * w3,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 4 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_C_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_C_ZERO;
temp[ Idx + 2*blockDim.x ] = ( i < n ) ?
v2[ i ] * w2[ i ] : MAGMA_C_ZERO;
temp[ Idx + 3*blockDim.x ] = ( i < n ) ?
v3[ i ] * w3[ i ] : MAGMA_C_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<4; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<4; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<4; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<4; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 4 vectors
__global__ void
magma_cmdotc4_kernel_2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<4; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<4; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<4; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<4; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of 4 vectors such that
    skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v_3,w_3> ]
Returns the vector skp.
In case there are less dot products required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloatComplex_ptr
input vector
@param[in]
w0 magmaFloatComplex_ptr
input vector
@param[in]
v1 magmaFloatComplex_ptr
input vector
@param[in]
w1 magmaFloatComplex_ptr
input vector
@param[in]
v2 magmaFloatComplex_ptr
input vector
@param[in]
w2 magmaFloatComplex_ptr
input vector
@param[in]
v3 magmaFloatComplex_ptr
input vector
@param[in]
w3 magmaFloatComplex_ptr
input vector
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[4] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmdotc4(
magma_int_t n,
magmaFloatComplex_ptr v0,
magmaFloatComplex_ptr w0,
magmaFloatComplex_ptr v1,
magmaFloatComplex_ptr w1,
magmaFloatComplex_ptr v2,
magmaFloatComplex_ptr w2,
magmaFloatComplex_ptr v3,
magmaFloatComplex_ptr w3,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4 * (local_block_size) * sizeof( magmaFloatComplex ); // 4 skp
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_cmdotc4_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
Gs.x, n, v0, w0, v1, w1, v2, w2, v3, w3, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_cmdotc4_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_cgetvector( 4 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
| 4ef8aa13369dfb6a89db7e5b3f88eb1627e8fc71.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zmdotc.cu, normal z -> c, Mon Jun 25 18:24:26 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define COMPLEX
// dot product for multiple vectors
__global__ void
magma_cmdotc1_kernel_1(
int Gs,
int n,
magmaFloatComplex * v0,
magmaFloatComplex * w0,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
// 1 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_C_ZERO;
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// block reduction for 1 vectors
__global__ void
magma_cmdotc1_kernel_2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx] = MAGMA_C_ZERO;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] +=
( i + (blockSize) < Gs ) ? vtmp[ i + (blockSize) ]
: MAGMA_C_ZERO;
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];
__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
/**
Purpose
-------
Computes the scalar product of a set of 1 vectors such that
skp[0] = [ <v_0,w_0> ]
Returns the vector skp.
In case there are less dot products required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloatComplex_ptr
input vector
@param[in]
w0 magmaFloatComplex_ptr
input vector
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[1] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmdotc1(
magma_int_t n,
magmaFloatComplex_ptr v0,
magmaFloatComplex_ptr w0,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (local_block_size) * sizeof( magmaFloatComplex ); // 1 skp
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_cmdotc1_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( Gs.x, n, v0, w0, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_cmdotc1_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_cgetvector( 1 , aux1, 1, skp, 1, queue );
return MAGMA_SUCCESS;
}
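// -----------------------------------------------------------------------
// Illustrative usage sketch for magma_cmdotc1 (not part of the original
// MAGMA source). It assumes d_v0/d_w0 are device vectors of length n and
// d1/d2 are device workspaces with at least n entries each; skp_h is a
// host-side result. Error handling is omitted for brevity.
extern "C" void
example_cmdotc1_usage(
    magma_int_t n,
    magmaFloatComplex_ptr d_v0,
    magmaFloatComplex_ptr d_w0,
    magmaFloatComplex_ptr d1,
    magmaFloatComplex_ptr d2,
    magma_queue_t queue )
{
    magmaFloatComplex skp_h[1];
    magma_cmdotc1( n, d_v0, d_w0, d1, d2, skp_h, queue ); // skp_h[0] = <v_0,w_0>
}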
// 2 dot products //
// initialize arrays with zero
__global__ void
magma_cmdotc2_gpumemzero(
magmaFloatComplex * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
for (int j = 0; j < 2; j++)
d[ i+j*n ] = MAGMA_C_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_cmdotc2_kernel_1(
int Gs,
int n,
magmaFloatComplex * v0,
magmaFloatComplex * w0,
magmaFloatComplex * v1,
magmaFloatComplex * w1,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 2 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_C_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_C_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 2 vectors
__global__ void
magma_cmdotc2_kernel_2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of 2 vectors such that
skp[0,1] = [ <v_0,w_0>, <v_1,w_1> ]
Returns the vector skp.
In case fewer dot products are required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloatComplex_ptr
input vector
@param[in]
w0 magmaFloatComplex_ptr
input vector
@param[in]
v1 magmaFloatComplex_ptr
input vector
@param[in]
w1 magmaFloatComplex_ptr
input vector
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[2] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmdotc2(
magma_int_t n,
magmaFloatComplex_ptr v0,
magmaFloatComplex_ptr w0,
magmaFloatComplex_ptr v1,
magmaFloatComplex_ptr w1,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2 * (local_block_size) * sizeof( magmaFloatComplex ); // 2 skp
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_cmdotc2_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( Gs.x, n, v0, w0, v1, w1, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_cmdotc2_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_cgetvector( 2 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
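// -----------------------------------------------------------------------
// Illustrative usage sketch for magma_cmdotc2 (not part of the original
// MAGMA source). d_v0/d_w0/d_v1/d_w1 are device vectors of length n;
// d1/d2 are device workspaces assumed to hold at least 2*n entries each,
// matching the vtmp[ blockIdx.x + j*n ] indexing of the kernels above.
extern "C" void
example_cmdotc2_usage(
    magma_int_t n,
    magmaFloatComplex_ptr d_v0, magmaFloatComplex_ptr d_w0,
    magmaFloatComplex_ptr d_v1, magmaFloatComplex_ptr d_w1,
    magmaFloatComplex_ptr d1,  magmaFloatComplex_ptr d2,
    magma_queue_t queue )
{
    magmaFloatComplex skp_h[2];   // skp_h[0] = <v_0,w_0>, skp_h[1] = <v_1,w_1>
    magma_cmdotc2( n, d_v0, d_w0, d_v1, d_w1, d1, d2, skp_h, queue );
}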
// 3 dot products //
// initialize arrays with zero
__global__ void
magma_cmdotc3_gpumemzero(
magmaFloatComplex * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
for (int j = 0; j < 3; j++)
d[ i+j*n ] = MAGMA_C_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_cmdotc3_kernel_1(
int Gs,
int n,
magmaFloatComplex * v0,
magmaFloatComplex * w0,
magmaFloatComplex * v1,
magmaFloatComplex * w1,
magmaFloatComplex * v2,
magmaFloatComplex * w2,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 3 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_C_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_C_ZERO;
temp[ Idx + 2*blockDim.x ] = ( i < n ) ?
v2[ i ] * w2[ i ] : MAGMA_C_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<3; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<3; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<3; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<3; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 3 vectors
__global__ void
magma_cmdotc3_kernel_2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<3; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<3; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<3; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<3; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<3; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of 3 vectors such that
skp[0,1,2] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2> ]
Returns the vector skp.
In case fewer dot products are required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloatComplex_ptr
input vector
@param[in]
w0 magmaFloatComplex_ptr
input vector
@param[in]
v1 magmaFloatComplex_ptr
input vector
@param[in]
w1 magmaFloatComplex_ptr
input vector
@param[in]
v2 magmaFloatComplex_ptr
input vector
@param[in]
w2 magmaFloatComplex_ptr
input vector
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[3] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmdotc3(
magma_int_t n,
magmaFloatComplex_ptr v0,
magmaFloatComplex_ptr w0,
magmaFloatComplex_ptr v1,
magmaFloatComplex_ptr w1,
magmaFloatComplex_ptr v2,
magmaFloatComplex_ptr w2,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 3 * (local_block_size) * sizeof( magmaFloatComplex ); // 3 skp
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
// magma_cmdotc3_gpumemzero<<< Gs, Bs, 0, queue->cuda_stream() >>>( d1, n );
magma_cmdotc3_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( Gs.x, n, v0, w0, v1, w1, v2, w2, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_cmdotc3_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_cgetvector( 3 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
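// -----------------------------------------------------------------------
// Illustrative usage sketch for magma_cmdotc3 (not part of the original
// MAGMA source). Device workspaces d1/d2 are assumed to hold at least
// 3*n entries each (partial sums are stored at column offsets j*n, j = 0,1,2).
extern "C" void
example_cmdotc3_usage(
    magma_int_t n,
    magmaFloatComplex_ptr d_v0, magmaFloatComplex_ptr d_w0,
    magmaFloatComplex_ptr d_v1, magmaFloatComplex_ptr d_w1,
    magmaFloatComplex_ptr d_v2, magmaFloatComplex_ptr d_w2,
    magmaFloatComplex_ptr d1,  magmaFloatComplex_ptr d2,
    magma_queue_t queue )
{
    magmaFloatComplex skp_h[3];
    magma_cmdotc3( n, d_v0, d_w0, d_v1, d_w1, d_v2, d_w2, d1, d2, skp_h, queue );
    // skp_h[i] now holds <v_i,w_i> for i = 0,1,2.
}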
// 4 dot products //
// initialize arrays with zero
__global__ void
magma_cmdotc4_gpumemzero(
magmaFloatComplex * d,
int n )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
for (int j = 0; j < 4; j++)
d[ i+j*n ] = MAGMA_C_MAKE( 0.0, 0.0 );
}
}
// dot product for multiple vectors
__global__ void
magma_cmdotc4_kernel_1(
int Gs,
int n,
magmaFloatComplex * v0,
magmaFloatComplex * w0,
magmaFloatComplex * v1,
magmaFloatComplex * w1,
magmaFloatComplex * v2,
magmaFloatComplex * w2,
magmaFloatComplex * v3,
magmaFloatComplex * w3,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 4 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_C_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v1[ i ] * w1[ i ] : MAGMA_C_ZERO;
temp[ Idx + 2*blockDim.x ] = ( i < n ) ?
v2[ i ] * w2[ i ] : MAGMA_C_ZERO;
temp[ Idx + 3*blockDim.x ] = ( i < n ) ?
v3[ i ] * w3[ i ] : MAGMA_C_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<4; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<4; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<4; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<4; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for 4 vectors
__global__ void
magma_cmdotc4_kernel_2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<4; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<4; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#ifdef COMPLEX
if( Idx < 32 ){
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<4; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#ifdef REAL
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<4; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<4; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of 4 vectors such that
skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v_3,w_3> ]
Returns the vector skp.
In case fewer dot products are required, an easy workaround is
given by doubling input.
Arguments
---------
@param[in]
n int
length of v_i and w_i
@param[in]
v0 magmaFloatComplex_ptr
input vector
@param[in]
w0 magmaFloatComplex_ptr
input vector
@param[in]
v1 magmaFloatComplex_ptr
input vector
@param[in]
w1 magmaFloatComplex_ptr
input vector
@param[in]
v2 magmaFloatComplex_ptr
input vector
@param[in]
w2 magmaFloatComplex_ptr
input vector
@param[in]
v3 magmaFloatComplex_ptr
input vector
@param[in]
w3 magmaFloatComplex_ptr
input vector
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[4] of scalar products [<v_i, w_i>]
This vector is located on the host
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmdotc4(
magma_int_t n,
magmaFloatComplex_ptr v0,
magmaFloatComplex_ptr w0,
magmaFloatComplex_ptr v1,
magmaFloatComplex_ptr w1,
magmaFloatComplex_ptr v2,
magmaFloatComplex_ptr w2,
magmaFloatComplex_ptr v3,
magmaFloatComplex_ptr w3,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4 * (local_block_size) * sizeof( magmaFloatComplex ); // 4 skp
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_cmdotc4_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( Gs.x, n, v0, w0, v1, w1, v2, w2, v3, w3, d1 );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_cmdotc4_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// copy vectors to host
magma_cgetvector( 4 , aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
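// -----------------------------------------------------------------------
// Illustrative usage sketch for magma_cmdotc4 (not part of the original
// MAGMA source). Workspaces d1/d2 are assumed to hold at least 4*n
// entries each. As noted in the documentation above, if fewer than four
// dot products are needed, one of the input pairs can simply be doubled.
extern "C" void
example_cmdotc4_usage(
    magma_int_t n,
    magmaFloatComplex_ptr d_v0, magmaFloatComplex_ptr d_w0,
    magmaFloatComplex_ptr d_v1, magmaFloatComplex_ptr d_w1,
    magmaFloatComplex_ptr d_v2, magmaFloatComplex_ptr d_w2,
    magmaFloatComplex_ptr d1,  magmaFloatComplex_ptr d2,
    magma_queue_t queue )
{
    magmaFloatComplex skp_h[4];
    // only three distinct pairs are needed here, so the first pair is doubled
    magma_cmdotc4( n, d_v0, d_w0, d_v1, d_w1, d_v2, d_w2, d_v0, d_w0,
                   d1, d2, skp_h, queue );
}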
|
8c594d410aa4c5f34e51df5ceb2ff32c395c75ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgemvmdot.cu, normal z -> c, Wed Jan 2 14:18:53 2019
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_c
// initialize arrays with zero
__global__ void
magma_cgpumemzero(
magmaFloatComplex * d,
int n,
int k )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
for (int j = 0; j < k; j++)
d[ i+j*n ] = MAGMA_C_MAKE( 0.0, 0.0 );
}
}
// dot product
__global__ void
magma_cdot_kernel(
int Gs,
int n,
magmaFloatComplex * v,
magmaFloatComplex * r,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_C_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// dot product for multiple vectors
__global__ void
magma_cblockdot_kernel(
int Gs,
int n,
int k,
magmaFloatComplex * v,
magmaFloatComplex * r,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i)
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else {
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for multiple vectors
__global__ void
magma_cblockreduce_kernel(
int Gs,
int n,
int k,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// accelerated reduction for one vector
__global__ void
magma_creduce_kernel_fast( int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated block reduction for multiple vectors
__global__ void
magma_cblockreduce_kernel_fast(
int Gs,
int n,
int k,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloatComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloatComplex_ptr
r
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmdotc(
magma_int_t n,
magma_int_t k,
magmaFloatComplex_ptr v,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (k)* (local_block_size) * sizeof( magmaFloatComplex ); // k vecs
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if (k>1) {
hipLaunchKernelGGL(( magma_cblockdot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, k, v, r, d1 );
}
else {
hipLaunchKernelGGL(( magma_cdot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v, r, d1 );
}
/*
// not necessary to zero GPU mem
magma_cgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 );
magma_cgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 );
//magmablas_claset( MagmaFull, n, k, d1, n, UNKNOWN );
//magmablas_claset( MagmaFull, n, k, d2, n, UNKNOWN );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
magma_cblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>>
( Gs.x, n, k, aux1, aux2 );
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
for( int j=0; j<k; j++) {
magma_ccopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN );
}
*/
if ( k>1) {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_cblockreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, k, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
else {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_creduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
magma_ccopyvector_async( k, aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
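// -----------------------------------------------------------------------
// Illustrative usage sketch for magma_cmdotc (not part of the original
// MAGMA source). The k vectors v_0..v_{k-1} are assumed to be stored
// back-to-back in d_v (v_i starting at d_v + i*n, as in magma_cgemvmdot
// below); d1/d2 are device workspaces of at least k*n entries each.
extern "C" void
example_cmdotc_usage(
    magma_int_t n, magma_int_t k,
    magmaFloatComplex_ptr d_v,   // k vectors of length n, stored consecutively
    magmaFloatComplex_ptr d_r,
    magmaFloatComplex_ptr d1, magmaFloatComplex_ptr d2,
    magmaFloatComplex_ptr d_skp, // device array receiving the k results
    magma_queue_t queue )
{
    magma_cmdotc( n, k, d_v, d_r, d1, d2, d_skp, queue );
    // d_skp[i] = <v_i, r> once the asynchronous copy on queue has completed.
}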
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vector product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloatComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloatComplex_ptr
r
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cgemvmdot(
magma_int_t n,
magma_int_t k,
magmaFloatComplex_ptr v,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int rows_left = k;
int offset = 0;
int chunk_size = 4;
// process in chunks of chunk_size (4 here) - has to be adapted to hardware and precision
while( rows_left > (chunk_size) ) {
magma_cmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
offset = offset + chunk_size;
rows_left = rows_left-chunk_size;
}
// process rest
magma_cmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );
return MAGMA_SUCCESS;
}
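// -----------------------------------------------------------------------
// Illustrative note (not part of the original MAGMA source): for k = 10
// the loop above issues magma_cmdotc for columns 0-3 and 4-7, and the
// trailing call handles the remaining columns 8-9, e.g.
//
//     magma_cgemvmdot( n, 10, d_v, d_r, d1, d2, d_skp, queue );
//
// with d_v holding the 10 vectors consecutively and d_skp receiving the
// 10 scalar products on the device.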
| 8c594d410aa4c5f34e51df5ceb2ff32c395c75ec.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgemvmdot.cu, normal z -> c, Wed Jan 2 14:18:53 2019
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_c
// initialize arrays with zero
__global__ void
magma_cgpumemzero(
magmaFloatComplex * d,
int n,
int k )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
for (int j = 0; j < k; j++)
d[ i+j*n ] = MAGMA_C_MAKE( 0.0, 0.0 );
}
}
// dot product
__global__ void
magma_cdot_kernel(
int Gs,
int n,
magmaFloatComplex * v,
magmaFloatComplex * r,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_C_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// dot product for multiple vectors
__global__ void
magma_cblockdot_kernel(
int Gs,
int n,
int k,
magmaFloatComplex * v,
magmaFloatComplex * r,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i)
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else {
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for multiple vectors
__global__ void
magma_cblockreduce_kernel(
int Gs,
int n,
int k,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// accelerated reduction for one vector
__global__ void
magma_creduce_kernel_fast( int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated block reduction for multiple vectors
__global__ void
magma_cblockreduce_kernel_fast(
int Gs,
int n,
int k,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloatComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloatComplex_ptr
r
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmdotc(
magma_int_t n,
magma_int_t k,
magmaFloatComplex_ptr v,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (k)* (local_block_size) * sizeof( magmaFloatComplex ); // k vecs
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if (k>1) {
magma_cblockdot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, k, v, r, d1 );
}
else {
magma_cdot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, v, r, d1 );
}
/*
// not necessary to zero GPU mem
magma_cgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 );
magma_cgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 );
//magmablas_claset( MagmaFull, n, k, d1, n, UNKNOWN );
//magmablas_claset( MagmaFull, n, k, d2, n, UNKNOWN );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
magma_cblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>>
( Gs.x, n, k, aux1, aux2 );
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
for( int j=0; j<k; j++) {
magma_ccopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN );
}
*/
if ( k>1) {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_cblockreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, k, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
else {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_creduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
magma_ccopyvector_async( k, aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
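// -----------------------------------------------------------------------
// Illustrative usage sketch (not part of the original MAGMA source): a
// self-contained call that also allocates the workspaces. The workspace
// size k*n is an assumption that matches the vtmp[ blockIdx.x + j*n ]
// indexing of the kernels above; error checking is omitted for brevity.
extern "C" magma_int_t
example_cmdotc_with_workspaces(
    magma_int_t n, magma_int_t k,
    magmaFloatComplex_ptr d_v,    // k vectors of length n, stored consecutively
    magmaFloatComplex_ptr d_r,
    magmaFloatComplex_ptr d_skp,  // device array receiving the k results
    magma_queue_t queue )
{
    magmaFloatComplex_ptr d1 = NULL, d2 = NULL;
    magma_cmalloc( &d1, n*k );
    magma_cmalloc( &d2, n*k );
    magma_cmdotc( n, k, d_v, d_r, d1, d2, d_skp, queue );
    magma_queue_sync( queue );    // results in d_skp are valid after this
    magma_free( d1 );
    magma_free( d2 );
    return MAGMA_SUCCESS;
}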
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vector product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloatComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloatComplex_ptr
r
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cgemvmdot(
magma_int_t n,
magma_int_t k,
magmaFloatComplex_ptr v,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int rows_left = k;
int offset = 0;
int chunk_size = 4;
// process in chunks of chunk_size (4 here) - has to be adapted to hardware and precision
while( rows_left > (chunk_size) ) {
magma_cmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
offset = offset + chunk_size;
rows_left = rows_left-chunk_size;
}
// process rest
magma_cmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );
return MAGMA_SUCCESS;
}
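// -----------------------------------------------------------------------
// Note (not part of the original MAGMA source): chunk_size bounds the
// dynamic shared memory used by magma_cmdotc, which requests
// chunk_size * local_block_size complex entries per thread block, so a
// larger chunk trades shared memory for fewer kernel launches.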
|
0b1cc217fc7aa689b68915885534ffe7dc0ec539.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__ void join(
TUPLE *lt,
TUPLE *rt,
RESULT *jt,
int *count,
int *r_p,
int *radix,
int *lp,
int right,
int left
)
{
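  // Added description (not in the original source): each thread block
  // handles one partition of the left table. The block first stages up to
  // B_ROW_NUM left tuples lt[lp[blockIdx.x] .. lp[blockIdx.x+1]) into the
  // shared array sub_lt, then every thread scans a strided part of the
  // matching right partition rt[r_p[radix[blockIdx.x]] .. r_p[radix[blockIdx.x]+1])
  // and appends join results to jt starting at its private offset count[x].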
int x = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ TUPLE sub_lt[B_ROW_NUM];
//printf("%d\t%d\n",lp[blockIdx.x+1],lp[blockIdx.x]);
for(int i=lp[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<lp[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){
if(j<B_ROW_NUM){
sub_lt[j].key = lt[i].key;
sub_lt[j].val = lt[i].val;
}
}
/*
if(threadIdx.x==0){
for(int j=0; j<lp[blockIdx.x+1]-lp[blockIdx.x]; j++){
if(j<B_ROW_NUM){
sub_lt[j].key = lt[j+lp[blockIdx.x]].key;
sub_lt[j].val = lt[j+lp[blockIdx.x]].val;
}
}
}
*/
/*
if(threadIdx.x<lp[blockIdx.x+1]-lp[blockIdx.x]){
sub_lt[threadIdx.x].key = lt[threadIdx.x+lp[blockIdx.x]].key;
sub_lt[threadIdx.x].val = lt[threadIdx.x+lp[blockIdx.x]].val;
}
*/
__syncthreads();
//printf("%d\t%d\t%d\n",r_p[radix[blockIdx.x]+1],r_p[radix[blockIdx.x]],radix[blockIdx.x]);
TUPLE temp;
int tcount=count[x];
for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<r_p[radix[blockIdx.x]+1] ; k += blockDim.x){
temp.key = rt[k].key;
temp.val = rt[k].val;
for(int i=0; i<lp[blockIdx.x+1] - lp[blockIdx.x] ;i++){
if(sub_lt[i].val == temp.val){
jt[tcount].rkey = temp.key;
jt[tcount].rval = temp.val;
jt[tcount].lkey = sub_lt[i].key;
jt[tcount].lval = sub_lt[i].val;
/*
temp.key = sub_lt[i].key;
temp.val = sub_lt[i].val;
temp.key = sub_lt[i].key;
temp.val = sub_lt[i].val;
*/
tcount++;
}
}
}
//printf("%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",temp,temp2,blockIdx.x,threadIdx.x,lp[blockIdx.x+1],lp[blockIdx.x],r_p[radix[blockIdx.x]+1],r_p[radix[blockIdx.x]]);
}
}
| 0b1cc217fc7aa689b68915885534ffe7dc0ec539.cu | #include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__ void join(
TUPLE *lt,
TUPLE *rt,
RESULT *jt,
int *count,
int *r_p,
int *radix,
int *lp,
int right,
int left
)
{
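  // Added description (not in the original source): one block processes one
  // left-table partition. It caches that partition's tuples in shared memory
  // (sub_lt, capped at B_ROW_NUM), then each thread walks the corresponding
  // right partition selected by radix[blockIdx.x] and writes matches to jt
  // beginning at its precomputed output position count[x].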
int x = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ TUPLE sub_lt[B_ROW_NUM];
//printf("%d\t%d\n",lp[blockIdx.x+1],lp[blockIdx.x]);
for(int i=lp[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<lp[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){
if(j<B_ROW_NUM){
sub_lt[j].key = lt[i].key;
sub_lt[j].val = lt[i].val;
}
}
/*
if(threadIdx.x==0){
for(int j=0; j<lp[blockIdx.x+1]-lp[blockIdx.x]; j++){
if(j<B_ROW_NUM){
sub_lt[j].key = lt[j+lp[blockIdx.x]].key;
sub_lt[j].val = lt[j+lp[blockIdx.x]].val;
}
}
}
*/
/*
if(threadIdx.x<lp[blockIdx.x+1]-lp[blockIdx.x]){
sub_lt[threadIdx.x].key = lt[threadIdx.x+lp[blockIdx.x]].key;
sub_lt[threadIdx.x].val = lt[threadIdx.x+lp[blockIdx.x]].val;
}
*/
__syncthreads();
//printf("%d\t%d\t%d\n",r_p[radix[blockIdx.x]+1],r_p[radix[blockIdx.x]],radix[blockIdx.x]);
TUPLE temp;
int tcount=count[x];
for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<r_p[radix[blockIdx.x]+1] ; k += blockDim.x){
temp.key = rt[k].key;
temp.val = rt[k].val;
for(int i=0; i<lp[blockIdx.x+1] - lp[blockIdx.x] ;i++){
if(sub_lt[i].val == temp.val){
jt[tcount].rkey = temp.key;
jt[tcount].rval = temp.val;
jt[tcount].lkey = sub_lt[i].key;
jt[tcount].lval = sub_lt[i].val;
/*
temp.key = sub_lt[i].key;
temp.val = sub_lt[i].val;
temp.key = sub_lt[i].key;
temp.val = sub_lt[i].val;
*/
tcount++;
}
}
}
//printf("%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",temp,temp2,blockIdx.x,threadIdx.x,lp[blockIdx.x+1],lp[blockIdx.x],r_p[radix[blockIdx.x]+1],r_p[radix[blockIdx.x]]);
}
}
|
9d9b5ce29da443de801c9336b2dd4b342274d97b.hip | // !!! This is a file automatically generated by hipify!!!
extern "C" {
#include "cuda_code.h"
}
#include <hip/hip_runtime.h>
// CUDA-C includes
#include <hip/hip_runtime.h>
int dev_id[4];
uint32_t num_dev = 0;
void device_info(){
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess)
{
printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
int dev, driverVersion = 0, runtimeVersion = 0;
for (dev = 0; dev < deviceCount; ++dev)
{
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("Device %d: \"%s\"\n", dev, deviceProp.name);
printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n",(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
}
}
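// Added note (not in the original source): init_gpu_devices() fills the
// global dev_id[] / num_dev with the indices of devices reporting more
// than 6000 MB of global memory; dev_id has room for only 4 entries and
// the loop does not guard against exceeding that capacity.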
void init_gpu_devices(){
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
int dev, driverVersion = 0, runtimeVersion = 0;
for(dev = 0; dev < deviceCount; ++dev){
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if((float)deviceProp.totalGlobalMem/1048576.0f > 6000){
dev_id[num_dev] = dev;
num_dev++;
//printf("Device %d: \"%s\"\n", dev, deviceProp.name);
//printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n",(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
}
}
} | 9d9b5ce29da443de801c9336b2dd4b342274d97b.cu | extern "C" {
#include "cuda_code.h"
}
#include <cuda_runtime.h>
// CUDA-C includes
#include <cuda.h>
int dev_id[4];
uint32_t num_dev = 0;
void device_info(){
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess)
{
printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
int dev, driverVersion = 0, runtimeVersion = 0;
for (dev = 0; dev < deviceCount; ++dev)
{
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("Device %d: \"%s\"\n", dev, deviceProp.name);
printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n",(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
}
}
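// Added note (not in the original source): the selection below keeps only
// devices with more than 6000 MB of global memory, recording their indices
// in dev_id[] (capacity 4, not bounds-checked) and their count in num_dev.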
void init_gpu_devices(){
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
int dev, driverVersion = 0, runtimeVersion = 0;
for(dev = 0; dev < deviceCount; ++dev){
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if((float)deviceProp.totalGlobalMem/1048576.0f > 6000){
dev_id[num_dev] = dev;
num_dev++;
//printf("Device %d: \"%s\"\n", dev, deviceProp.name);
//printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n",(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
}
}
} |
3c60c94a0f2e94c8339fad68aa46b46c8ddcc71f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../cuda_utils/set_inter_device_functions.cuh"
#include "../cuda_utils/cuda_util.cuh"
#include "../scan_xp_common.cu" // copy the common codes and paste here, just a pre-processing by compiler
#ifndef TILE_SRC
#define TILE_SRC (4)
#endif
#ifndef TILE_DST
#define TILE_DST (8)
#endif
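/*
 * Note on the tiled path of set_intersection_GPU_shared below: each thread
 * block is a single 32-thread warp arranged as a TILE_SRC x TILE_DST
 * (4 x 8 by default) grid. Every iteration of the merge loop compares a tile
 * of TILE_SRC elements of u's adjacency list against TILE_DST elements of v's
 * list (32 comparisons at once), then advances whichever list has the smaller
 * tile-boundary value, refilling the shared-memory staging buffers whenever a
 * SHARED_MEM_SIZE-element window is exhausted.
 */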
__global__ void set_intersection_GPU_shared(uint32_t *d_offsets, /*card: |V|+1*/
int32_t *d_dsts, /*card: 2*|E|*/
int32_t *d_intersection_count_GPU) /*card: 2*|E|*/
{
const uint32_t tid = threadIdx.x * blockDim.y + threadIdx.y; // in [0, 32)
const uint32_t u = blockIdx.x;
const uint32_t off_u_beg = d_offsets[u];
const uint32_t off_u_end = d_offsets[u + 1];
#if defined(BASELINE)
for (uint32_t off_u_iter = off_u_beg + tid; off_u_iter < off_u_end; off_u_iter += 32) {
auto v = d_dsts[off_u_iter];
if (u > v)
continue; /*skip when u > v*/
d_intersection_count_GPU[off_u_iter] = 2 + ComputeCNNaiveStdMergeDevice(d_offsets, d_dsts, u, v);
}
#elif defined(BASELINE_HYBRID)
for (uint32_t off_u_iter = off_u_beg + tid; off_u_iter < off_u_end; off_u_iter += 32) {
auto v = d_dsts[off_u_iter];
if (u > v)
continue; /*skip when u > v*/
d_intersection_count_GPU[off_u_iter] = 2 + ComputeCNHybridDevice(d_offsets, d_dsts, u, d_dsts[off_u_iter]);
}
#else
#ifndef WARP_SIZE
#define WARP_SIZE 32
#endif
#ifndef SHARED_MEM_SIZE
#define SHARED_MEM_SIZE 64
#endif
__shared__ int32_t u_neis[SHARED_MEM_SIZE], v_neis[SHARED_MEM_SIZE];
/*traverse all the neighbors(destination nodes)*/
for (uint32_t off_u_iter = d_offsets[u]; off_u_iter < off_u_end; off_u_iter++) {
const int32_t v = d_dsts[off_u_iter];
uint32_t private_count = 0;
if (u > v)
continue; /*skip when u > v*/
uint32_t off_u = off_u_beg;
uint32_t off_v_beg = d_offsets[v];
uint32_t off_v = off_v_beg;
uint32_t off_v_end = d_offsets[v + 1];
// first-time load
for (auto i = tid; i < SHARED_MEM_SIZE; i += WARP_SIZE) {
if (off_u + i < off_u_end)
u_neis[i] = d_dsts[off_u + i];
}
for (auto i = tid; i < SHARED_MEM_SIZE; i += WARP_SIZE) {
if (off_v + i < off_v_end)
v_neis[i] = d_dsts[off_v + i];
}
while (true) {
// commit 32 comparisons
uint32_t off_u_local = threadIdx.x + off_u; /*A[0-3]*/
uint32_t off_v_local = threadIdx.y + off_v; /*B[0-7]*/
// 1st: all-pairs comparisons
uint32_t elem_src =
(off_u_local < off_u_end) ? u_neis[(off_u_local - off_u_beg) % SHARED_MEM_SIZE] : (UINT32_MAX);
uint32_t elem_dst =
(off_v_local < off_v_end) ? v_neis[(off_v_local - off_v_beg) % SHARED_MEM_SIZE] : (UINT32_MAX - 1);
if (elem_src == elem_dst)
private_count++;
// 2nd: advance by 4 elements in A or 8 elements in B
elem_src =
(off_u + TILE_SRC - 1 < off_u_end) ?
u_neis[(off_u + TILE_SRC - 1 - off_u_beg) % SHARED_MEM_SIZE] : (UINT32_MAX);
elem_dst =
(off_v + TILE_DST - 1 < off_v_end) ?
v_neis[(off_v + TILE_DST - 1 - off_v_beg) % SHARED_MEM_SIZE] : (UINT32_MAX);
// check whether to exit
if (elem_src == UINT32_MAX && elem_dst == UINT32_MAX)
break;
if (elem_src < elem_dst) {
off_u += TILE_SRC;
if ((off_u - off_u_beg) % SHARED_MEM_SIZE == 0) {
for (auto i = tid; i < SHARED_MEM_SIZE; i += WARP_SIZE) {
if (off_u + i < off_u_end)
u_neis[i] = d_dsts[off_u + i];
}
}
} else {
off_v += TILE_DST;
if ((off_v - off_v_beg) % SHARED_MEM_SIZE == 0) {
for (auto i = tid; i < SHARED_MEM_SIZE; i += WARP_SIZE) {
if (off_v + i < off_v_end)
v_neis[i] = d_dsts[off_v + i];
}
}
}
}
/*single warp reduction*/
for (int offset = 16; offset > 0; offset >>= 1)
private_count += __shfl_down(private_count, offset);
if (tid == 0)
d_intersection_count_GPU[off_u_iter] = 2 + private_count;
}
#endif
}
/*for bitmap-based set intersection*/
#define BITMAP_SCALE_LOG (9)
#define BITMAP_SCALE (1<<BITMAP_SCALE_LOG) /*#bits in the first-level bitmap indexed by 1 bit in the second-level bitmap*/
#define INIT_INTERSECTION_CNT (2)
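/*
 * Layout of the two-level bitmap used below: the first level stores one bit
 * per vertex in 32-bit words (vertex v -> word v / 32, bit v % 32); the second
 * level stores one bit per BITMAP_SCALE (512) first-level bits, i.e. per block
 * of 512 consecutive vertex ids, so a lookup can skip an empty 512-vertex
 * region with a single index test before probing the full bitmap.
 * INIT_INTERSECTION_CNT is the constant 2 added to every intersection count,
 * matching a closed-neighborhood convention in which both edge endpoints count
 * themselves.
 */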
__global__ void set_intersection_GPU_bitmap(uint32_t *d_offsets, /*card: |V|+1*/
int32_t *d_dsts, /*card: 2*|E|*/
uint32_t *d_bitmaps, /*the global bitmaps*/
uint32_t *d_bitmap_states, /*recording the usage of the bitmaps on the SM*/
uint32_t *vertex_count, /*for sequential block execution*/
uint32_t conc_blocks_per_SM, /*#concurrent blocks per SM*/
int32_t *d_intersection_count_GPU) /*card: 2*|E|*/
{
const uint32_t tid = threadIdx.x;
const uint32_t tnum = blockDim.x;
const uint32_t num_nodes = gridDim.x; /*#nodes=#blocks*/
const uint32_t elem_bits = sizeof(uint32_t) * 8; /*#bits in a bitmap element*/
const uint32_t val_size_bitmap = (num_nodes + elem_bits - 1) / elem_bits;
const uint32_t val_size_bitmap_indexes = (val_size_bitmap + BITMAP_SCALE - 1) >> BITMAP_SCALE_LOG;
__shared__ uint32_t intersection_count;
__shared__ uint32_t node_id, sm_id, bitmap_ptr;
__shared__ uint32_t start_src, end_src, start_src_in_bitmap, end_src_in_bitmap;
extern __shared__ uint32_t bitmap_indexes[];
if (tid == 0) {
node_id = atomicAdd(vertex_count, 1); /*get current vertex id*/
start_src = d_offsets[node_id];
end_src = d_offsets[node_id + 1];
start_src_in_bitmap = d_dsts[start_src] / elem_bits;
end_src_in_bitmap = (start_src == end_src) ? d_dsts[start_src] / elem_bits : d_dsts[end_src - 1] / elem_bits;
intersection_count = INIT_INTERSECTION_CNT;
} else if (tid == tnum - 1) {
uint32_t temp = 0;
asm("mov.u32 %0, %smid;" : "=r"(sm_id) );
/*get current SM*/
while (atomicCAS(&d_bitmap_states[sm_id * conc_blocks_per_SM + temp], 0, 1) != 0)
temp++;
bitmap_ptr = temp;
}
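    /*
     * Bitmap slot handshake: d_bitmaps holds conc_blocks_per_SM private bitmaps
     * per SM. The last thread of the block spins on the atomicCAS above to claim
     * a free slot for the lifetime of this block; the matching atomicCAS at the
     * end of the kernel releases it, so co-resident blocks never share a bitmap.
     */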
/*initialize the 2-level bitmap*/
for (uint32_t idx = tid; idx < val_size_bitmap_indexes; idx += tnum)
bitmap_indexes[idx] = 0;
__syncthreads();
uint32_t *bitmap = &d_bitmaps[val_size_bitmap * (conc_blocks_per_SM * sm_id + bitmap_ptr)];
/*construct the source node neighbor bitmap*/
for (uint32_t idx = start_src + tid; idx < end_src; idx += tnum) {
uint32_t src_nei = d_dsts[idx];
const uint32_t src_nei_val = src_nei / elem_bits;
atomicOr(&bitmap[src_nei_val], (0b1 << (src_nei & (elem_bits - 1)))); /*setting the bitmap*/
atomicOr(&bitmap_indexes[src_nei_val >> BITMAP_SCALE_LOG],
(0b1 << ((src_nei >> BITMAP_SCALE_LOG) & (elem_bits - 1)))); /*setting the bitmap index*/
}
/*loop the neighbors*/
for (uint32_t idx = start_src; idx < end_src; idx++) {
__syncthreads();
uint32_t private_count = 0;
uint32_t src_nei = d_dsts[idx];
if (src_nei < node_id)
continue;
uint32_t start_dst = d_offsets[src_nei];
uint32_t end_dst = d_offsets[src_nei + 1];
for (uint32_t dst_idx = start_dst + tid; dst_idx < end_dst; dst_idx += tnum) {
uint32_t dst_nei = d_dsts[dst_idx];
const uint32_t dst_nei_val = dst_nei / elem_bits;
if ((bitmap_indexes[dst_nei_val >> BITMAP_SCALE_LOG] >> ((dst_nei >> BITMAP_SCALE_LOG) & (elem_bits - 1)))
& 0b1 == 1)
if ((bitmap[dst_nei_val] >> (dst_nei & (elem_bits - 1))) & 0b1 == 1)
private_count++;
}
private_count += __shfl_down(private_count, 16);
private_count += __shfl_down(private_count, 8);
private_count += __shfl_down(private_count, 4);
private_count += __shfl_down(private_count, 2);
private_count += __shfl_down(private_count, 1);
if ((tid & 31) == 0)
atomicAdd(&intersection_count, private_count);
__syncthreads();
if (tid == 0) {
d_intersection_count_GPU[idx] = intersection_count;
intersection_count = INIT_INTERSECTION_CNT;
}
}
/*clean the bitmap*/
if (end_src_in_bitmap - start_src_in_bitmap + 1 <= end_src - start_src) {
for (uint32_t idx = start_src_in_bitmap + tid; idx <= end_src_in_bitmap; idx += tnum) {
bitmap[idx] = 0;
}
} else {
for (uint32_t idx = start_src + tid; idx < end_src; idx += tnum) {
uint32_t src_nei = d_dsts[idx];
bitmap[src_nei / elem_bits] = 0;
}
}
__syncthreads();
/*release the bitmap lock*/
if (tid == 0)
atomicCAS(&d_bitmap_states[sm_id * conc_blocks_per_SM + bitmap_ptr], 1, 0);
}
__global__ void set_intersection_GPU_bitmap_warp_per_vertex(uint32_t *d_offsets, /*card: |V|+1*/
int32_t *d_dsts, /*card: 2*|E|*/
uint32_t *d_bitmaps, /*the global bitmaps*/
uint32_t *d_bitmap_states, /*recording the usage of the bitmaps on the SM*/
uint32_t *vertex_count, /*for sequential block execution*/
uint32_t conc_blocks_per_SM, /*#concurrent blocks per SM*/
int32_t *d_intersection_count_GPU) /*card: 2*|E|*/
{
const uint32_t tid = threadIdx.x + blockDim.x * threadIdx.y; /*threads in a warp are with continuous threadIdx.x */
const uint32_t tnum = blockDim.x * blockDim.y;
const uint32_t num_nodes = gridDim.x; /*#nodes=#blocks*/
const uint32_t elem_bits = sizeof(uint32_t) * 8; /*#bits in a bitmap element*/
const uint32_t val_size_bitmap = (num_nodes + elem_bits - 1) / elem_bits;
const uint32_t val_size_bitmap_indexes = (val_size_bitmap + BITMAP_SCALE - 1) >> BITMAP_SCALE_LOG;
// __shared__ uint32_t intersection_count;
__shared__ uint32_t node_id, sm_id, bitmap_ptr;
__shared__ uint32_t start_src, end_src, start_src_in_bitmap, end_src_in_bitmap;
extern __shared__ uint32_t bitmap_indexes[];
if (tid == 0) {
node_id = atomicAdd(vertex_count, 1); /*get current vertex id*/
start_src = d_offsets[node_id];
end_src = d_offsets[node_id + 1];
start_src_in_bitmap = d_dsts[start_src] / elem_bits;
end_src_in_bitmap = (start_src == end_src) ? d_dsts[start_src] / elem_bits : d_dsts[end_src - 1] / elem_bits;
// intersection_count = 0;
} else if (tid == tnum - 1) {
uint32_t temp = 0;
asm("mov.u32 %0, %smid;" : "=r"(sm_id) );
/*get current SM*/
while (atomicCAS(&d_bitmap_states[sm_id * conc_blocks_per_SM + temp], 0, 1) != 0)
temp++;
bitmap_ptr = temp;
}
/*initialize the 2-level bitmap*/
for (uint32_t idx = tid; idx < val_size_bitmap_indexes; idx += tnum)
bitmap_indexes[idx] = 0;
__syncthreads();
uint32_t *bitmap = &d_bitmaps[val_size_bitmap * (conc_blocks_per_SM * sm_id + bitmap_ptr)];
/*construct the source node neighbor bitmap*/
for (uint32_t idx = start_src + tid; idx < end_src; idx += tnum) {
uint32_t src_nei = d_dsts[idx];
const uint32_t src_nei_val = src_nei / elem_bits;
atomicOr(&bitmap[src_nei_val], (0b1 << (src_nei & (elem_bits - 1)))); /*setting the bitmap*/
atomicOr(&bitmap_indexes[src_nei_val >> BITMAP_SCALE_LOG],
(0b1 << ((src_nei >> BITMAP_SCALE_LOG) & (elem_bits - 1)))); /*setting the bitmap index*/
}
__syncthreads();
/*loop the neighbors*/
/* x dimension: warp-size
* y dimension: number of warps
* */
for (uint32_t idx = start_src + threadIdx.y; idx < end_src; idx += blockDim.y) {
/*each warp processes a node*/
uint32_t private_count = 0;
uint32_t src_nei = d_dsts[idx];
if (src_nei < node_id)
continue;
uint32_t start_dst = d_offsets[src_nei];
uint32_t end_dst = d_offsets[src_nei + 1];
for (uint32_t dst_idx = start_dst + threadIdx.x; dst_idx < end_dst; dst_idx += blockDim.x) {
uint32_t dst_nei = d_dsts[dst_idx];
const uint32_t dst_nei_val = dst_nei / elem_bits;
if ((bitmap_indexes[dst_nei_val >> BITMAP_SCALE_LOG] >> ((dst_nei >> BITMAP_SCALE_LOG) & (elem_bits - 1)))
& 0b1 == 1)
if ((bitmap[dst_nei_val] >> (dst_nei & (elem_bits - 1))) & 0b1 == 1)
private_count++;
}
/*warp-wise reduction*/
private_count += __shfl_down(private_count, 16);
private_count += __shfl_down(private_count, 8);
private_count += __shfl_down(private_count, 4);
private_count += __shfl_down(private_count, 2);
private_count += __shfl_down(private_count, 1);
if (threadIdx.x == 0)
d_intersection_count_GPU[idx] = private_count + INIT_INTERSECTION_CNT;
}
__syncthreads();
/*clean the bitmap*/
if (end_src_in_bitmap - start_src_in_bitmap + 1 <= end_src - start_src) {
for (uint32_t idx = start_src_in_bitmap + tid; idx <= end_src_in_bitmap; idx += tnum) {
bitmap[idx] = 0;
}
} else {
for (uint32_t idx = start_src + tid; idx < end_src; idx += tnum) {
uint32_t src_nei = d_dsts[idx];
bitmap[src_nei / elem_bits] = 0;
}
}
__syncthreads();
/*release the bitmap lock*/
if (tid == 0)
atomicCAS(&d_bitmap_states[sm_id * conc_blocks_per_SM + bitmap_ptr], 1, 0);
}
void SCAN_XP::CheckCore(Graph *g) {
auto start = high_resolution_clock::now();
// co-processing with GPU: start a coroutine to compute the reverse offset, using binary-search
std::thread my_coroutine([this, g]() {
auto start = high_resolution_clock::now();
#pragma omp parallel for num_threads(thread_num_/2) schedule(dynamic, 60000)
for (auto i = 0u; i < g->edgemax; i++) {
// remove edge_src optimization, assuming task scheduling in FIFO-queue-mode
static thread_local auto u = 0;
u = FindSrc(g, u, i);
auto v = g->edge_dst[i];
if (u < v) {
// reverse offset
g->common_node_num[lower_bound(g->edge_dst + g->node_off[v], g->edge_dst +g->node_off[v + 1], u)- g->edge_dst] = i;
}
}
auto end = high_resolution_clock::now();
log_info("CPU corountine time: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0);
log_info("finish cross link");
});
// copy edge_src, edge_dst, node_off to GPU
#ifndef UNIFIED_MEM
uint32_t* d_node_off;
int32_t *d_edge_dst;
int32_t *d_inter_cnt;
hipMalloc((void **) &d_node_off, (g->nodemax + 1) * sizeof(uint32_t));
hipMalloc((void **) &d_edge_dst, g->edgemax * sizeof(int32_t));
hipMalloc((void **) &d_inter_cnt, g->edgemax * sizeof(int32_t));
hipMalloc((void **) &d_node_off, (g->nodemax + 1) * sizeof(uint32_t));
hipMalloc((void **) &d_edge_dst, g->edgemax * sizeof(int32_t));
hipMalloc((void **) &d_inter_cnt, g->edgemax * sizeof(int32_t));
hipMemcpy(d_node_off, g->node_off, (g->nodemax + 1) * sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(d_edge_dst, g->edge_dst, g->edgemax * sizeof(int32_t), hipMemcpyHostToDevice);
#endif
#if defined(USE_BITMAP_KERNEL) && defined(WARP_PER_VERTEX)
uint32_t block_size = 128;
const uint32_t TITAN_XP_WARP_SIZE = 32;
dim3 t_dimension(WARP_SIZE,block_size/TITAN_XP_WARP_SIZE); /*2-D*/
#elif defined( USE_BITMAP_KERNEL)
uint32_t block_size = 32;
dim3 t_dimension(block_size);
#endif
#if defined(USE_BITMAP_KERNEL)
uint32_t *d_bitmaps, *d_vertex_count;
/*get the maximal number of threads in an SM*/
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0); /*currently 0th device*/
uint32_t max_threads_per_SM = prop.maxThreadsPerMultiProcessor;
uint32_t num_SMs = prop.multiProcessorCount;
uint32_t conc_blocks_per_SM = max_threads_per_SM / block_size; /*assume regs are not limited*/
/*initialize the bitmaps*/
const uint32_t elem_bits = sizeof(uint32_t) * 8; /*#bits in a bitmap element*/
const uint32_t val_size_bitmap = (g->nodemax + elem_bits - 1) / elem_bits;
const uint32_t val_size_bitmap_indexes = (val_size_bitmap + BITMAP_SCALE - 1) / BITMAP_SCALE;
hipMalloc((void **) &d_bitmaps, conc_blocks_per_SM * num_SMs * val_size_bitmap * sizeof(uint32_t));
hipMemset(d_bitmaps, 0, conc_blocks_per_SM * num_SMs * val_size_bitmap * sizeof(uint32_t));
/*initialize the bitmap states*/
uint32_t *d_bitmap_states;
hipMalloc((void **) &d_bitmap_states, num_SMs * conc_blocks_per_SM * sizeof(uint32_t));
hipMemset(d_bitmap_states, 0, num_SMs * conc_blocks_per_SM * sizeof(uint32_t));
/*vertex count for sequential block execution*/
hipMalloc((void **) &d_vertex_count, sizeof(uint32_t));
hipMemset(d_vertex_count, 0, sizeof(uint32_t));
log_info("bitmap bytes: %s Bytes", FormatWithCommas(
static_cast<uint64_t>(conc_blocks_per_SM) * num_SMs * val_size_bitmap * sizeof(uint32_t)).c_str());
log_info("dynamic shared mem size: %s", FormatWithCommas(static_cast<uint64_t>(val_size_bitmap_indexes)*sizeof(uint32_t)).c_str());
// hipDeviceSynchronize();
#else
// compute all intersections, do not prune currently
dim3 t_dimension(TILE_SRC, TILE_DST); /*2-D*/
#endif
hipEvent_t cuda_start, cuda_end;
hipEventCreate(&cuda_start);
hipEventCreate(&cuda_end);
float time_GPU;
hipEventRecord(cuda_start);
#if defined(USE_BITMAP_KERNEL) && defined(WARP_PER_VERTEX) && defined(UNIFIED_MEM)
hipLaunchKernelGGL(( set_intersection_GPU_bitmap_warp_per_vertex), dim3(g->nodemax), dim3(t_dimension), val_size_bitmap_indexes*sizeof(uint32_t), 0,
g->node_off, g->edge_dst, d_bitmaps, d_bitmap_states, d_vertex_count, conc_blocks_per_SM, g->common_node_num);
#elif defined(USE_BITMAP_KERNEL) && defined(UNIFIED_MEM)
hipLaunchKernelGGL(( set_intersection_GPU_bitmap), dim3(g->nodemax), dim3(t_dimension), val_size_bitmap_indexes*sizeof(uint32_t), 0, g->node_off, g->edge_dst,
d_bitmaps, d_bitmap_states, d_vertex_count, conc_blocks_per_SM, g->common_node_num);
#elif defined(UNIFIED_MEM)
hipLaunchKernelGGL(( set_intersection_GPU_shared), dim3(g->nodemax), dim3(t_dimension), 0, 0, g->node_off, g->edge_dst, g->common_node_num);
#elif defined(USE_BITMAP_KERNEL) && defined(WARP_PER_VERTEX)
hipLaunchKernelGGL(( set_intersection_GPU_bitmap_warp_per_vertex), dim3(g->nodemax), dim3(t_dimension), val_size_bitmap_indexes*sizeof(uint32_t), 0,
d_node_off, d_edge_dst, d_bitmaps, d_bitmap_states, d_vertex_count, conc_blocks_per_SM, d_inter_cnt);
#elif defined(USE_BITMAP_KERNEL)
hipLaunchKernelGGL(( set_intersection_GPU_bitmap), dim3(g->nodemax), dim3(t_dimension), val_size_bitmap_indexes*sizeof(uint32_t), 0, d_node_off, d_edge_dst,
d_bitmaps, d_bitmap_states, d_vertex_count, conc_blocks_per_SM, d_inter_cnt);
#else
hipLaunchKernelGGL(( set_intersection_GPU_shared), dim3(g->nodemax), dim3(t_dimension), 0, 0, d_node_off, d_edge_dst, d_inter_cnt);
#endif
hipEventRecord(cuda_end);
hipEventSynchronize(cuda_start);
hipEventSynchronize(cuda_end);
hipEventElapsedTime(&time_GPU, cuda_start, cuda_end);
log_info("CUDA Kernel Time: %.3lf ms", time_GPU);
gpuErrchk(hipPeekAtLastError());
// copy back the intersection cnt
#ifndef UNIFIED_MEM
hipMemcpy(g->common_node_num, d_inter_cnt, g->edgemax * sizeof(int32_t), hipMemcpyDeviceToHost);
hipFree(d_node_off);
hipFree(d_edge_dst);
hipFree(d_inter_cnt);
#else
hipDeviceSynchronize(); // ensure the kernel execution finished
#endif
auto end = high_resolution_clock::now();
log_info("CUDA kernel lauch cost: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0);
// 4th: join the coroutine, assign the remaining intersection-count values
my_coroutine.join();
PostComputeCoreChecking(this, g, min_u_, epsilon_);
}
| 3c60c94a0f2e94c8339fad68aa46b46c8ddcc71f.cu | #include "../cuda_utils/set_inter_device_functions.cuh"
#include "../cuda_utils/cuda_util.cuh"
#include "../scan_xp_common.cu" // copy the common codes and paste here, just a pre-processing by compiler
#ifndef TILE_SRC
#define TILE_SRC (4)
#endif
#ifndef TILE_DST
#define TILE_DST (8)
#endif
__global__ void set_intersection_GPU_shared(uint32_t *d_offsets, /*card: |V|+1*/
int32_t *d_dsts, /*card: 2*|E|*/
int32_t *d_intersection_count_GPU) /*card: 2*|E|*/
{
const uint32_t tid = threadIdx.x * blockDim.y + threadIdx.y; // in [0, 32)
const uint32_t u = blockIdx.x;
const uint32_t off_u_beg = d_offsets[u];
const uint32_t off_u_end = d_offsets[u + 1];
#if defined(BASELINE)
for (uint32_t off_u_iter = off_u_beg + tid; off_u_iter < off_u_end; off_u_iter += 32) {
auto v = d_dsts[off_u_iter];
if (u > v)
continue; /*skip when u > v*/
d_intersection_count_GPU[off_u_iter] = 2 + ComputeCNNaiveStdMergeDevice(d_offsets, d_dsts, u, v);
}
#elif defined(BASELINE_HYBRID)
for (uint32_t off_u_iter = off_u_beg + tid; off_u_iter < off_u_end; off_u_iter += 32) {
auto v = d_dsts[off_u_iter];
if (u > v)
continue; /*skip when u > v*/
d_intersection_count_GPU[off_u_iter] = 2 + ComputeCNHybridDevice(d_offsets, d_dsts, u, d_dsts[off_u_iter]);
}
#else
#ifndef WARP_SIZE
#define WARP_SIZE 32
#endif
#ifndef SHARED_MEM_SIZE
#define SHARED_MEM_SIZE 64
#endif
__shared__ int32_t u_neis[SHARED_MEM_SIZE], v_neis[SHARED_MEM_SIZE];
/*traverse all the neighbors(destination nodes)*/
for (uint32_t off_u_iter = d_offsets[u]; off_u_iter < off_u_end; off_u_iter++) {
const int32_t v = d_dsts[off_u_iter];
uint32_t private_count = 0;
if (u > v)
continue; /*skip when u > v*/
uint32_t off_u = off_u_beg;
uint32_t off_v_beg = d_offsets[v];
uint32_t off_v = off_v_beg;
uint32_t off_v_end = d_offsets[v + 1];
// first-time load
for (auto i = tid; i < SHARED_MEM_SIZE; i += WARP_SIZE) {
if (off_u + i < off_u_end)
u_neis[i] = d_dsts[off_u + i];
}
for (auto i = tid; i < SHARED_MEM_SIZE; i += WARP_SIZE) {
if (off_v + i < off_v_end)
v_neis[i] = d_dsts[off_v + i];
}
while (true) {
// commit 32 comparisons
uint32_t off_u_local = threadIdx.x + off_u; /*A[0-3]*/
uint32_t off_v_local = threadIdx.y + off_v; /*B[0-7]*/
// 1st: all-pairs comparisons
uint32_t elem_src =
(off_u_local < off_u_end) ? u_neis[(off_u_local - off_u_beg) % SHARED_MEM_SIZE] : (UINT32_MAX);
uint32_t elem_dst =
(off_v_local < off_v_end) ? v_neis[(off_v_local - off_v_beg) % SHARED_MEM_SIZE] : (UINT32_MAX - 1);
if (elem_src == elem_dst)
private_count++;
// 2nd: advance by 4 elements in A or 8 elements in B
elem_src =
(off_u + TILE_SRC - 1 < off_u_end) ?
u_neis[(off_u + TILE_SRC - 1 - off_u_beg) % SHARED_MEM_SIZE] : (UINT32_MAX);
elem_dst =
(off_v + TILE_DST - 1 < off_v_end) ?
v_neis[(off_v + TILE_DST - 1 - off_v_beg) % SHARED_MEM_SIZE] : (UINT32_MAX);
// check whether to exit
if (elem_src == UINT32_MAX && elem_dst == UINT32_MAX)
break;
if (elem_src < elem_dst) {
off_u += TILE_SRC;
if ((off_u - off_u_beg) % SHARED_MEM_SIZE == 0) {
for (auto i = tid; i < SHARED_MEM_SIZE; i += WARP_SIZE) {
if (off_u + i < off_u_end)
u_neis[i] = d_dsts[off_u + i];
}
}
} else {
off_v += TILE_DST;
if ((off_v - off_v_beg) % SHARED_MEM_SIZE == 0) {
for (auto i = tid; i < SHARED_MEM_SIZE; i += WARP_SIZE) {
if (off_v + i < off_v_end)
v_neis[i] = d_dsts[off_v + i];
}
}
}
}
/*single warp reduction*/
for (int offset = 16; offset > 0; offset >>= 1)
private_count += __shfl_down(private_count, offset);
if (tid == 0)
d_intersection_count_GPU[off_u_iter] = 2 + private_count;
}
#endif
}
/*for bitmap-based set intersection*/
#define BITMAP_SCALE_LOG (9)
#define BITMAP_SCALE (1<<BITMAP_SCALE_LOG) /*#bits in the first-level bitmap indexed by 1 bit in the second-level bitmap*/
#define INIT_INTERSECTION_CNT (2)
__global__ void set_intersection_GPU_bitmap(uint32_t *d_offsets, /*card: |V|+1*/
int32_t *d_dsts, /*card: 2*|E|*/
uint32_t *d_bitmaps, /*the global bitmaps*/
uint32_t *d_bitmap_states, /*recording the usage of the bitmaps on the SM*/
uint32_t *vertex_count, /*for sequential block execution*/
uint32_t conc_blocks_per_SM, /*#concurrent blocks per SM*/
int32_t *d_intersection_count_GPU) /*card: 2*|E|*/
{
const uint32_t tid = threadIdx.x;
const uint32_t tnum = blockDim.x;
const uint32_t num_nodes = gridDim.x; /*#nodes=#blocks*/
const uint32_t elem_bits = sizeof(uint32_t) * 8; /*#bits in a bitmap element*/
const uint32_t val_size_bitmap = (num_nodes + elem_bits - 1) / elem_bits;
const uint32_t val_size_bitmap_indexes = (val_size_bitmap + BITMAP_SCALE - 1) >> BITMAP_SCALE_LOG;
__shared__ uint32_t intersection_count;
__shared__ uint32_t node_id, sm_id, bitmap_ptr;
__shared__ uint32_t start_src, end_src, start_src_in_bitmap, end_src_in_bitmap;
extern __shared__ uint32_t bitmap_indexes[];
if (tid == 0) {
node_id = atomicAdd(vertex_count, 1); /*get current vertex id*/
start_src = d_offsets[node_id];
end_src = d_offsets[node_id + 1];
start_src_in_bitmap = d_dsts[start_src] / elem_bits;
end_src_in_bitmap = (start_src == end_src) ? d_dsts[start_src] / elem_bits : d_dsts[end_src - 1] / elem_bits;
intersection_count = INIT_INTERSECTION_CNT;
} else if (tid == tnum - 1) {
uint32_t temp = 0;
asm("mov.u32 %0, %smid;" : "=r"(sm_id) );
/*get current SM*/
while (atomicCAS(&d_bitmap_states[sm_id * conc_blocks_per_SM + temp], 0, 1) != 0)
temp++;
bitmap_ptr = temp;
}
/*initialize the 2-level bitmap*/
for (uint32_t idx = tid; idx < val_size_bitmap_indexes; idx += tnum)
bitmap_indexes[idx] = 0;
__syncthreads();
uint32_t *bitmap = &d_bitmaps[val_size_bitmap * (conc_blocks_per_SM * sm_id + bitmap_ptr)];
/*construct the source node neighbor bitmap*/
for (uint32_t idx = start_src + tid; idx < end_src; idx += tnum) {
uint32_t src_nei = d_dsts[idx];
const uint32_t src_nei_val = src_nei / elem_bits;
atomicOr(&bitmap[src_nei_val], (0b1 << (src_nei & (elem_bits - 1)))); /*setting the bitmap*/
atomicOr(&bitmap_indexes[src_nei_val >> BITMAP_SCALE_LOG],
(0b1 << ((src_nei >> BITMAP_SCALE_LOG) & (elem_bits - 1)))); /*setting the bitmap index*/
}
/*loop the neighbors*/
for (uint32_t idx = start_src; idx < end_src; idx++) {
__syncthreads();
uint32_t private_count = 0;
uint32_t src_nei = d_dsts[idx];
if (src_nei < node_id)
continue;
uint32_t start_dst = d_offsets[src_nei];
uint32_t end_dst = d_offsets[src_nei + 1];
for (uint32_t dst_idx = start_dst + tid; dst_idx < end_dst; dst_idx += tnum) {
uint32_t dst_nei = d_dsts[dst_idx];
const uint32_t dst_nei_val = dst_nei / elem_bits;
if ((bitmap_indexes[dst_nei_val >> BITMAP_SCALE_LOG] >> ((dst_nei >> BITMAP_SCALE_LOG) & (elem_bits - 1)))
& 0b1 == 1)
if ((bitmap[dst_nei_val] >> (dst_nei & (elem_bits - 1))) & 0b1 == 1)
private_count++;
}
private_count += __shfl_down(private_count, 16);
private_count += __shfl_down(private_count, 8);
private_count += __shfl_down(private_count, 4);
private_count += __shfl_down(private_count, 2);
private_count += __shfl_down(private_count, 1);
if ((tid & 31) == 0)
atomicAdd(&intersection_count, private_count);
__syncthreads();
if (tid == 0) {
d_intersection_count_GPU[idx] = intersection_count;
intersection_count = INIT_INTERSECTION_CNT;
}
}
/*clean the bitmap*/
if (end_src_in_bitmap - start_src_in_bitmap + 1 <= end_src - start_src) {
for (uint32_t idx = start_src_in_bitmap + tid; idx <= end_src_in_bitmap; idx += tnum) {
bitmap[idx] = 0;
}
} else {
for (uint32_t idx = start_src + tid; idx < end_src; idx += tnum) {
uint32_t src_nei = d_dsts[idx];
bitmap[src_nei / elem_bits] = 0;
}
}
__syncthreads();
/*release the bitmap lock*/
if (tid == 0)
atomicCAS(&d_bitmap_states[sm_id * conc_blocks_per_SM + bitmap_ptr], 1, 0);
}
__global__ void set_intersection_GPU_bitmap_warp_per_vertex(uint32_t *d_offsets, /*card: |V|+1*/
int32_t *d_dsts, /*card: 2*|E|*/
uint32_t *d_bitmaps, /*the global bitmaps*/
uint32_t *d_bitmap_states, /*recording the usage of the bitmaps on the SM*/
uint32_t *vertex_count, /*for sequential block execution*/
uint32_t conc_blocks_per_SM, /*#concurrent blocks per SM*/
int32_t *d_intersection_count_GPU) /*card: 2*|E|*/
{
const uint32_t tid = threadIdx.x + blockDim.x * threadIdx.y; /*threads in a warp are with continuous threadIdx.x */
const uint32_t tnum = blockDim.x * blockDim.y;
const uint32_t num_nodes = gridDim.x; /*#nodes=#blocks*/
const uint32_t elem_bits = sizeof(uint32_t) * 8; /*#bits in a bitmap element*/
const uint32_t val_size_bitmap = (num_nodes + elem_bits - 1) / elem_bits;
const uint32_t val_size_bitmap_indexes = (val_size_bitmap + BITMAP_SCALE - 1) >> BITMAP_SCALE_LOG;
// __shared__ uint32_t intersection_count;
__shared__ uint32_t node_id, sm_id, bitmap_ptr;
__shared__ uint32_t start_src, end_src, start_src_in_bitmap, end_src_in_bitmap;
extern __shared__ uint32_t bitmap_indexes[];
if (tid == 0) {
node_id = atomicAdd(vertex_count, 1); /*get current vertex id*/
start_src = d_offsets[node_id];
end_src = d_offsets[node_id + 1];
start_src_in_bitmap = d_dsts[start_src] / elem_bits;
end_src_in_bitmap = (start_src == end_src) ? d_dsts[start_src] / elem_bits : d_dsts[end_src - 1] / elem_bits;
// intersection_count = 0;
} else if (tid == tnum - 1) {
uint32_t temp = 0;
asm("mov.u32 %0, %smid;" : "=r"(sm_id) );
/*get current SM*/
while (atomicCAS(&d_bitmap_states[sm_id * conc_blocks_per_SM + temp], 0, 1) != 0)
temp++;
bitmap_ptr = temp;
}
/*initialize the 2-level bitmap*/
for (uint32_t idx = tid; idx < val_size_bitmap_indexes; idx += tnum)
bitmap_indexes[idx] = 0;
__syncthreads();
uint32_t *bitmap = &d_bitmaps[val_size_bitmap * (conc_blocks_per_SM * sm_id + bitmap_ptr)];
/*construct the source node neighbor bitmap*/
for (uint32_t idx = start_src + tid; idx < end_src; idx += tnum) {
uint32_t src_nei = d_dsts[idx];
const uint32_t src_nei_val = src_nei / elem_bits;
atomicOr(&bitmap[src_nei_val], (0b1 << (src_nei & (elem_bits - 1)))); /*setting the bitmap*/
atomicOr(&bitmap_indexes[src_nei_val >> BITMAP_SCALE_LOG],
(0b1 << ((src_nei >> BITMAP_SCALE_LOG) & (elem_bits - 1)))); /*setting the bitmap index*/
}
__syncthreads();
/*loop the neighbors*/
/* x dimension: warp-size
* y dimension: number of warps
* */
for (uint32_t idx = start_src + threadIdx.y; idx < end_src; idx += blockDim.y) {
/*each warp processes a node*/
uint32_t private_count = 0;
uint32_t src_nei = d_dsts[idx];
if (src_nei < node_id)
continue;
uint32_t start_dst = d_offsets[src_nei];
uint32_t end_dst = d_offsets[src_nei + 1];
for (uint32_t dst_idx = start_dst + threadIdx.x; dst_idx < end_dst; dst_idx += blockDim.x) {
uint32_t dst_nei = d_dsts[dst_idx];
const uint32_t dst_nei_val = dst_nei / elem_bits;
if ((bitmap_indexes[dst_nei_val >> BITMAP_SCALE_LOG] >> ((dst_nei >> BITMAP_SCALE_LOG) & (elem_bits - 1)))
& 0b1 == 1)
if ((bitmap[dst_nei_val] >> (dst_nei & (elem_bits - 1))) & 0b1 == 1)
private_count++;
}
/*warp-wise reduction*/
private_count += __shfl_down(private_count, 16);
private_count += __shfl_down(private_count, 8);
private_count += __shfl_down(private_count, 4);
private_count += __shfl_down(private_count, 2);
private_count += __shfl_down(private_count, 1);
if (threadIdx.x == 0)
d_intersection_count_GPU[idx] = private_count + INIT_INTERSECTION_CNT;
}
__syncthreads();
/*clean the bitmap*/
if (end_src_in_bitmap - start_src_in_bitmap + 1 <= end_src - start_src) {
for (uint32_t idx = start_src_in_bitmap + tid; idx <= end_src_in_bitmap; idx += tnum) {
bitmap[idx] = 0;
}
} else {
for (uint32_t idx = start_src + tid; idx < end_src; idx += tnum) {
uint32_t src_nei = d_dsts[idx];
bitmap[src_nei / elem_bits] = 0;
}
}
__syncthreads();
/*release the bitmap lock*/
if (tid == 0)
atomicCAS(&d_bitmap_states[sm_id * conc_blocks_per_SM + bitmap_ptr], 1, 0);
}
void SCAN_XP::CheckCore(Graph *g) {
auto start = high_resolution_clock::now();
// co-processing with GPU: start a coroutine to compute the reverse offset, using binary-search
std::thread my_coroutine([this, g]() {
auto start = high_resolution_clock::now();
#pragma omp parallel for num_threads(thread_num_/2) schedule(dynamic, 60000)
for (auto i = 0u; i < g->edgemax; i++) {
// remove edge_src optimization, assuming task scheduling in FIFO-queue-mode
static thread_local auto u = 0;
u = FindSrc(g, u, i);
auto v = g->edge_dst[i];
if (u < v) {
// reverse offset
g->common_node_num[lower_bound(g->edge_dst + g->node_off[v], g->edge_dst +g->node_off[v + 1], u)- g->edge_dst] = i;
}
}
auto end = high_resolution_clock::now();
log_info("CPU corountine time: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0);
log_info("finish cross link");
});
// copy edge_src, edge_dst, node_off to GPU
#ifndef UNIFIED_MEM
uint32_t* d_node_off;
int32_t *d_edge_dst;
int32_t *d_inter_cnt;
cudaMalloc((void **) &d_node_off, (g->nodemax + 1) * sizeof(uint32_t));
cudaMalloc((void **) &d_edge_dst, g->edgemax * sizeof(int32_t));
cudaMalloc((void **) &d_inter_cnt, g->edgemax * sizeof(int32_t));
cudaMalloc((void **) &d_node_off, (g->nodemax + 1) * sizeof(uint32_t));
cudaMalloc((void **) &d_edge_dst, g->edgemax * sizeof(int32_t));
cudaMalloc((void **) &d_inter_cnt, g->edgemax * sizeof(int32_t));
cudaMemcpy(d_node_off, g->node_off, (g->nodemax + 1) * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_edge_dst, g->edge_dst, g->edgemax * sizeof(int32_t), cudaMemcpyHostToDevice);
#endif
#if defined(USE_BITMAP_KERNEL) && defined(WARP_PER_VERTEX)
uint32_t block_size = 128;
const uint32_t TITAN_XP_WARP_SIZE = 32;
dim3 t_dimension(WARP_SIZE,block_size/TITAN_XP_WARP_SIZE); /*2-D*/
#elif defined( USE_BITMAP_KERNEL)
uint32_t block_size = 32;
dim3 t_dimension(block_size);
#endif
#if defined(USE_BITMAP_KERNEL)
uint32_t *d_bitmaps, *d_vertex_count;
/*get the maximal number of threads in an SM*/
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0); /*currently 0th device*/
uint32_t max_threads_per_SM = prop.maxThreadsPerMultiProcessor;
uint32_t num_SMs = prop.multiProcessorCount;
uint32_t conc_blocks_per_SM = max_threads_per_SM / block_size; /*assume regs are not limited*/
/*initialize the bitmaps*/
const uint32_t elem_bits = sizeof(uint32_t) * 8; /*#bits in a bitmap element*/
const uint32_t val_size_bitmap = (g->nodemax + elem_bits - 1) / elem_bits;
const uint32_t val_size_bitmap_indexes = (val_size_bitmap + BITMAP_SCALE - 1) / BITMAP_SCALE;
cudaMalloc((void **) &d_bitmaps, conc_blocks_per_SM * num_SMs * val_size_bitmap * sizeof(uint32_t));
cudaMemset(d_bitmaps, 0, conc_blocks_per_SM * num_SMs * val_size_bitmap * sizeof(uint32_t));
/*initialize the bitmap states*/
uint32_t *d_bitmap_states;
cudaMalloc((void **) &d_bitmap_states, num_SMs * conc_blocks_per_SM * sizeof(uint32_t));
cudaMemset(d_bitmap_states, 0, num_SMs * conc_blocks_per_SM * sizeof(uint32_t));
/*vertex count for sequential block execution*/
cudaMalloc((void **) &d_vertex_count, sizeof(uint32_t));
cudaMemset(d_vertex_count, 0, sizeof(uint32_t));
log_info("bitmap bytes: %s Bytes", FormatWithCommas(
static_cast<uint64_t>(conc_blocks_per_SM) * num_SMs * val_size_bitmap * sizeof(uint32_t)).c_str());
log_info("dynamic shared mem size: %s", FormatWithCommas(static_cast<uint64_t>(val_size_bitmap_indexes)*sizeof(uint32_t)).c_str());
// cudaDeviceSynchronize();
#else
// compute all intersections, do not prune currently
dim3 t_dimension(TILE_SRC, TILE_DST); /*2-D*/
#endif
cudaEvent_t cuda_start, cuda_end;
cudaEventCreate(&cuda_start);
cudaEventCreate(&cuda_end);
float time_GPU;
cudaEventRecord(cuda_start);
#if defined(USE_BITMAP_KERNEL) && defined(WARP_PER_VERTEX) && defined(UNIFIED_MEM)
set_intersection_GPU_bitmap_warp_per_vertex<<<g->nodemax, t_dimension, val_size_bitmap_indexes*sizeof(uint32_t)>>>(
g->node_off, g->edge_dst, d_bitmaps, d_bitmap_states, d_vertex_count, conc_blocks_per_SM, g->common_node_num);
#elif defined(USE_BITMAP_KERNEL) && defined(UNIFIED_MEM)
set_intersection_GPU_bitmap<<<g->nodemax, t_dimension, val_size_bitmap_indexes*sizeof(uint32_t)>>>(g->node_off, g->edge_dst,
d_bitmaps, d_bitmap_states, d_vertex_count, conc_blocks_per_SM, g->common_node_num);
#elif defined(UNIFIED_MEM)
set_intersection_GPU_shared<<<g->nodemax, t_dimension>>>(g->node_off, g->edge_dst, g->common_node_num);
#elif defined(USE_BITMAP_KERNEL) && defined(WARP_PER_VERTEX)
set_intersection_GPU_bitmap_warp_per_vertex<<<g->nodemax, t_dimension, val_size_bitmap_indexes*sizeof(uint32_t)>>>(
d_node_off, d_edge_dst, d_bitmaps, d_bitmap_states, d_vertex_count, conc_blocks_per_SM, d_inter_cnt);
#elif defined(USE_BITMAP_KERNEL)
set_intersection_GPU_bitmap<<<g->nodemax, t_dimension, val_size_bitmap_indexes*sizeof(uint32_t)>>>(d_node_off, d_edge_dst,
d_bitmaps, d_bitmap_states, d_vertex_count, conc_blocks_per_SM, d_inter_cnt);
#else
set_intersection_GPU_shared<<<g->nodemax, t_dimension>>>(d_node_off, d_edge_dst, d_inter_cnt);
#endif
cudaEventRecord(cuda_end);
cudaEventSynchronize(cuda_start);
cudaEventSynchronize(cuda_end);
cudaEventElapsedTime(&time_GPU, cuda_start, cuda_end);
log_info("CUDA Kernel Time: %.3lf ms", time_GPU);
gpuErrchk(cudaPeekAtLastError());
// copy back the intersection cnt
#ifndef UNIFIED_MEM
cudaMemcpy(g->common_node_num, d_inter_cnt, g->edgemax * sizeof(int32_t), cudaMemcpyDeviceToHost);
cudaFree(d_node_off);
cudaFree(d_edge_dst);
cudaFree(d_inter_cnt);
#else
cudaDeviceSynchronize(); // ensure the kernel execution finished
#endif
auto end = high_resolution_clock::now();
log_info("CUDA kernel lauch cost: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0);
// 4th: join the coroutine, assign the remaining intersection-count values
my_coroutine.join();
PostComputeCoreChecking(this, g, min_u_, epsilon_);
}
|
9df7df6fd2bdd904bde67d3b1cb92be26e3fe8ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "Matrix.hpp"
#include "../checks/CudaChecks.hpp"
#include "../preproc.hpp"
Matrix::Matrix(const unsigned int &height, const unsigned int &width) : width_(width), height_(height)
{__LOG__
smart_ptr_counter_ = new int(1);
cudaErrorCheck(hipMalloc((void **) &data_, width_ * height_ * sizeof(double)));
}
Matrix::Matrix(const Matrix &m) : width_(m.width_), height_(m.height_), data_(m.data_), smart_ptr_counter_(m.smart_ptr_counter_)
{__LOG__
(*smart_ptr_counter_) += 1;
}
Matrix::Matrix(const double *h_m, const unsigned int &height, const unsigned int &width) : width_(width), height_(height)
{__LOG__
smart_ptr_counter_ = new int(1);
cudaErrorCheck(hipMalloc((void **) &data_, width_ * height_ * sizeof(double)));
cudaErrorCheck(hipMemcpy(data_, h_m, width_ * height_ * sizeof(double), hipMemcpyHostToDevice));
}
Matrix::~Matrix()
{
if (! data_)
{__LOG__
return;
}
__LOG(*smart_ptr_counter_)
if (*smart_ptr_counter_ > 1) {
(*smart_ptr_counter_) -= 1;
return;
}
delete smart_ptr_counter_;
cudaErrorCheck(hipFree(data_));
}
void Matrix::free()
{
if (! data_)
{__LOG__
return;
}
__LOG(*smart_ptr_counter_)
if(*smart_ptr_counter_ > 1) {// cuda-kernel constructs a copy of the object and then calls its destructor
(*smart_ptr_counter_) -= 1;
data_ = 0;
return;
}
delete smart_ptr_counter_;
cudaErrorCheck(hipFree(data_));
data_ = 0;
}
void Matrix::memsetZero()
{__LOG__
cudaErrorCheck(hipMemset(data_, 0, width_ * height_ * sizeof(double)));
}
__device__ double& Matrix::get(const unsigned int &i, const unsigned int &j) const {
return data_[i * width_ + j];
}
__device__ __host__ unsigned int Matrix::getWidth() const { return width_; }
__device__ __host__ unsigned int Matrix::getHeight() const { return height_; }
| 9df7df6fd2bdd904bde67d3b1cb92be26e3fe8ad.cu | #include "Matrix.hpp"
#include "../checks/CudaChecks.hpp"
#include "../preproc.hpp"
Matrix::Matrix(const unsigned int &height, const unsigned int &width) : width_(width), height_(height)
{__LOG__
smart_ptr_counter_ = new int(1);
cudaErrorCheck(cudaMalloc((void **) &data_, width_ * height_ * sizeof(double)));
}
Matrix::Matrix(const Matrix &m) : width_(m.width_), height_(m.height_), data_(m.data_), smart_ptr_counter_(m.smart_ptr_counter_)
{__LOG__
(*smart_ptr_counter_) += 1;
}
Matrix::Matrix(const double *h_m, const unsigned int &height, const unsigned int &width) : width_(width), height_(height)
{__LOG__
smart_ptr_counter_ = new int(1);
cudaErrorCheck(cudaMalloc((void **) &data_, width_ * height_ * sizeof(double)));
cudaErrorCheck(cudaMemcpy(data_, h_m, width_ * height_ * sizeof(double), cudaMemcpyHostToDevice));
}
Matrix::~Matrix()
{
if (! data_)
{__LOG__
return;
}
__LOG(*smart_ptr_counter_)
if (*smart_ptr_counter_ > 1) {
(*smart_ptr_counter_) -= 1;
return;
}
delete smart_ptr_counter_;
cudaErrorCheck(cudaFree(data_));
}
void Matrix::free()
{
if (! data_)
{__LOG__
return;
}
__LOG(*smart_ptr_counter_)
if(*smart_ptr_counter_ > 1) {// cuda-kernel constructs a copy of the object and then calls its destructor
(*smart_ptr_counter_) -= 1;
data_ = 0;
return;
}
delete smart_ptr_counter_;
cudaErrorCheck(cudaFree(data_));
data_ = 0;
}
void Matrix::memsetZero()
{__LOG__
cudaErrorCheck(cudaMemset(data_, 0, width_ * height_ * sizeof(double)));
}
__device__ double& Matrix::get(const unsigned int &i, const unsigned int &j) const {
return data_[i * width_ + j];
}
__device__ __host__ unsigned int Matrix::getWidth() const { return width_; }
__device__ __host__ unsigned int Matrix::getHeight() const { return height_; }
|
dd499aa965c57db687051508192ebe59599060e0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "devInverseReindexInt3Bool.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
int3 *destArray = NULL;
hipMalloc(&destArray, XSIZE*YSIZE);
int3 *srcArray = NULL;
hipMalloc(&srcArray, XSIZE*YSIZE);
unsigned int *reindex = NULL;
hipMalloc(&reindex, XSIZE*YSIZE);
int realSize = XSIZE*YSIZE;
int nDims = 1;
int maxValue = 1;
bool ignoreValue = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
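// Round the launch domain up to a multiple of the block shape so the grid
// covers all XSIZE x YSIZE elements; the extra threads are assumed to be
// bounds-checked against N inside devInverseReindexInt3Bool.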
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
devInverseReindexInt3Bool), dim3(gridBlock),dim3(threadBlock), 0, 0, N,destArray,srcArray,reindex,realSize,nDims,maxValue,ignoreValue);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
devInverseReindexInt3Bool), dim3(gridBlock),dim3(threadBlock), 0, 0, N,destArray,srcArray,reindex,realSize,nDims,maxValue,ignoreValue);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
devInverseReindexInt3Bool), dim3(gridBlock),dim3(threadBlock), 0, 0, N,destArray,srcArray,reindex,realSize,nDims,maxValue,ignoreValue);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | dd499aa965c57db687051508192ebe59599060e0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "devInverseReindexInt3Bool.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
int3 *destArray = NULL;
cudaMalloc(&destArray, XSIZE*YSIZE);
int3 *srcArray = NULL;
cudaMalloc(&srcArray, XSIZE*YSIZE);
unsigned int *reindex = NULL;
cudaMalloc(&reindex, XSIZE*YSIZE);
int realSize = XSIZE*YSIZE;
int nDims = 1;
int maxValue = 1;
bool ignoreValue = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
devInverseReindexInt3Bool<<<gridBlock,threadBlock>>>(N,destArray,srcArray,reindex,realSize,nDims,maxValue,ignoreValue);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
devInverseReindexInt3Bool<<<gridBlock,threadBlock>>>(N,destArray,srcArray,reindex,realSize,nDims,maxValue,ignoreValue);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
devInverseReindexInt3Bool<<<gridBlock,threadBlock>>>(N,destArray,srcArray,reindex,realSize,nDims,maxValue,ignoreValue);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6fa3dfb300aa8bc392339f3911b0b3621cf205ae.hip | // !!! This is a file automatically generated by hipify!!!
#include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
#include <math.h>
#include <numpy/arrayobject.h>
#include <numpy/arrayscalars.h>
//////////////////////
//// Support Code
//////////////////////
namespace {
struct __struct_compiled_op_c4a12f4110948fee8dd968510835197b {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V7;
PyObject* storage_V9;
PyObject* storage_V1;
__struct_compiled_op_c4a12f4110948fee8dd968510835197b() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_c4a12f4110948fee8dd968510835197b(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V7);
Py_XINCREF(storage_V9);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V7 = storage_V7;
this->storage_V9 = storage_V9;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_7:
double __DUMMY_7;
__label_9:
double __DUMMY_9;
__label_12:
double __DUMMY_12;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V7);
Py_XDECREF(this->storage_V9);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
PyArrayObject* V5;
typedef npy_int64 dtype_V5;
PyObject* py_V7;
PyArrayObject* V7;
typedef npy_int64 dtype_V7;
PyObject* py_V9;
PyArrayObject* V9;
typedef npy_int64 dtype_V9;
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 3)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 3",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 0)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 0",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
V5 = NULL;
if (py_V5 == Py_None) {
// We can either fail here or set V5 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
if (!PyArray_Check(py_V5)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V5)) {
PyArrayObject * tmp = (PyArrayObject*) py_V5;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V5),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V5) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V5));
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
V5 = (PyArrayObject*)(py_V5);
Py_XINCREF(V5);
{
py_V7 = PyList_GET_ITEM(storage_V7, 0);
{Py_XINCREF(py_V7);}
V7 = NULL;
if (py_V7 == Py_None) {
// We can either fail here or set V7 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
if (!PyArray_Check(py_V7)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V7)) {
PyArrayObject * tmp = (PyArrayObject*) py_V7;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V7),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V7) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V7));
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
V7 = (PyArrayObject*)(py_V7);
Py_XINCREF(V7);
{
py_V9 = PyList_GET_ITEM(storage_V9, 0);
{Py_XINCREF(py_V9);}
V9 = NULL;
if (py_V9 == Py_None) {
// We can either fail here or set V9 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
if (!PyArray_Check(py_V9)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V9)) {
PyArrayObject * tmp = (PyArrayObject*) py_V9;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V9),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V9) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V9));
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
V9 = (PyArrayObject*)(py_V9);
Py_XINCREF(V9);
{
// Op class GpuAlloc
int dims[3];
dims[0] = PyInt_AsLong((PyObject*)V5);
dims[1] = PyInt_AsLong((PyObject*)V7);
dims[2] = PyInt_AsLong((PyObject*)V9);
if(V1==NULL
||CudaNdarray_HOST_DIMS(V1)[0]!=dims[0]||CudaNdarray_HOST_DIMS(V1)[1]!=dims[1]||CudaNdarray_HOST_DIMS(V1)[2]!=dims[2]){
Py_XDECREF(V1);
V1 = (CudaNdarray*)CudaNdarray_New();
if (!V1)
{
// exception already set
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
if (CudaNdarray_alloc_contiguous(V1, 3, dims))
{
// exception already set
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
}
if (1 && CudaNdarray_is_c_contiguous(V1))
{
hipError_t err = hipMemset(V1->devdata, 0,
CudaNdarray_SIZE(V1) * 4);
if (hipSuccess != err)
{
PyErr_Format(PyExc_MemoryError,
"GpuAlloc: Error memsetting %ld"
" bytes of device memory. %s",
(long)(CudaNdarray_SIZE(V1) * 4),
hipGetErrorString(err));
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
}
else if (CudaNdarray_CopyFromCudaNdarray(V1, V3, true))
{
// exception already set
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
__label_11:
double __DUMMY_11;
}
__label_10:
if (V9) {
Py_XDECREF(V9);
}
{Py_XDECREF(py_V9);}
double __DUMMY_10;
}
__label_8:
if (V7) {
Py_XDECREF(V7);
}
{Py_XDECREF(py_V7);}
double __DUMMY_8;
}
__label_6:
if (V5) {
Py_XDECREF(V5);
}
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_c4a12f4110948fee8dd968510835197b_executor(__struct_compiled_op_c4a12f4110948fee8dd968510835197b* self) {
return self->run();
}
static void __struct_compiled_op_c4a12f4110948fee8dd968510835197b_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_c4a12f4110948fee8dd968510835197b*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (6 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 6, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_c4a12f4110948fee8dd968510835197b* struct_ptr = new __struct_compiled_op_c4a12f4110948fee8dd968510835197b();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_c4a12f4110948fee8dd968510835197b_executor), struct_ptr, __struct_compiled_op_c4a12f4110948fee8dd968510835197b_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC initc4a12f4110948fee8dd968510835197b(void){
import_array();
(void) Py_InitModule("c4a12f4110948fee8dd968510835197b", MyMethods);
}
| 6fa3dfb300aa8bc392339f3911b0b3621cf205ae.cu | #include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
#include <math.h>
#include <numpy/arrayobject.h>
#include <numpy/arrayscalars.h>
//////////////////////
//// Support Code
//////////////////////
namespace {
struct __struct_compiled_op_c4a12f4110948fee8dd968510835197b {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V7;
PyObject* storage_V9;
PyObject* storage_V1;
__struct_compiled_op_c4a12f4110948fee8dd968510835197b() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_c4a12f4110948fee8dd968510835197b(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V7);
Py_XINCREF(storage_V9);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V7 = storage_V7;
this->storage_V9 = storage_V9;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_7:
double __DUMMY_7;
__label_9:
double __DUMMY_9;
__label_12:
double __DUMMY_12;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V7);
Py_XDECREF(this->storage_V9);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
PyArrayObject* V5;
typedef npy_int64 dtype_V5;
PyObject* py_V7;
PyArrayObject* V7;
typedef npy_int64 dtype_V7;
PyObject* py_V9;
PyArrayObject* V9;
typedef npy_int64 dtype_V9;
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 3)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 3",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 0)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 0",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
V5 = NULL;
if (py_V5 == Py_None) {
// We can either fail here or set V5 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
if (!PyArray_Check(py_V5)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V5)) {
PyArrayObject * tmp = (PyArrayObject*) py_V5;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V5),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V5) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V5));
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;}
}
V5 = (PyArrayObject*)(py_V5);
Py_XINCREF(V5);
{
py_V7 = PyList_GET_ITEM(storage_V7, 0);
{Py_XINCREF(py_V7);}
V7 = NULL;
if (py_V7 == Py_None) {
// We can either fail here or set V7 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
if (!PyArray_Check(py_V7)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V7)) {
PyArrayObject * tmp = (PyArrayObject*) py_V7;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V7),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V7) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V7));
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;}
}
V7 = (PyArrayObject*)(py_V7);
Py_XINCREF(V7);
{
py_V9 = PyList_GET_ITEM(storage_V9, 0);
{Py_XINCREF(py_V9);}
V9 = NULL;
if (py_V9 == Py_None) {
// We can either fail here or set V9 to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
if (!PyArray_Check(py_V9)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
// We expect NPY_INT64
if (!PyArray_ISALIGNED((PyArrayObject*) py_V9)) {
PyArrayObject * tmp = (PyArrayObject*) py_V9;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %ld "
"(NPY_INT64), got non-aligned array of type %ld"
" with %ld dimensions, with 3 last dims "
"%ld, %ld, %ld"
" and 3 last strides %ld %ld, %ld.",
(long int) NPY_INT64,
(long int) PyArray_TYPE((PyArrayObject*) py_V9),
(long int) PyArray_NDIM(tmp),
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1,
(long int) PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1,
(long int) PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1,
(long int) PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1
);
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_V9) != NPY_INT64) {
PyErr_Format(PyExc_TypeError,
"expected type_num %d (NPY_INT64) got %d",
NPY_INT64, PyArray_TYPE((PyArrayObject*) py_V9));
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;}
}
V9 = (PyArrayObject*)(py_V9);
Py_XINCREF(V9);
{
// Op class GpuAlloc
int dims[3];
dims[0] = PyInt_AsLong((PyObject*)V5);
dims[1] = PyInt_AsLong((PyObject*)V7);
dims[2] = PyInt_AsLong((PyObject*)V9);
if(V1==NULL
||CudaNdarray_HOST_DIMS(V1)[0]!=dims[0]||CudaNdarray_HOST_DIMS(V1)[1]!=dims[1]||CudaNdarray_HOST_DIMS(V1)[2]!=dims[2]){
Py_XDECREF(V1);
V1 = (CudaNdarray*)CudaNdarray_New();
if (!V1)
{
// exception already set
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
if (CudaNdarray_alloc_contiguous(V1, 3, dims))
{
// exception already set
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
}
if (1 && CudaNdarray_is_c_contiguous(V1))
{
cudaError_t err = cudaMemset(V1->devdata, 0,
CudaNdarray_SIZE(V1) * 4);
if (cudaSuccess != err)
{
PyErr_Format(PyExc_MemoryError,
"GpuAlloc: Error memsetting %ld"
" bytes of device memory. %s",
(long)(CudaNdarray_SIZE(V1) * 4),
cudaGetErrorString(err));
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
}
else if (CudaNdarray_CopyFromCudaNdarray(V1, V3, true))
{
// exception already set
Py_XDECREF(V1);
V1 = NULL;
{
__failure = 11;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_11;};
}
__label_11:
double __DUMMY_11;
}
__label_10:
if (V9) {
Py_XDECREF(V9);
}
{Py_XDECREF(py_V9);}
double __DUMMY_10;
}
__label_8:
if (V7) {
Py_XDECREF(V7);
}
{Py_XDECREF(py_V7);}
double __DUMMY_8;
}
__label_6:
if (V5) {
Py_XDECREF(V5);
}
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_c4a12f4110948fee8dd968510835197b_executor(__struct_compiled_op_c4a12f4110948fee8dd968510835197b* self) {
return self->run();
}
static void __struct_compiled_op_c4a12f4110948fee8dd968510835197b_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_c4a12f4110948fee8dd968510835197b*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (6 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 6, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_c4a12f4110948fee8dd968510835197b* struct_ptr = new __struct_compiled_op_c4a12f4110948fee8dd968510835197b();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_c4a12f4110948fee8dd968510835197b_executor), struct_ptr, __struct_compiled_op_c4a12f4110948fee8dd968510835197b_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC initc4a12f4110948fee8dd968510835197b(void){
import_array();
(void) Py_InitModule("c4a12f4110948fee8dd968510835197b", MyMethods);
}
|
27bb891c8a879facabc22f24b016748aeed1621f.hip | // !!! This is a file automatically generated by hipify!!!
#include "fast_pcl/ndt_gpu/MatrixDevice.h"
#include "fast_pcl/ndt_gpu/debug.h"
namespace gpu {
MatrixDevice::MatrixDevice(int rows, int cols) {
rows_ = rows;
cols_ = cols;
offset_ = 1;
fr_ = true;
buffer_ = NULL;
}
void MatrixDevice::memAlloc()
{
if (buffer_ != NULL && fr_) {
checkCudaErrors(hipFree(buffer_));
buffer_ = NULL;
}
checkCudaErrors(hipMalloc(&buffer_, sizeof(double) * rows_ * cols_ * offset_));
checkCudaErrors(hipMemset(buffer_, 0, sizeof(double) * rows_ * cols_ * offset_));
checkCudaErrors(hipDeviceSynchronize());
fr_ = true;
}
void MatrixDevice::memFree()
{
if (fr_) {
if (buffer_ != NULL) {
checkCudaErrors(hipFree(buffer_));
buffer_ = NULL;
}
}
}
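// Usage note: memAlloc() drops any buffer this object already owns, then
// allocates rows_ * cols_ * offset_ doubles on the device, zero-fills them and
// synchronizes; memFree() releases the buffer only when fr_ marks this
// MatrixDevice as the owner of the allocation.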
SquareMatrixDevice::SquareMatrixDevice(int size) :
MatrixDevice(size, size)
{
}
}
| 27bb891c8a879facabc22f24b016748aeed1621f.cu | #include "fast_pcl/ndt_gpu/MatrixDevice.h"
#include "fast_pcl/ndt_gpu/debug.h"
namespace gpu {
MatrixDevice::MatrixDevice(int rows, int cols) {
rows_ = rows;
cols_ = cols;
offset_ = 1;
fr_ = true;
buffer_ = NULL;
}
void MatrixDevice::memAlloc()
{
if (buffer_ != NULL && fr_) {
checkCudaErrors(cudaFree(buffer_));
buffer_ = NULL;
}
checkCudaErrors(cudaMalloc(&buffer_, sizeof(double) * rows_ * cols_ * offset_));
checkCudaErrors(cudaMemset(buffer_, 0, sizeof(double) * rows_ * cols_ * offset_));
checkCudaErrors(cudaDeviceSynchronize());
fr_ = true;
}
void MatrixDevice::memFree()
{
if (fr_) {
if (buffer_ != NULL) {
checkCudaErrors(cudaFree(buffer_));
buffer_ = NULL;
}
}
}
SquareMatrixDevice::SquareMatrixDevice(int size) :
MatrixDevice(size, size)
{
}
}
|
2f1b57c3b1480d826e3a373634640bb133111388.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip_runtime.h"
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <nvmatrix_kernels.cuh>
__global__ void kTile(hipLaunchParm lp,const float* src, float* tgt, const uint srcWidth, const uint srcHeight, const uint tgtWidth, const uint tgtHeight) {
const int idx = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
const int numThreads = hipBlockDim_x * hipGridDim_x;
// const unsigned int numEls = tgtWidth * tgtHeight;
for (uint i = idx; i < tgtWidth * tgtHeight; i += numThreads) {
const uint y = i / tgtWidth;
const uint x = i % tgtWidth;
const uint srcY = y % srcHeight;
const uint srcX = x % srcWidth;
tgt[i] = src[srcY * srcWidth + srcX];
}
}
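// kTile replicates the srcWidth x srcHeight source periodically across the
// tgtWidth x tgtHeight target; the grid-stride loop lets any launch size cover
// all tgtWidth * tgtHeight output elements.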
__global__ void kDotProduct_r(hipLaunchParm lp, float* a, float* b, float* target, const uint numCols, const uint numElements) {
__shared__ float shmem[DP_BLOCKSIZE];
uint eidx = DP_BLOCKSIZE * hipBlockIdx_x + hipThreadIdx_x;
shmem[hipThreadIdx_x] = 0;
if (eidx < numCols) {
for (; eidx < numElements; eidx += numCols) {
shmem[hipThreadIdx_x] += a[eidx] * b[eidx];
}
}
__syncthreads();
if (hipThreadIdx_x < 256) {
shmem[hipThreadIdx_x] += shmem[hipThreadIdx_x + 256];
}
__syncthreads();
if (hipThreadIdx_x < 128) {
shmem[hipThreadIdx_x] += shmem[hipThreadIdx_x + 128];
}
__syncthreads();
if (hipThreadIdx_x < 64) {
shmem[hipThreadIdx_x] += shmem[hipThreadIdx_x + 64];
}
__syncthreads();
if (hipThreadIdx_x < 32) {
volatile float* mysh = &shmem[hipThreadIdx_x];
*mysh += mysh[32];
*mysh += mysh[16];
*mysh += mysh[8];
*mysh += mysh[4];
*mysh += mysh[2];
*mysh += mysh[1];
if (hipThreadIdx_x == 0) {
target[hipBlockIdx_x] = *mysh;
}
}
}
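// Each block reduces its strided partial products in shared memory and writes a
// single partial dot product to target[blockIdx]; the caller is expected to sum
// the per-block results afterwards. The explicit 256/128/64 steps plus the
// 32-wide warp tail assume DP_BLOCKSIZE is 512.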
__global__ void kSetupCurand(hipLaunchParm lp, hiprandState_t *state, unsigned long long seed) {
const uint tidx = NUM_RND_THREADS_PER_BLOCK * hipBlockIdx_x + hipThreadIdx_x;
/* Each thread gets same seed, a different sequence number,
no offset */
hiprand_init(seed, tidx, 0, &state[tidx]);
}
| 2f1b57c3b1480d826e3a373634640bb133111388.cu | #include "hip_runtime.h"
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <nvmatrix_kernels.cuh>
__global__ void kTile(hipLaunchParm lp,const float* src, float* tgt, const uint srcWidth, const uint srcHeight, const uint tgtWidth, const uint tgtHeight) {
const int idx = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
const int numThreads = hipBlockDim_x * hipGridDim_x;
// const unsigned int numEls = tgtWidth * tgtHeight;
for (uint i = idx; i < tgtWidth * tgtHeight; i += numThreads) {
const uint y = i / tgtWidth;
const uint x = i % tgtWidth;
const uint srcY = y % srcHeight;
const uint srcX = x % srcWidth;
tgt[i] = src[srcY * srcWidth + srcX];
}
}
__global__ void kDotProduct_r(hipLaunchParm lp, float* a, float* b, float* target, const uint numCols, const uint numElements) {
__shared__ float shmem[DP_BLOCKSIZE];
uint eidx = DP_BLOCKSIZE * hipBlockIdx_x + hipThreadIdx_x;
shmem[hipThreadIdx_x] = 0;
if (eidx < numCols) {
for (; eidx < numElements; eidx += numCols) {
shmem[hipThreadIdx_x] += a[eidx] * b[eidx];
}
}
__syncthreads();
if (hipThreadIdx_x < 256) {
shmem[hipThreadIdx_x] += shmem[hipThreadIdx_x + 256];
}
__syncthreads();
if (hipThreadIdx_x < 128) {
shmem[hipThreadIdx_x] += shmem[hipThreadIdx_x + 128];
}
__syncthreads();
if (hipThreadIdx_x < 64) {
shmem[hipThreadIdx_x] += shmem[hipThreadIdx_x + 64];
}
__syncthreads();
if (hipThreadIdx_x < 32) {
volatile float* mysh = &shmem[hipThreadIdx_x];
*mysh += mysh[32];
*mysh += mysh[16];
*mysh += mysh[8];
*mysh += mysh[4];
*mysh += mysh[2];
*mysh += mysh[1];
if (hipThreadIdx_x == 0) {
target[hipBlockIdx_x] = *mysh;
}
}
}
__global__ void kSetupCurand(hipLaunchParm lp, curandState *state, unsigned long long seed) {
const uint tidx = NUM_RND_THREADS_PER_BLOCK * hipBlockIdx_x + hipThreadIdx_x;
/* Each thread gets same seed, a different sequence number,
no offset */
curand_init(seed, tidx, 0, &state[tidx]);
}
|
33bc56f68f18bdd7cc4ec8be7e5cad7f31c4140e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ================================================================
*
* PyCA Project
*
* Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All
* rights reserved. See Copyright.txt or for details.
*
* This software is distributed WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notice for more information.
*
* ================================================================ */
#include "GGaussRecFilterKernels.h"
#include "pycaUtils.h"
// TEST make sure boost isn't included in nvcc code
#if defined(BOOST_COMPILER)
int bla[-1];
#endif
namespace PyCA {
template<class T, bool clampToEdge>
__global__ void RGFFilter2D_kernel(T* d_o, const T* d_i,
int sizeX, int sizeY,
float a0, float a1, float a2, float a3, float b1, float b2, float coefp, float coefn)
{
uint x = blockIdx.x*blockDim.x + threadIdx.x;
if ((x >= sizeX))
return;
d_o += x;
d_i += x;
T xp = (T)0; // previous input
T yp = (T)0; // previous output
T yb = (T)0; // previous output by 2
if (clampToEdge){
xp = *d_i; yb = coefp*xp; yp = yb;
}
for (int y = 0; y < sizeY; y++) {
float xc = *d_i;
float yc = a0*xc + a1*xp - b1*yp - b2*yb;
*d_o = yc;
//shifting around input output
xp = xc; yb = yp; yp = yc;
// move to the next row
d_i += sizeX; d_o += sizeX; // move to next row
}
// reset pointers to point to last element in column
d_i -= sizeX;
d_o -= sizeX;
// reverse pass
// ensures response is symmetrical
float xn =0.0f, xa = 0.0f, yn = 0.0f, ya = 0.0f;
if (clampToEdge){
xn = xa = *d_i; yn = coefn*xn; ya = yn;
}
for (int y = sizeY-1; y >= 0; y--) {
float xc = *d_i;
float yc = a2*xn + a3*xa - b1*yn - b2*ya;
*d_o = *d_o + yc;
//shifting around input output
xa = xn; xn = xc; ya = yn; yn = yc;
d_o -= sizeX; d_i -= sizeX; // move to previous row
}
}
template<class T, bool clampToEdge>
__global__ void RGFFilter3D_kernel(T* d_o, const T* d_i,
int sizeX, int sizeY, int sizeZ,
float a0, float a1, float a2, float a3, float b1, float b2, float coefp, float coefn)
{
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= sizeX) || (y >= sizeY))
return;
uint id = x + y * sizeX;
const uint planeSize = sizeX * sizeY;
d_o += id;
d_i += id;
T xp = (T)0; // previous input
T yp = (T)0; // previous output
T yb = (T)0; // previous output by 2
if (clampToEdge){
xp = *d_i; yb = coefp*xp; yp = yb;
}
for (int z = 0; z < sizeZ; z++) {
T xc = *d_i;
T yc = a0*xc + a1*xp - b1*yp - b2*yb;
*d_o = yc;
//shifting around input output
xp = xc; yb = yp; yp = yc;
// move to next plane
d_i += planeSize;
d_o += planeSize;
}
// reset pointers to point to last element in column
d_i -= planeSize;
d_o -= planeSize;
// reverse pass
// ensures response is symmetrical
T xn = (T)(0.0f);
T xa = (T)(0.0f);
T yn = (T)(0.0f);
T ya = (T)(0.0f);
if (clampToEdge){
xn = xa = *d_i; yn = coefn*xn; ya = yn;
}
for (int z = sizeZ-1; z >= 0; z--) {
T xc = *d_i;
T yc = a2*xn + a3*xa - b1*yn - b2*ya;
*d_o = *d_o + yc;
//shifting around input output
xa = xn;
xn = xc;
ya = yn;
yn = yc;
// move to previous plane
d_i -= planeSize;
d_o -= planeSize;
}
}
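// The forward and reverse sweeps above form a recursive (IIR) approximation of
// a Gaussian along Z: the causal pass writes its response, and the anti-causal
// pass accumulates into the same output. The coefficients a0..a3, b1, b2,
// coefp and coefn are assumed to be precomputed (Deriche / van Vliet style) in
// GaussRecParams; clampToEdge seeds both recursions with the boundary sample
// instead of zeros.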
void
ConvolutionX3D(float* d_o, const float* d_i,
const GaussRecFilterBase<EXEC_GPU>::GaussRecParams& p,
size_t sizeX, size_t sizeY, size_t sizeZ,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sizeX, threads.x),iDivUp(sizeY, threads.y));
hipLaunchKernelGGL(( RGFFilter3D_kernel<float, true>), dim3(grids),dim3(threads), 0, stream,
d_o, d_i,
sizeX, sizeY, sizeZ,
p.a0, p.a1, p.a2, p.a3,
p.b1, p.b2, p.coefp, p.coefn);
}
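// Launch shape: one 16x16 block per X-Y tile, one thread per (x, y) column,
// each filtering the full Z extent. Despite its name, this entry point smooths
// along Z only; presumably the caller permutes the volume or relies on
// companion kernels such as RGFFilter2D_kernel above for the remaining axes.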
} // end namespace PyCA
| 33bc56f68f18bdd7cc4ec8be7e5cad7f31c4140e.cu | /* ================================================================
*
* PyCA Project
*
* Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All
* rights reserved. See Copyright.txt or for details.
*
* This software is distributed WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notice for more information.
*
* ================================================================ */
#include "GGaussRecFilterKernels.h"
#include "pycaUtils.h"
// TEST make sure boost isn't included in nvcc code
#if defined(BOOST_COMPILER)
int bla[-1];
#endif
namespace PyCA {
template<class T, bool clampToEdge>
__global__ void RGFFilter2D_kernel(T* d_o, const T* d_i,
int sizeX, int sizeY,
float a0, float a1, float a2, float a3, float b1, float b2, float coefp, float coefn)
{
uint x = blockIdx.x*blockDim.x + threadIdx.x;
if ((x >= sizeX))
return;
d_o += x;
d_i += x;
T xp = (T)0; // previous input
T yp = (T)0; // previous output
T yb = (T)0; // previous output by 2
if (clampToEdge){
xp = *d_i; yb = coefp*xp; yp = yb;
}
for (int y = 0; y < sizeY; y++) {
float xc = *d_i;
float yc = a0*xc + a1*xp - b1*yp - b2*yb;
*d_o = yc;
//shifting around input output
xp = xc; yb = yp; yp = yc;
// move to the next row
d_i += sizeX; d_o += sizeX; // move to next row
}
// reset pointers to point to last element in column
d_i -= sizeX;
d_o -= sizeX;
// reverse pass
// ensures response is symmetrical
float xn =0.0f, xa = 0.0f, yn = 0.0f, ya = 0.0f;
if (clampToEdge){
xn = xa = *d_i; yn = coefn*xn; ya = yn;
}
for (int y = sizeY-1; y >= 0; y--) {
float xc = *d_i;
float yc = a2*xn + a3*xa - b1*yn - b2*ya;
*d_o = *d_o + yc;
//shifting around input output
xa = xn; xn = xc; ya = yn; yn = yc;
d_o -= sizeX; d_i -= sizeX; // move to previous row
}
}
template<class T, bool clampToEdge>
__global__ void RGFFilter3D_kernel(T* d_o, const T* d_i,
int sizeX, int sizeY, int sizeZ,
float a0, float a1, float a2, float a3, float b1, float b2, float coefp, float coefn)
{
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= sizeX) || (y >= sizeY))
return;
uint id = x + y * sizeX;
const uint planeSize = sizeX * sizeY;
d_o += id;
d_i += id;
T xp = (T)0; // previous input
T yp = (T)0; // previous output
T yb = (T)0; // previous output by 2
if (clampToEdge){
xp = *d_i; yb = coefp*xp; yp = yb;
}
for (int z = 0; z < sizeZ; z++) {
T xc = *d_i;
T yc = a0*xc + a1*xp - b1*yp - b2*yb;
*d_o = yc;
//shifting around input output
xp = xc; yb = yp; yp = yc;
// move to next plane
d_i += planeSize;
d_o += planeSize;
}
// reset pointers to point to last element in column
d_i -= planeSize;
d_o -= planeSize;
// reverse pass
// ensures response is symmetrical
T xn = (T)(0.0f);
T xa = (T)(0.0f);
T yn = (T)(0.0f);
T ya = (T)(0.0f);
if (clampToEdge){
xn = xa = *d_i; yn = coefn*xn; ya = yn;
}
for (int z = sizeZ-1; z >= 0; z--) {
T xc = *d_i;
T yc = a2*xn + a3*xa - b1*yn - b2*ya;
*d_o = *d_o + yc;
//shifting around input output
xa = xn;
xn = xc;
ya = yn;
yn = yc;
// move to previous plane
d_i -= planeSize;
d_o -= planeSize;
}
}
void
ConvolutionX3D(float* d_o, const float* d_i,
const GaussRecFilterBase<EXEC_GPU>::GaussRecParams& p,
size_t sizeX, size_t sizeY, size_t sizeZ,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sizeX, threads.x),iDivUp(sizeY, threads.y));
RGFFilter3D_kernel<float, true><<<grids,threads, 0, stream>>>
(d_o, d_i,
sizeX, sizeY, sizeZ,
p.a0, p.a1, p.a2, p.a3,
p.b1, p.b2, p.coefp, p.coefn);
}
} // end namespace PyCA
|
711a4298282b45f469bd4a39e2f58e91fa877d2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm> // std::min
#include "nasa_poly_kernels.h"
#include "zerork_cuda_defs.h"
#include "constants.h"
namespace zerork {
void __global__ cuda_get_G_RT(int nSpc, const double T, const double *thermoCoeff_dev, double *G_RT_dev)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid < nSpc)
{
double Tmid,g1;
double invT=1.0/T;
double gMult[5];
gMult[0]=1.0-log(T);
gMult[1]=T;
gMult[2]=T*gMult[1];
gMult[3]=T*gMult[2];
gMult[4]=T*gMult[3];
// g0 = 1 - ln(T)
gMult[1]*=-0.50000000000000000000; // g1 = - T/2
gMult[2]*=-0.16666666666666666667; // g2 = - T^2/6
gMult[3]*=-0.08333333333333333333; // g3 = - T^3/12
gMult[4]*=-0.05000000000000000000; // g4 = - T^4/20
int coefAddr=LDA_THERMO_POLY_D5R2*tid;
Tmid=thermoCoeff_dev[coefAddr];
if(T < Tmid)
{
g1=thermoCoeff_dev[coefAddr+1]*gMult[0]+
thermoCoeff_dev[coefAddr+2]*gMult[1]+
thermoCoeff_dev[coefAddr+3]*gMult[2]+
thermoCoeff_dev[coefAddr+4]*gMult[3]+
thermoCoeff_dev[coefAddr+5]*gMult[4]+
thermoCoeff_dev[coefAddr+6]*invT-
thermoCoeff_dev[coefAddr+7];
}
else
{
g1=thermoCoeff_dev[coefAddr+8 ]*gMult[0]+
thermoCoeff_dev[coefAddr+9 ]*gMult[1]+
thermoCoeff_dev[coefAddr+10]*gMult[2]+
thermoCoeff_dev[coefAddr+11]*gMult[3]+
thermoCoeff_dev[coefAddr+12]*gMult[4]+
thermoCoeff_dev[coefAddr+13]*invT-
thermoCoeff_dev[coefAddr+14];
}
G_RT_dev[tid] = g1;
}
}
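// The kernel above evaluates the NASA 7-coefficient fit for the dimensionless
// Gibbs energy,
// G/RT = a1*(1 - ln T) - a2*T/2 - a3*T^2/6 - a4*T^3/12 - a5*T^4/20 + a6/T - a7,
// using the low-temperature coefficient set below Tmid (the first value stored
// per species) and the high-temperature set above it.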
__device__ const double gMult[16] = {
//Tmid, g1, g2, g3, g4, g5 g6 g7,
1., 1.,-0.5,-0.16666666666666666667,-0.08333333333333333333,-0.05,1.,-1,
1.,-0.5,-0.16666666666666666667,-0.08333333333333333333,-0.05,1.,-1,0.,
};
void __global__ cuda_get_G_RT_v2(int nSpc, const double T, const double *thermoCoeff_dev, double *G_RT_dev)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int speciesid = tid/16;
int myOffset = tid-16*speciesid;
if(speciesid < nSpc)
{
extern double __shared__ gOut[];
gOut[threadIdx.x] = thermoCoeff_dev[tid]*gMult[myOffset];
__syncthreads();
if(myOffset == 0)
{
if(T < gOut[threadIdx.x])
{
G_RT_dev[speciesid] = (((gOut[threadIdx.x+5]*T+gOut[threadIdx.x+4])*T+gOut[threadIdx.x+3])*T+gOut[threadIdx.x+2])*T
+ gOut[threadIdx.x+1]*(1.0-log(T)) + gOut[threadIdx.x+6]/T +gOut[threadIdx.x+7];
}
else
{
G_RT_dev[speciesid] = (((gOut[threadIdx.x+12]*T+gOut[threadIdx.x+11])*T+gOut[threadIdx.x+10])*T+gOut[threadIdx.x+9])*T
+ gOut[threadIdx.x+8]*(1.0-log(T)) + gOut[threadIdx.x+13]/T +gOut[threadIdx.x+14];
}
}
}
}
void __global__ cuda_get_G_RT_mr
(
int nReactors, int nSpc, const double *T_dev, const double *thermoCoeff_dev, double *G_RT_dev
)
{
int reactorid = blockIdx.x*blockDim.x + threadIdx.x;
int speciesid = blockIdx.y;
if(speciesid < nSpc)
{
if(reactorid < nReactors)
{
int coefAddr = LDA_THERMO_POLY_D5R2*speciesid;
__shared__ double coeffShr[16];
int counter = threadIdx.x;
int stride = min(blockDim.x,nReactors-blockDim.x*blockIdx.x);
while(counter < 16)
{
coeffShr[counter]=thermoCoeff_dev[coefAddr+counter];
counter += stride;
}
__syncthreads();
double Tmid,g1,g2;
double T = T_dev[reactorid];
double invT=1.0/T;
double gMult[5];
gMult[0]=1.0-log(T);
gMult[1]=T;
gMult[2]=T*gMult[1];
gMult[3]=T*gMult[2];
gMult[4]=T*gMult[3];
// g0 = 1 - ln(T)
gMult[1]*=-0.50000000000000000000; // g1 = - T/2
gMult[2]*=-0.16666666666666666667; // g2 = - T^2/6
gMult[3]*=-0.08333333333333333333; // g3 = - T^3/12
gMult[4]*=-0.05000000000000000000; // g4 = - T^4/20
Tmid=coeffShr[0];
if(T < Tmid)
{
g2=coeffShr[1]*gMult[0]+
coeffShr[2]*gMult[1]+
coeffShr[3]*gMult[2]+
coeffShr[4]*gMult[3]+
coeffShr[5]*gMult[4]+
coeffShr[6]*invT-
coeffShr[7];
}
else
{
g2=coeffShr[8 ]*gMult[0]+
coeffShr[9 ]*gMult[1]+
coeffShr[10]*gMult[2]+
coeffShr[11]*gMult[3]+
coeffShr[12]*gMult[4]+
coeffShr[13]*invT-
coeffShr[14];
}
// if(T < Tmid)
// {
// g2 = g1;
// }
G_RT_dev[reactorid+speciesid*nReactors] = g2;
}
}
}
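// Multi-reactor variant: blockIdx.y selects the species and each thread handles
// one reactor, with the block cooperatively staging that species' coefficient
// row in shared memory. The layout implied here and by cuda_get_G_RT_v2 is 16
// doubles per species: [0] Tmid, [1..7] low-T a1..a7, [8..14] high-T a1..a7,
// and [15] apparently padding.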
void __global__ cuda_get_H_RT_mr
(
int nReactors, int nSpc, const double *T_dev, const double *thermoCoeff_dev, double *H_RT_dev
)
{
int reactorid = blockIdx.x*blockDim.x + threadIdx.x;
int speciesid = blockIdx.y;
if(speciesid < nSpc)
{
if(reactorid < nReactors)
{
int coefAddr = LDA_THERMO_POLY_D5R2*speciesid;
__shared__ double coeffShr[16];
int counter = threadIdx.x;
int stride = min(blockDim.x,nReactors-blockDim.x*blockIdx.x);
while(counter < 16)
{
coeffShr[counter]=thermoCoeff_dev[coefAddr+counter];
counter += stride;
}
__syncthreads();
double Tmid,h1,h2;
double T = T_dev[reactorid];
double invT=1.0/T;
double hMult[4];
hMult[0]=T;
hMult[1]=T*hMult[0];
hMult[2]=T*hMult[1];
hMult[3]=T*hMult[2];
hMult[0]*=0.50000000000000000000; // h0 = T/2
hMult[1]*=0.33333333333333333333; // h1 = T^2/3
hMult[2]*=0.25000000000000000000; // h2 = T^3/4
hMult[3]*=0.20000000000000000000; // h3 = T^4/5
Tmid=coeffShr[0];
h1=coeffShr[1]+
coeffShr[2]*hMult[0]+
coeffShr[3]*hMult[1]+
coeffShr[4]*hMult[2]+
coeffShr[5]*hMult[3]+
coeffShr[6]*invT;
h2=coeffShr[8 ]+
coeffShr[9 ]*hMult[0]+
coeffShr[10]*hMult[1]+
coeffShr[11]*hMult[2]+
coeffShr[12]*hMult[3]+
coeffShr[13]*invT;
if(T < Tmid)
{
h2 = h1;
}
H_RT_dev[reactorid+speciesid*nReactors] = h2;
}
}
}
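// Same staging scheme for the enthalpy fit,
// H/RT = a1 + a2*T/2 + a3*T^2/3 + a4*T^3/4 + a5*T^4/5 + a6/T,
// evaluated for both coefficient ranges and selected against Tmid at the end.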
void __global__ cuda_get_Cp_R_mr
(
int nReactors, int nSpc, const double *T_dev, const double *thermoCoeff_dev, double *Cp_R_dev
)
{
int reactorid = blockIdx.x*blockDim.x + threadIdx.x;
int speciesid = blockIdx.y;
if(speciesid < nSpc)
{
if(reactorid < nReactors)
{
int coefAddr = LDA_THERMO_POLY_D5R2*speciesid;
__shared__ double coeffShr[16];
int counter = threadIdx.x;
int stride = min(blockDim.x,nReactors-blockDim.x*blockIdx.x);
while(counter < 16)
{
coeffShr[counter]=thermoCoeff_dev[coefAddr+counter];
counter += stride;
}
__syncthreads();
double Tmid,cp1,cp2;
double T = T_dev[reactorid];
double invT=1.0/T;
Tmid=coeffShr[0];
cp1= coeffShr[1]+
T*(coeffShr[2]+
T*(coeffShr[3]+
T*(coeffShr[4]+
T* coeffShr[5])));
cp2= coeffShr[8 ]+
T*(coeffShr[9 ]+
T*(coeffShr[10]+
T*(coeffShr[11]+
T* coeffShr[12])));
if(T < Tmid)
{
cp2 = cp1;
}
Cp_R_dev[reactorid+speciesid*nReactors] = cp2;
}
}
}
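// And the heat-capacity fit, Cp/R = a1 + a2*T + a3*T^2 + a4*T^3 + a5*T^4,
// again computed for both ranges and chosen by Tmid (invT is loaded but unused
// in this kernel).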
void nasa_poly_group_getG_RT_CUDA(const double T, double * G_RT_dev, const int nGroupSpc, const double * thermoCoeff_dev, hipStream_t stream)
{
int nThreads = THREADS_PER_BLOCK;
int nBlocks = (nGroupSpc+nThreads-1)/nThreads;
hipLaunchKernelGGL(( cuda_get_G_RT), dim3(nBlocks), dim3(nThreads), 0, stream, nGroupSpc, T, thermoCoeff_dev, G_RT_dev );
#ifdef ZERORK_FULL_DEBUG
hipDeviceSynchronize();
checkCudaError(hipGetLastError(),"cuda_get_G_RT");
#endif
}
void nasa_poly_group_getG_RT_CUDA_mr(const int nReactors, const double *T_dev, double *G_RT_dev, const int nGroupSpc, const double * thermoCoeff_dev, hipStream_t stream)
{
int nThreads = ::min(nReactors,MAX_THREADS_PER_BLOCK);
dim3 nBlocks((nReactors+nThreads-1)/nThreads,nGroupSpc);
hipLaunchKernelGGL(( cuda_get_G_RT_mr), dim3(nBlocks), dim3(nThreads), 0, stream, nReactors, nGroupSpc, T_dev, thermoCoeff_dev, G_RT_dev);
#ifdef ZERORK_FULL_DEBUG
hipDeviceSynchronize();
checkCudaError(hipGetLastError(),"cuda_get_G_RT_mr");
#endif
}
void nasa_poly_group_getH_RT_CUDA_mr(const int nReactors, const double *T_dev, double *H_RT_dev, const int nGroupSpc, const double * thermoCoeff_dev, hipStream_t stream)
{
int nThreads = ::min(nReactors,MAX_THREADS_PER_BLOCK);
dim3 nBlocks((nReactors+nThreads-1)/nThreads,nGroupSpc);
hipLaunchKernelGGL(( cuda_get_H_RT_mr), dim3(nBlocks), dim3(nThreads), 0, stream, nReactors, nGroupSpc, T_dev, thermoCoeff_dev, H_RT_dev);
#ifdef ZERORK_FULL_DEBUG
hipDeviceSynchronize();
checkCudaError(hipGetLastError(),"cuda_get_H_RT_mr");
#endif
}
void nasa_poly_group_getCp_R_CUDA_mr(const int nReactors, const double *T_dev, double *Cp_R_dev, const int nGroupSpc, const double * thermoCoeff_dev, hipStream_t stream)
{
int nThreads = ::min(nReactors,MAX_THREADS_PER_BLOCK);
dim3 nBlocks((nReactors+nThreads-1)/nThreads,nGroupSpc);
hipLaunchKernelGGL(( cuda_get_Cp_R_mr), dim3(nBlocks), dim3(nThreads), 0, stream, nReactors, nGroupSpc, T_dev, thermoCoeff_dev, Cp_R_dev);
#ifdef ZERORK_FULL_DEBUG
hipDeviceSynchronize();
checkCudaError(hipGetLastError(),"cuda_get_Cp_R_mr");
#endif
}
} // namespace zerork
| 711a4298282b45f469bd4a39e2f58e91fa877d2c.cu | #include <algorithm> // std::min
#include "nasa_poly_kernels.h"
#include "zerork_cuda_defs.h"
#include "constants.h"
namespace zerork {
void __global__ cuda_get_G_RT(int nSpc, const double T, const double *thermoCoeff_dev, double *G_RT_dev)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid < nSpc)
{
double Tmid,g1;
double invT=1.0/T;
double gMult[5];
gMult[0]=1.0-log(T);
gMult[1]=T;
gMult[2]=T*gMult[1];
gMult[3]=T*gMult[2];
gMult[4]=T*gMult[3];
// g0 = 1 - ln(T)
gMult[1]*=-0.50000000000000000000; // g1 = - T/2
gMult[2]*=-0.16666666666666666667; // g2 = - T^2/6
gMult[3]*=-0.08333333333333333333; // g3 = - T^3/12
gMult[4]*=-0.05000000000000000000; // g4 = - T^4/20
int coefAddr=LDA_THERMO_POLY_D5R2*tid;
Tmid=thermoCoeff_dev[coefAddr];
if(T < Tmid)
{
g1=thermoCoeff_dev[coefAddr+1]*gMult[0]+
thermoCoeff_dev[coefAddr+2]*gMult[1]+
thermoCoeff_dev[coefAddr+3]*gMult[2]+
thermoCoeff_dev[coefAddr+4]*gMult[3]+
thermoCoeff_dev[coefAddr+5]*gMult[4]+
thermoCoeff_dev[coefAddr+6]*invT-
thermoCoeff_dev[coefAddr+7];
}
else
{
g1=thermoCoeff_dev[coefAddr+8 ]*gMult[0]+
thermoCoeff_dev[coefAddr+9 ]*gMult[1]+
thermoCoeff_dev[coefAddr+10]*gMult[2]+
thermoCoeff_dev[coefAddr+11]*gMult[3]+
thermoCoeff_dev[coefAddr+12]*gMult[4]+
thermoCoeff_dev[coefAddr+13]*invT-
thermoCoeff_dev[coefAddr+14];
}
G_RT_dev[tid] = g1;
}
}
__device__ const double gMult[16] = {
//Tmid, g1, g2, g3, g4, g5 g6 g7,
1., 1.,-0.5,-0.16666666666666666667,-0.08333333333333333333,-0.05,1.,-1,
1.,-0.5,-0.16666666666666666667,-0.08333333333333333333,-0.05,1.,-1,0.,
};
void __global__ cuda_get_G_RT_v2(int nSpc, const double T, const double *thermoCoeff_dev, double *G_RT_dev)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int speciesid = tid/16;
int myOffset = tid-16*speciesid;
if(speciesid < nSpc)
{
extern double __shared__ gOut[];
gOut[threadIdx.x] = thermoCoeff_dev[tid]*gMult[myOffset];
__syncthreads();
if(myOffset == 0)
{
if(T < gOut[threadIdx.x])
{
G_RT_dev[speciesid] = (((gOut[threadIdx.x+5]*T+gOut[threadIdx.x+4])*T+gOut[threadIdx.x+3])*T+gOut[threadIdx.x+2])*T
+ gOut[threadIdx.x+1]*(1.0-log(T)) + gOut[threadIdx.x+6]/T +gOut[threadIdx.x+7];
}
else
{
G_RT_dev[speciesid] = (((gOut[threadIdx.x+12]*T+gOut[threadIdx.x+11])*T+gOut[threadIdx.x+10])*T+gOut[threadIdx.x+9])*T
+ gOut[threadIdx.x+8]*(1.0-log(T)) + gOut[threadIdx.x+13]/T +gOut[threadIdx.x+14];
}
}
}
}
void __global__ cuda_get_G_RT_mr
(
int nReactors, int nSpc, const double *T_dev, const double *thermoCoeff_dev, double *G_RT_dev
)
{
int reactorid = blockIdx.x*blockDim.x + threadIdx.x;
int speciesid = blockIdx.y;
if(speciesid < nSpc)
{
if(reactorid < nReactors)
{
int coefAddr = LDA_THERMO_POLY_D5R2*speciesid;
__shared__ double coeffShr[16];
int counter = threadIdx.x;
int stride = min(blockDim.x,nReactors-blockDim.x*blockIdx.x);
while(counter < 16)
{
coeffShr[counter]=thermoCoeff_dev[coefAddr+counter];
counter += stride;
}
__syncthreads();
double Tmid,g1,g2;
double T = T_dev[reactorid];
double invT=1.0/T;
double gMult[5];
gMult[0]=1.0-log(T);
gMult[1]=T;
gMult[2]=T*gMult[1];
gMult[3]=T*gMult[2];
gMult[4]=T*gMult[3];
// g0 = 1 - ln(T)
gMult[1]*=-0.50000000000000000000; // g1 = - T/2
gMult[2]*=-0.16666666666666666667; // g2 = - T^2/6
gMult[3]*=-0.08333333333333333333; // g3 = - T^3/12
gMult[4]*=-0.05000000000000000000; // g4 = - T^4/20
Tmid=coeffShr[0];
if(T < Tmid)
{
g2=coeffShr[1]*gMult[0]+
coeffShr[2]*gMult[1]+
coeffShr[3]*gMult[2]+
coeffShr[4]*gMult[3]+
coeffShr[5]*gMult[4]+
coeffShr[6]*invT-
coeffShr[7];
}
else
{
g2=coeffShr[8 ]*gMult[0]+
coeffShr[9 ]*gMult[1]+
coeffShr[10]*gMult[2]+
coeffShr[11]*gMult[3]+
coeffShr[12]*gMult[4]+
coeffShr[13]*invT-
coeffShr[14];
}
// if(T < Tmid)
// {
// g2 = g1;
// }
G_RT_dev[reactorid+speciesid*nReactors] = g2;
}
}
}
void __global__ cuda_get_H_RT_mr
(
int nReactors, int nSpc, const double *T_dev, const double *thermoCoeff_dev, double *H_RT_dev
)
{
int reactorid = blockIdx.x*blockDim.x + threadIdx.x;
int speciesid = blockIdx.y;
if(speciesid < nSpc)
{
if(reactorid < nReactors)
{
int coefAddr = LDA_THERMO_POLY_D5R2*speciesid;
__shared__ double coeffShr[16];
int counter = threadIdx.x;
int stride = min(blockDim.x,nReactors-blockDim.x*blockIdx.x);
while(counter < 16)
{
coeffShr[counter]=thermoCoeff_dev[coefAddr+counter];
counter += stride;
}
__syncthreads();
double Tmid,h1,h2;
double T = T_dev[reactorid];
double invT=1.0/T;
double hMult[4];
hMult[0]=T;
hMult[1]=T*hMult[0];
hMult[2]=T*hMult[1];
hMult[3]=T*hMult[2];
hMult[0]*=0.50000000000000000000; // h0 = T/2
hMult[1]*=0.33333333333333333333; // h1 = T^2/3
hMult[2]*=0.25000000000000000000; // h2 = T^3/4
hMult[3]*=0.20000000000000000000; // h3 = T^4/5
Tmid=coeffShr[0];
h1=coeffShr[1]+
coeffShr[2]*hMult[0]+
coeffShr[3]*hMult[1]+
coeffShr[4]*hMult[2]+
coeffShr[5]*hMult[3]+
coeffShr[6]*invT;
h2=coeffShr[8 ]+
coeffShr[9 ]*hMult[0]+
coeffShr[10]*hMult[1]+
coeffShr[11]*hMult[2]+
coeffShr[12]*hMult[3]+
coeffShr[13]*invT;
if(T < Tmid)
{
h2 = h1;
}
H_RT_dev[reactorid+speciesid*nReactors] = h2;
}
}
}
void __global__ cuda_get_Cp_R_mr
(
int nReactors, int nSpc, const double *T_dev, const double *thermoCoeff_dev, double *Cp_R_dev
)
{
int reactorid = blockIdx.x*blockDim.x + threadIdx.x;
int speciesid = blockIdx.y;
if(speciesid < nSpc)
{
if(reactorid < nReactors)
{
int coefAddr = LDA_THERMO_POLY_D5R2*speciesid;
__shared__ double coeffShr[16];
int counter = threadIdx.x;
int stride = min(blockDim.x,nReactors-blockDim.x*blockIdx.x);
while(counter < 16)
{
coeffShr[counter]=thermoCoeff_dev[coefAddr+counter];
counter += stride;
}
__syncthreads();
double Tmid,cp1,cp2;
double T = T_dev[reactorid];
double invT=1.0/T;
Tmid=coeffShr[0];
cp1= coeffShr[1]+
T*(coeffShr[2]+
T*(coeffShr[3]+
T*(coeffShr[4]+
T* coeffShr[5])));
cp2= coeffShr[8 ]+
T*(coeffShr[9 ]+
T*(coeffShr[10]+
T*(coeffShr[11]+
T* coeffShr[12])));
if(T < Tmid)
{
cp2 = cp1;
}
Cp_R_dev[reactorid+speciesid*nReactors] = cp2;
}
}
}
void nasa_poly_group_getG_RT_CUDA(const double T, double * G_RT_dev, const int nGroupSpc, const double * thermoCoeff_dev, cudaStream_t stream)
{
int nThreads = THREADS_PER_BLOCK;
int nBlocks = (nGroupSpc+nThreads-1)/nThreads;
cuda_get_G_RT<<<nBlocks, nThreads, 0, stream>>> (nGroupSpc, T, thermoCoeff_dev, G_RT_dev );
#ifdef ZERORK_FULL_DEBUG
cudaDeviceSynchronize();
checkCudaError(cudaGetLastError(),"cuda_get_G_RT");
#endif
}
void nasa_poly_group_getG_RT_CUDA_mr(const int nReactors, const double *T_dev, double *G_RT_dev, const int nGroupSpc, const double * thermoCoeff_dev, cudaStream_t stream)
{
int nThreads = std::min(nReactors,MAX_THREADS_PER_BLOCK);
dim3 nBlocks((nReactors+nThreads-1)/nThreads,nGroupSpc);
cuda_get_G_RT_mr<<<nBlocks, nThreads, 0, stream>>>(nReactors, nGroupSpc, T_dev, thermoCoeff_dev, G_RT_dev);
#ifdef ZERORK_FULL_DEBUG
cudaDeviceSynchronize();
checkCudaError(cudaGetLastError(),"cuda_get_G_RT_mr");
#endif
}
void nasa_poly_group_getH_RT_CUDA_mr(const int nReactors, const double *T_dev, double *H_RT_dev, const int nGroupSpc, const double * thermoCoeff_dev, cudaStream_t stream)
{
int nThreads = std::min(nReactors,MAX_THREADS_PER_BLOCK);
dim3 nBlocks((nReactors+nThreads-1)/nThreads,nGroupSpc);
cuda_get_H_RT_mr<<<nBlocks, nThreads, 0, stream>>>(nReactors, nGroupSpc, T_dev, thermoCoeff_dev, H_RT_dev);
#ifdef ZERORK_FULL_DEBUG
cudaDeviceSynchronize();
checkCudaError(cudaGetLastError(),"cuda_get_H_RT_mr");
#endif
}
void nasa_poly_group_getCp_R_CUDA_mr(const int nReactors, const double *T_dev, double *Cp_R_dev, const int nGroupSpc, const double * thermoCoeff_dev, cudaStream_t stream)
{
int nThreads = std::min(nReactors,MAX_THREADS_PER_BLOCK);
dim3 nBlocks((nReactors+nThreads-1)/nThreads,nGroupSpc);
cuda_get_Cp_R_mr<<<nBlocks, nThreads, 0, stream>>>(nReactors, nGroupSpc, T_dev, thermoCoeff_dev, Cp_R_dev);
#ifdef ZERORK_FULL_DEBUG
cudaDeviceSynchronize();
checkCudaError(cudaGetLastError(),"cuda_get_Cp_R_mr");
#endif
}
} // namespace zerork
|
c12ff3455eff9df907a3561e08072da3ab3a81ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/device.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_ROCM
template <typename Dtype>
__global__ void SGDUpdate(int N, Dtype* g, Dtype* h,
Dtype momentum, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
g[i] = h[i] = momentum*h[i] + local_rate*g[i];
}
}
#endif
template <typename Dtype>
void sgd_update_gpu(device* dev, int_tp N, Dtype* g, Dtype* h, Dtype momentum,
Dtype local_rate) {
if (dev->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
SGDUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS) (
N, g, h, momentum, local_rate);
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(dev->id());
viennacl::ocl::program &program = dev->template program<Dtype>();
viennacl::ocl::kernel &oclk_sgd_update = program.get_kernel(
CL_KERNEL_SELECT("sgd_update"));
viennacl::ocl::enqueue(
oclk_sgd_update(N, WrapHandle((cl_mem) g, &ctx),
WrapHandle((cl_mem) h, &ctx), fixup_arg_type(momentum),
fixup_arg_type(local_rate)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
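// The fused kernel overwrites both buffers: h <- momentum*h + local_rate*g and
// g <- h, so the solver can apply the updated diff directly afterwards. A rough,
// illustrative (not verbatim) call site from an SGD solver would look like:
// sgd_update_gpu(dev, param->count(), param->mutable_gpu_diff(),
// history->mutable_gpu_data(), momentum, local_rate);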
#ifdef HAS_HALF_SUPPORT
template void sgd_update_gpu<half>(device*, int_tp, half*, half*, half,
half);
#endif
template void sgd_update_gpu<float>(device*, int_tp, float*, float*, float,
float);
template void sgd_update_gpu<double>(device*, int_tp, double*, double*, double,
double);
} // namespace caffe
| c12ff3455eff9df907a3561e08072da3ab3a81ce.cu | #include "caffe/device.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_CUDA
template <typename Dtype>
__global__ void SGDUpdate(int N, Dtype* g, Dtype* h,
Dtype momentum, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
g[i] = h[i] = momentum*h[i] + local_rate*g[i];
}
}
#endif
template <typename Dtype>
void sgd_update_gpu(device* dev, int_tp N, Dtype* g, Dtype* h, Dtype momentum,
Dtype local_rate) {
if (dev->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
SGDUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
CUDA_KERNEL(CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS) (
N, g, h, momentum, local_rate);
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(dev->id());
viennacl::ocl::program &program = dev->template program<Dtype>();
viennacl::ocl::kernel &oclk_sgd_update = program.get_kernel(
CL_KERNEL_SELECT("sgd_update"));
viennacl::ocl::enqueue(
oclk_sgd_update(N, WrapHandle((cl_mem) g, &ctx),
WrapHandle((cl_mem) h, &ctx), fixup_arg_type(momentum),
fixup_arg_type(local_rate)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
#ifdef HAS_HALF_SUPPORT
template void sgd_update_gpu<half>(device*, int_tp, half*, half*, half,
half);
#endif
template void sgd_update_gpu<float>(device*, int_tp, float*, float*, float,
float);
template void sgd_update_gpu<double>(device*, int_tp, double*, double*, double,
double);
} // namespace caffe
|
4ebc1673c7904758bfe9a1e4b69d311ab80b4bcd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "util.cu.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
// input: radius (1), nsample (1), points (b,n,3), new_points (b,m,3)
// output: idx (b,m,nsample)
__global__ void QueryBall(int b,
int n,
int m,
T radius,
int nsample,
const T *points,
const T *new_points,
int *idx) {
int batch_index = blockIdx.x;
points += n * 3 * batch_index;
new_points += m * 3 * batch_index;
idx += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
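  // One block per batch element; each thread strides over the m query points.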
for (int j = index; j < m; j += stride) {
int cnt = 0;
for (int k = 0; k < n; ++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2 = new_points[j * 3 + 0];
float y2 = new_points[j * 3 + 1];
float z2 = new_points[j * 3 + 2];
float x1 = points[k * 3 + 0];
float y1 = points[k * 3 + 1];
float z1 = points[k * 3 + 2];
float d =
(x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
if (d < radius * radius) {
        if (cnt == 0) { // set ALL indices to k, s.t. if there are fewer points
// in ball than nsample, we still have valid
// (repeating) indices
for (int l = 0; l < nsample; ++l) idx[j * nsample + l] = k;
}
idx[j * nsample + cnt] = k;
cnt += 1;
}
}
}
}
template <typename T>
class QueryBallOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
// input: radius (1), nsample (1), points (b,n,3), new_points (b,m,3)
// output: idx (b,m,nsample)
auto *points = ctx.Input<Tensor>("Points");
auto *new_points = ctx.Input<Tensor>("New_Points");
auto *output = ctx.Output<Tensor>("Output");
float radius = ctx.Attr<T>("Radius");
int nsample = ctx.Attr<int>("N_sample");
if (points->numel() == 0 || new_points->numel() == 0) return;
int batch_size = points->dims()[0];
int n = points->dims()[1];
int m = new_points->dims()[1];
// allocate memory
int* p_out_points = output->mutable_data<int>({batch_size, m, nsample}, ctx.GetPlace());
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
int pnum = output->numel();
hipLaunchKernelGGL(( Zero<int>), dim3((pnum + 512 - 1) / 512), dim3(512), 0, dev_ctx.stream(), p_out_points,
pnum);
const T *p_points = points->data<T>();
const T *p_new_points = new_points->data<T>();
hipLaunchKernelGGL(( QueryBall), dim3(batch_size), dim3(256), 0, 0, batch_size,
n,
m,
radius,
nsample,
p_points,
p_new_points,
p_out_points);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(query_ball, ops::QueryBallOpCUDAKernel<float>);
| 4ebc1673c7904758bfe9a1e4b69d311ab80b4bcd.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "util.cu.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
// input: radius (1), nsample (1), points (b,n,3), new_points (b,m,3)
// output: idx (b,m,nsample)
__global__ void QueryBall(int b,
int n,
int m,
T radius,
int nsample,
const T *points,
const T *new_points,
int *idx) {
int batch_index = blockIdx.x;
points += n * 3 * batch_index;
new_points += m * 3 * batch_index;
idx += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j = index; j < m; j += stride) {
int cnt = 0;
for (int k = 0; k < n; ++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2 = new_points[j * 3 + 0];
float y2 = new_points[j * 3 + 1];
float z2 = new_points[j * 3 + 2];
float x1 = points[k * 3 + 0];
float y1 = points[k * 3 + 1];
float z1 = points[k * 3 + 2];
float d =
(x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
if (d < radius * radius) {
        if (cnt == 0) { // set ALL indices to k, s.t. if there are fewer points
// in ball than nsample, we still have valid
// (repeating) indices
for (int l = 0; l < nsample; ++l) idx[j * nsample + l] = k;
}
idx[j * nsample + cnt] = k;
cnt += 1;
}
}
}
}
template <typename T>
class QueryBallOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
// input: radius (1), nsample (1), points (b,n,3), new_points (b,m,3)
// output: idx (b,m,nsample)
auto *points = ctx.Input<Tensor>("Points");
auto *new_points = ctx.Input<Tensor>("New_Points");
auto *output = ctx.Output<Tensor>("Output");
float radius = ctx.Attr<T>("Radius");
int nsample = ctx.Attr<int>("N_sample");
if (points->numel() == 0 || new_points->numel() == 0) return;
int batch_size = points->dims()[0];
int n = points->dims()[1];
int m = new_points->dims()[1];
// allocate memory
int* p_out_points = output->mutable_data<int>({batch_size, m, nsample}, ctx.GetPlace());
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
int pnum = output->numel();
Zero<int><<<(pnum + 512 - 1) / 512, 512, 0, dev_ctx.stream()>>>(p_out_points,
pnum);
const T *p_points = points->data<T>();
const T *p_new_points = new_points->data<T>();
QueryBall<<<batch_size, 256>>>(batch_size,
n,
m,
radius,
nsample,
p_points,
p_new_points,
p_out_points);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(query_ball, ops::QueryBallOpCUDAKernel<float>);
|
f6613444842b67ec13c00f3f63014430cfb30f42.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <hip/hip_runtime.h>
#define BUF_SIZE 256
#define PATTERN 0xDEADBEEF
// CPU implementation of matrix transpose
void matrixTransposeCPUReference(float* output, float* input,
unsigned int numGroups, unsigned int subGroupSize) {
for (unsigned i = 0; i < numGroups; ++i) {
for (unsigned j = 0; j < subGroupSize; j++) {
output[i * subGroupSize + j] = input[i * subGroupSize + subGroupSize - j - 1];
}
}
}
void verifyBroadcast(const int *out, const int subGroupSize, int pattern = 0)
{
int expected = pattern;
if (pattern == 0) {
for (int i = 0; i < subGroupSize; i++)
expected += i;
}
int errors = 0;
for (int i = 0; i < BUF_SIZE; i++) {
if (out[i] != expected) {
std::cout << "(sg" << subGroupSize << ") ";
std::cout << "ERROR @ " << i << ": " << out[i] << "\n";
++errors;
break;
}
}
if (errors == 0)
std::cout << "PASS\n";
else
std::cout << "FAIL\n";
}
void verifyTransposeMatrix(const float *TransposeMatrix,
const float* cpuTransposeMatrix,
const int total, const int subGroupSize)
{
int errors = 0;
float eps = 1.0E-6;
for (int i = 0; i < total; i++) {
if (::fabs(TransposeMatrix[i] - cpuTransposeMatrix[i]) > eps) {
std::cout << "(sg" << subGroupSize << ") ";
std::cout << "ITEM: " << i <<
" cpu: " << cpuTransposeMatrix[i] <<
" gpu: " << TransposeMatrix[i] << "\n";
errors++;
break;
}
}
if (errors == 0)
std::cout << "PASS\n";
else
std::cout << "FAIL\n";
}
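// Map the legacy __shfl/__shfl_xor intrinsics onto the *_sync variants with a full-warp mask.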
#define __shfl(v, d) __shfl_sync(0xffffffff, v, d)
#define __shfl_xor(v, d) __shfl_xor_sync(0xffffffff, v, d)
//==================================================================================
// Broadcast
//==================================================================================
__global__ void bcast_shfl_sg8(const int arg, int *out) {
int value = ((threadIdx.x & 0x7) == 0) ? arg : 0;
// Synchronize all threads in warp, and get "value" from lane 0
int out_v = __shfl( value, 0);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = out_v;
}
__global__ void bcast_shfl_xor_sg8(int *out) {
int value = (threadIdx.x & 0x7);
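  // Butterfly xor-shuffle reduction: after the loop every lane holds the sum of the
  // lane IDs within its 8-lane subgroup.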
for (int mask = 1; mask < 0x7; mask *= 2)
value += __shfl_xor(value, mask);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = value;
}
__global__ void bcast_shfl_sg16(const int arg, int *out) {
int value = ((threadIdx.x & 0xf) == 0) ? arg : 0;
// Synchronize all threads in warp, and get "value" from lane 0
int out_v = __shfl( value, 0);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = out_v;
}
__global__ void bcast_shfl_xor_sg16(int *out) {
int value = (threadIdx.x & 0xf);
for (int mask = 1; mask < 0xf; mask *= 2)
value += __shfl_xor(value, mask);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = value;
}
__global__ void bcast_shfl_sg32(const int arg, int *out) {
int value = ((threadIdx.x & 0x1f) == 0) ? arg : 0;
// Synchronize all threads in warp, and get "value" from lane 0
int out_v = __shfl( value, 0);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = out_v;
}
__global__ void bcast_shfl_xor_sg32(int *out) {
int value = (threadIdx.x & 0x1f);
for (int mask = 1; mask < 0x1f; mask *= 2)
value += __shfl_xor(value, mask);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = value;
}
//==================================================================================
// Matrix transpose
//==================================================================================
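// Reverses each blockDim.x-wide segment by exchanging values between lanes;
// the block size must not exceed the warp size for the shuffle to cover it.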
__global__ void transpose_shfl(float* out, const float* in) {
unsigned b_start = blockDim.x * blockIdx.x;
unsigned b_offs = b_start + threadIdx.x;
unsigned s_offs = blockDim.x - threadIdx.x - 1;
float val = in[b_offs];
out[b_offs] = __shfl(val, s_offs);
}
int main(int argc, char* argv[]) {
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " <repeat> <repeat for matrix transpose>\n";
return 1;
}
const int repeat = atoi(argv[1]);
const int repeat2 = atoi(argv[2]);
std::cout << "Broadcast using shuffle functions\n";
int *out = (int *)malloc(sizeof(int) * BUF_SIZE);
int *d_out;
hipMalloc((void **)&d_out, sizeof(int) * BUF_SIZE);
// warmup
for (int n = 0; n < repeat; n++)
hipLaunchKernelGGL(( bcast_shfl_xor_sg8) , dim3(dim3(1)), dim3(dim3(BUF_SIZE)) , 0, 0, d_out);
hipDeviceSynchronize();
std::cout << "Broadcast using the shuffle xor function (subgroup sizes 8, 16, and 32) \n";
auto begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
hipLaunchKernelGGL(( bcast_shfl_xor_sg8) , dim3(dim3(1)), dim3(dim3(BUF_SIZE)) , 0, 0, d_out);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 8): "
<< time * 1e-3f / repeat << " (us)\n";
hipMemcpy(out, d_out, sizeof(int) * BUF_SIZE, hipMemcpyDeviceToHost);
verifyBroadcast(out, 8);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
hipLaunchKernelGGL(( bcast_shfl_xor_sg16) , dim3(dim3(1)), dim3(dim3(BUF_SIZE)) , 0, 0, d_out);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 16): "
<< time * 1e-3f / repeat << " (us)\n";
hipMemcpy(out, d_out, sizeof(int) * BUF_SIZE, hipMemcpyDeviceToHost);
verifyBroadcast(out, 16);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
hipLaunchKernelGGL(( bcast_shfl_xor_sg32) , dim3(dim3(1)), dim3(dim3(BUF_SIZE)) , 0, 0, d_out);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 32): "
<< time * 1e-3f / repeat << " (us)\n";
hipMemcpy(out, d_out, sizeof(int) * BUF_SIZE, hipMemcpyDeviceToHost);
verifyBroadcast(out, 32);
std::cout << "Broadcast using the shuffle function (subgroup sizes 8, 16, and 32) \n";
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
hipLaunchKernelGGL(( bcast_shfl_sg8) , dim3(dim3(1)), dim3(dim3(BUF_SIZE)) , 0, 0, PATTERN, d_out);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 8): "
<< time * 1e-3f / repeat << " (us)\n";
hipMemcpy(out, d_out, sizeof(int) * BUF_SIZE, hipMemcpyDeviceToHost);
verifyBroadcast(out, 8, PATTERN);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
hipLaunchKernelGGL(( bcast_shfl_sg16) , dim3(dim3(1)), dim3(dim3(BUF_SIZE)) , 0, 0, PATTERN, d_out);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 16): "
<< time * 1e-3f / repeat << " (us)\n";
hipMemcpy(out, d_out, sizeof(int) * BUF_SIZE, hipMemcpyDeviceToHost);
verifyBroadcast(out, 16, PATTERN);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
hipLaunchKernelGGL(( bcast_shfl_sg32) , dim3(dim3(1)), dim3(dim3(BUF_SIZE)) , 0, 0, PATTERN, d_out);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 32): "
<< time * 1e-3f / repeat << " (us)\n";
hipMemcpy(out, d_out, sizeof(int) * BUF_SIZE, hipMemcpyDeviceToHost);
verifyBroadcast(out, 32, PATTERN);
free(out);
hipFree(d_out);
std::cout << "matrix transpose using the shuffle function (subgroup sizes are 8, 16, and 32)\n";
const int total = 1 << 27; // total number of elements in a matrix
float* Matrix = (float*)malloc(total * sizeof(float));
float* TransposeMatrix = (float*)malloc(total * sizeof(float));
float* cpuTransposeMatrix = (float*)malloc(total * sizeof(float));
// initialize the input data
for (int i = 0; i < total; i++) {
Matrix[i] = (float)i * 10.0f;
}
float *gpuMatrix;
float *gpuTransposeMatrix;
// allocate the memory on the device side
hipMalloc((void **)&gpuMatrix, total * sizeof(float));
hipMalloc((void **)&gpuTransposeMatrix, total * sizeof(float));
hipMemcpy(gpuMatrix, Matrix, total * sizeof(float), hipMemcpyHostToDevice);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat2; n++)
hipLaunchKernelGGL(( transpose_shfl) , dim3(dim3(total/8)), dim3(dim3(8)) , 0, 0, gpuTransposeMatrix, gpuMatrix);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 8): "
<< time * 1e-3f / repeat2 << " (us)\n";
// Memory transfer from device to host
hipMemcpy(TransposeMatrix, gpuTransposeMatrix, total * sizeof(float), hipMemcpyDeviceToHost);
matrixTransposeCPUReference(cpuTransposeMatrix, Matrix, total/8, 8);
verifyTransposeMatrix(TransposeMatrix, cpuTransposeMatrix, total, 8);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat2; n++)
hipLaunchKernelGGL(( transpose_shfl) , dim3(dim3(total/16)), dim3(dim3(16)) , 0, 0, gpuTransposeMatrix, gpuMatrix);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 16): "
<< time * 1e-3f / repeat2 << " (us)\n";
// Memory transfer from device to host
hipMemcpy(TransposeMatrix, gpuTransposeMatrix, total * sizeof(float), hipMemcpyDeviceToHost);
matrixTransposeCPUReference(cpuTransposeMatrix, Matrix, total/16, 16);
verifyTransposeMatrix(TransposeMatrix, cpuTransposeMatrix, total, 16);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat2; n++)
hipLaunchKernelGGL(( transpose_shfl) , dim3(dim3(total/32)), dim3(dim3(32)) , 0, 0, gpuTransposeMatrix, gpuMatrix);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 32): "
<< time * 1e-3f / repeat2 << " (us)\n";
// Memory transfer from device to host
hipMemcpy(TransposeMatrix, gpuTransposeMatrix, total * sizeof(float), hipMemcpyDeviceToHost);
matrixTransposeCPUReference(cpuTransposeMatrix, Matrix, total/32, 32);
verifyTransposeMatrix(TransposeMatrix, cpuTransposeMatrix, total, 32);
// free the resources
hipFree(gpuMatrix);
hipFree(gpuTransposeMatrix);
free(Matrix);
free(TransposeMatrix);
free(cpuTransposeMatrix);
return 0;
}
| f6613444842b67ec13c00f3f63014430cfb30f42.cu | /*
Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
#define BUF_SIZE 256
#define PATTERN 0xDEADBEEF
// CPU implementation of matrix transpose
void matrixTransposeCPUReference(float* output, float* input,
unsigned int numGroups, unsigned int subGroupSize) {
for (unsigned i = 0; i < numGroups; ++i) {
for (unsigned j = 0; j < subGroupSize; j++) {
output[i * subGroupSize + j] = input[i * subGroupSize + subGroupSize - j - 1];
}
}
}
void verifyBroadcast(const int *out, const int subGroupSize, int pattern = 0)
{
int expected = pattern;
if (pattern == 0) {
for (int i = 0; i < subGroupSize; i++)
expected += i;
}
int errors = 0;
for (int i = 0; i < BUF_SIZE; i++) {
if (out[i] != expected) {
std::cout << "(sg" << subGroupSize << ") ";
std::cout << "ERROR @ " << i << ": " << out[i] << "\n";
++errors;
break;
}
}
if (errors == 0)
std::cout << "PASS\n";
else
std::cout << "FAIL\n";
}
void verifyTransposeMatrix(const float *TransposeMatrix,
const float* cpuTransposeMatrix,
const int total, const int subGroupSize)
{
int errors = 0;
float eps = 1.0E-6;
for (int i = 0; i < total; i++) {
if (std::fabs(TransposeMatrix[i] - cpuTransposeMatrix[i]) > eps) {
std::cout << "(sg" << subGroupSize << ") ";
std::cout << "ITEM: " << i <<
" cpu: " << cpuTransposeMatrix[i] <<
" gpu: " << TransposeMatrix[i] << "\n";
errors++;
break;
}
}
if (errors == 0)
std::cout << "PASS\n";
else
std::cout << "FAIL\n";
}
#define __shfl(v, d) __shfl_sync(0xffffffff, v, d)
#define __shfl_xor(v, d) __shfl_xor_sync(0xffffffff, v, d)
//==================================================================================
// Broadcast
//==================================================================================
__global__ void bcast_shfl_sg8(const int arg, int *out) {
int value = ((threadIdx.x & 0x7) == 0) ? arg : 0;
// Synchronize all threads in warp, and get "value" from lane 0
int out_v = __shfl( value, 0);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = out_v;
}
__global__ void bcast_shfl_xor_sg8(int *out) {
int value = (threadIdx.x & 0x7);
for (int mask = 1; mask < 0x7; mask *= 2)
value += __shfl_xor(value, mask);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = value;
}
__global__ void bcast_shfl_sg16(const int arg, int *out) {
int value = ((threadIdx.x & 0xf) == 0) ? arg : 0;
// Synchronize all threads in warp, and get "value" from lane 0
int out_v = __shfl( value, 0);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = out_v;
}
__global__ void bcast_shfl_xor_sg16(int *out) {
int value = (threadIdx.x & 0xf);
for (int mask = 1; mask < 0xf; mask *= 2)
value += __shfl_xor(value, mask);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = value;
}
__global__ void bcast_shfl_sg32(const int arg, int *out) {
int value = ((threadIdx.x & 0x1f) == 0) ? arg : 0;
// Synchronize all threads in warp, and get "value" from lane 0
int out_v = __shfl( value, 0);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = out_v;
}
__global__ void bcast_shfl_xor_sg32(int *out) {
int value = (threadIdx.x & 0x1f);
for (int mask = 1; mask < 0x1f; mask *= 2)
value += __shfl_xor(value, mask);
size_t oi = blockDim.x * blockIdx.x + threadIdx.x;
out[oi] = value;
}
//==================================================================================
// Matrix transpose
//==================================================================================
__global__ void transpose_shfl(float* out, const float* in) {
unsigned b_start = blockDim.x * blockIdx.x;
unsigned b_offs = b_start + threadIdx.x;
unsigned s_offs = blockDim.x - threadIdx.x - 1;
float val = in[b_offs];
out[b_offs] = __shfl(val, s_offs);
}
int main(int argc, char* argv[]) {
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " <repeat> <repeat for matrix transpose>\n";
return 1;
}
const int repeat = atoi(argv[1]);
const int repeat2 = atoi(argv[2]);
std::cout << "Broadcast using shuffle functions\n";
int *out = (int *)malloc(sizeof(int) * BUF_SIZE);
int *d_out;
cudaMalloc((void **)&d_out, sizeof(int) * BUF_SIZE);
// warmup
for (int n = 0; n < repeat; n++)
bcast_shfl_xor_sg8 <<< dim3(1), dim3(BUF_SIZE) >>> (d_out);
cudaDeviceSynchronize();
std::cout << "Broadcast using the shuffle xor function (subgroup sizes 8, 16, and 32) \n";
auto begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
bcast_shfl_xor_sg8 <<< dim3(1), dim3(BUF_SIZE) >>> (d_out);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 8): "
<< time * 1e-3f / repeat << " (us)\n";
cudaMemcpy(out, d_out, sizeof(int) * BUF_SIZE, cudaMemcpyDeviceToHost);
verifyBroadcast(out, 8);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
bcast_shfl_xor_sg16 <<< dim3(1), dim3(BUF_SIZE) >>> (d_out);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 16): "
<< time * 1e-3f / repeat << " (us)\n";
cudaMemcpy(out, d_out, sizeof(int) * BUF_SIZE, cudaMemcpyDeviceToHost);
verifyBroadcast(out, 16);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
bcast_shfl_xor_sg32 <<< dim3(1), dim3(BUF_SIZE) >>> (d_out);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 32): "
<< time * 1e-3f / repeat << " (us)\n";
cudaMemcpy(out, d_out, sizeof(int) * BUF_SIZE, cudaMemcpyDeviceToHost);
verifyBroadcast(out, 32);
std::cout << "Broadcast using the shuffle function (subgroup sizes 8, 16, and 32) \n";
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
bcast_shfl_sg8 <<< dim3(1), dim3(BUF_SIZE) >>> (PATTERN, d_out);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 8): "
<< time * 1e-3f / repeat << " (us)\n";
cudaMemcpy(out, d_out, sizeof(int) * BUF_SIZE, cudaMemcpyDeviceToHost);
verifyBroadcast(out, 8, PATTERN);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
bcast_shfl_sg16 <<< dim3(1), dim3(BUF_SIZE) >>> (PATTERN, d_out);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 16): "
<< time * 1e-3f / repeat << " (us)\n";
cudaMemcpy(out, d_out, sizeof(int) * BUF_SIZE, cudaMemcpyDeviceToHost);
verifyBroadcast(out, 16, PATTERN);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++)
bcast_shfl_sg32 <<< dim3(1), dim3(BUF_SIZE) >>> (PATTERN, d_out);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 32): "
<< time * 1e-3f / repeat << " (us)\n";
cudaMemcpy(out, d_out, sizeof(int) * BUF_SIZE, cudaMemcpyDeviceToHost);
verifyBroadcast(out, 32, PATTERN);
free(out);
cudaFree(d_out);
std::cout << "matrix transpose using the shuffle function (subgroup sizes are 8, 16, and 32)\n";
const int total = 1 << 27; // total number of elements in a matrix
float* Matrix = (float*)malloc(total * sizeof(float));
float* TransposeMatrix = (float*)malloc(total * sizeof(float));
float* cpuTransposeMatrix = (float*)malloc(total * sizeof(float));
// initialize the input data
for (int i = 0; i < total; i++) {
Matrix[i] = (float)i * 10.0f;
}
float *gpuMatrix;
float *gpuTransposeMatrix;
// allocate the memory on the device side
cudaMalloc((void **)&gpuMatrix, total * sizeof(float));
cudaMalloc((void **)&gpuTransposeMatrix, total * sizeof(float));
cudaMemcpy(gpuMatrix, Matrix, total * sizeof(float), cudaMemcpyHostToDevice);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat2; n++)
transpose_shfl <<< dim3(total/8), dim3(8) >>> (gpuTransposeMatrix, gpuMatrix);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 8): "
<< time * 1e-3f / repeat2 << " (us)\n";
// Memory transfer from device to host
cudaMemcpy(TransposeMatrix, gpuTransposeMatrix, total * sizeof(float), cudaMemcpyDeviceToHost);
matrixTransposeCPUReference(cpuTransposeMatrix, Matrix, total/8, 8);
verifyTransposeMatrix(TransposeMatrix, cpuTransposeMatrix, total, 8);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat2; n++)
transpose_shfl <<< dim3(total/16), dim3(16) >>> (gpuTransposeMatrix, gpuMatrix);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 16): "
<< time * 1e-3f / repeat2 << " (us)\n";
// Memory transfer from device to host
cudaMemcpy(TransposeMatrix, gpuTransposeMatrix, total * sizeof(float), cudaMemcpyDeviceToHost);
matrixTransposeCPUReference(cpuTransposeMatrix, Matrix, total/16, 16);
verifyTransposeMatrix(TransposeMatrix, cpuTransposeMatrix, total, 16);
begin = std::chrono::steady_clock::now();
for (int n = 0; n < repeat2; n++)
transpose_shfl <<< dim3(total/32), dim3(32) >>> (gpuTransposeMatrix, gpuMatrix);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
std::cout << "Average kernel time (subgroup size = 32): "
<< time * 1e-3f / repeat2 << " (us)\n";
// Memory transfer from device to host
cudaMemcpy(TransposeMatrix, gpuTransposeMatrix, total * sizeof(float), cudaMemcpyDeviceToHost);
matrixTransposeCPUReference(cpuTransposeMatrix, Matrix, total/32, 32);
verifyTransposeMatrix(TransposeMatrix, cpuTransposeMatrix, total, 32);
// free the resources
cudaFree(gpuMatrix);
cudaFree(gpuTransposeMatrix);
free(Matrix);
free(TransposeMatrix);
free(cpuTransposeMatrix);
return 0;
}
|
175a185e179c14cffdeeca4efaffa95475957455.hip | // !!! This is a file automatically generated by hipify!!!
#include "binarization.cuh"
#include "constants.cuh"
#include "errors.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__global__
void binarized(
char *cellValidities,
char *cellValues,
unsigned int *binarizedValidities,
unsigned int *binarizedValues) {
int idxMinutia = blockIdx.x;
int idxInt = threadIdx.x;
int intPerCylinder = NC / BITS;
int idx = idxMinutia * intPerCylinder + idxInt;
int idxBit = idxMinutia * NC + idxInt * BITS;
unsigned int validity = 0, value = 0;
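  // Pack BITS consecutive cell flags into one unsigned int, first cell in the most significant bit.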
for (int i = 0; i < BITS; ++i) {
validity <<= 1U;
validity |= cellValidities[idxBit+i];
value <<= 1U;
value |= cellValues[idxBit+i];
}
binarizedValidities[idx] = validity;
binarizedValues[idx] = value;
}
__host__
void devBinarizedTemplate(
const int n,
char *devCellValidities,
char *devCellValues,
unsigned int *devBinarizedValidities,
unsigned int *devBinarizedValues) {
int intPerCylinder = NC/BITS;
hipLaunchKernelGGL(( binarized), dim3(n), dim3(intPerCylinder), 0, 0,
devCellValidities, devCellValues, devBinarizedValidities, devBinarizedValues);
handleError(
hipPeekAtLastError());
}
| 175a185e179c14cffdeeca4efaffa95475957455.cu | #include "binarization.cuh"
#include "constants.cuh"
#include "errors.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__
void binarized(
char *cellValidities,
char *cellValues,
unsigned int *binarizedValidities,
unsigned int *binarizedValues) {
int idxMinutia = blockIdx.x;
int idxInt = threadIdx.x;
int intPerCylinder = NC / BITS;
int idx = idxMinutia * intPerCylinder + idxInt;
int idxBit = idxMinutia * NC + idxInt * BITS;
unsigned int validity = 0, value = 0;
for (int i = 0; i < BITS; ++i) {
validity <<= 1U;
validity |= cellValidities[idxBit+i];
value <<= 1U;
value |= cellValues[idxBit+i];
}
binarizedValidities[idx] = validity;
binarizedValues[idx] = value;
}
__host__
void devBinarizedTemplate(
const int n,
char *devCellValidities,
char *devCellValues,
unsigned int *devBinarizedValidities,
unsigned int *devBinarizedValues) {
int intPerCylinder = NC/BITS;
binarized<<<n, intPerCylinder>>>(
devCellValidities, devCellValues, devBinarizedValidities, devBinarizedValues);
handleError(
cudaPeekAtLastError());
}
|
15c5f0db53977f4cf9cf0bf0ebfc081e856865f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* bfs2_task_sgpu.cu
*
* Created on: Mar 28, 2017
* Author: chaoliu
*/
#include "bfs2_task_mgpu.h"
#include "bfs2_kernel.h"
#include "../../../common/helper_err.h"
thread_local int bfsMGPU::local_numNodes;
thread_local int bfsMGPU::local_startNodeIndex;
void bfsMGPU::initImpl(Node_t *graph_nodes,
Edge_t *graph_edges,
int *shortestPath,
int total_graph_nodes,
int total_graph_edges,
int src_node){
if(__localThreadId ==0){
std::cout<<"task: "<<getCurrentTask()->getName()<<" begin init ...\n";
this->graph_nodes = graph_nodes;
this->graph_edges = graph_edges;
this->shortestPath = shortestPath;
this->total_graph_edges = total_graph_edges;
this->total_graph_nodes = total_graph_nodes;
this->src_node = src_node;
this->g_spath = new int[total_graph_nodes];
memcpy(this->g_spath, shortestPath, total_graph_nodes*sizeof(int));
this->spath_array = new int[total_graph_nodes*__numLocalThreads];
memset(this->spath_array, 0, total_graph_nodes*__numLocalThreads*sizeof(int));
this->g_frontWave = new uint8_t[total_graph_nodes];
memset(this->g_frontWave, 0, total_graph_nodes*sizeof(uint8_t));
this->nextWave_array = new uint8_t[total_graph_nodes*__numLocalThreads];
memset(this->nextWave_array, 0, total_graph_nodes*__numLocalThreads*sizeof(uint8_t));
this->stopflag_array = new uint8_t[__numLocalThreads];
}
intra_Barrier();
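  // Statically partition the graph nodes across local threads; the first
  // (total_graph_nodes % __numLocalThreads) threads take one extra node.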
int nodesPerThread = total_graph_nodes / __numLocalThreads;
if(__localThreadId<total_graph_nodes%__numLocalThreads){
local_numNodes = nodesPerThread+1;
local_startNodeIndex = __localThreadId*(nodesPerThread+1);
}
else{
local_numNodes = nodesPerThread;
local_startNodeIndex = __localThreadId*nodesPerThread + total_graph_nodes%__numLocalThreads;
}
intra_Barrier();
if(__localThreadId ==0){
std::cout<<"task: "<<getCurrentTask()->getName()<<" finish initImpl.\n";
}
}
void bfsMGPU::runImpl(double runtime[][5],
int blocksize,
int batch,
MemType memtype){
if(__localThreadId == 0){
std::cout<<getCurrentTask()->getName()<<" begin run ..."<<std::endl;
}
Timer timer, timer0;
double totaltime;
GpuData<uint8_t> graph_nextWaveFlag(total_graph_nodes, memtype);
graph_nextWaveFlag.initH(0);
GpuData<uint8_t> graph_frontWaveFlag(local_numNodes, memtype);
//graph_frontWaveFlag.initH(0);
stopflag_array[__localThreadId]=1;
bool stopflag = false;
if(src_node>=local_startNodeIndex &&
src_node<local_startNodeIndex+local_numNodes){
g_frontWave[src_node]=1;
stopflag_array[__localThreadId]=0;
}
GpuData<Node_t> nodes(total_graph_nodes, memtype);
GpuData<Edge_t> edges(total_graph_edges, memtype);
GpuData<int> spath(total_graph_nodes, memtype);
nodes.initH(graph_nodes);
edges.initH(graph_edges);
spath.initH(shortestPath);
intra_Barrier();
//std::cout<<total_graph_nodes<<" "<<total_graph_edges<<" "<<src_node<<std::endl;
/*
* copyin
*/
timer0.start();
double copyinTime = 0;
timer.start();
nodes.sync();
edges.sync();
//spath.sync();
//graph_frontWaveFlag.sync();
graph_nextWaveFlag.sync();
copyinTime = timer.stop();
/*
* do bfs search
*/
//std::cout<<"start bfs processing ..."<<std::endl;
double kernelTime=0;
double copyoutTime=0;
double hostcompTime = 0;
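  // Level-synchronous BFS: each pass expands the local frontier on the GPU, then the
  // host threads merge the per-thread next-wave flags and path lengths before the next pass.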
while(stopflag==false){
stopflag = true;
if(stopflag_array[__localThreadId]==0){
stopflag_array[__localThreadId] =1;
timer.start();
graph_frontWaveFlag.initD(g_frontWave+local_startNodeIndex);
spath.initD(g_spath);
graph_nextWaveFlag.initD(0);
copyinTime += timer.stop();
timer.start();
dim3 block(blocksize, 1, 1);
dim3 grid((local_numNodes+blocksize*batch-1)/(blocksize*batch),1,1);
hipLaunchKernelGGL(( bfs_findFront), dim3(grid), dim3(block), 0, __streamId,
nodes.getD(),
edges.getD(),
spath.getD(true),
graph_frontWaveFlag.getD(true),
graph_nextWaveFlag.getD(true),
local_numNodes,
batch,
local_startNodeIndex);
checkCudaErr(hipGetLastError());
checkCudaErr(hipStreamSynchronize(__streamId));
//checkCudaErr(hipDeviceSynchronize());
kernelTime += timer.stop();
timer.start();
graph_nextWaveFlag.fetchD(nextWave_array+__localThreadId*total_graph_nodes);
spath.fetchD(spath_array+__localThreadId*total_graph_nodes);
copyoutTime += timer.stop();
}
intra_Barrier();
timer.start();
//std::cout<<local_startNodeIndex<<" "<<local_numNodes<<std::endl;
for(int j=0; j<__numLocalThreads; j++){
for(int i=local_startNodeIndex; i<local_startNodeIndex+local_numNodes; i++){
//for(int j=0; j<__numLocalThreads; j++){
if(nextWave_array[j*total_graph_nodes+i]==1 && g_frontWave[i]==0){
g_frontWave[i] = 1;
g_spath[i] = spath_array[j*total_graph_nodes+i];
if(stopflag_array[__localThreadId]==1){
stopflag_array[__localThreadId]=0;
}
nextWave_array[j*total_graph_nodes+i] = 0;
}
}
}
hostcompTime += timer.stop();
intra_Barrier();
for(int i=0; i<__numLocalThreads; i++){
if(stopflag_array[i] == 0){
stopflag = false;
break;
}
}
intra_Barrier();
//std::cout<<__localThreadId<<": "<<stopflag<<std::endl;
/*if(__localThreadId==0){
std::cout<<__localThreadId<<": "<<(int)stopflag<<std::endl;
}*/
}
intra_Barrier();
totaltime = timer0.stop();
runtime[__localThreadId][0] = totaltime;
//runtime[0] = kernelTime + copyinTime + copyoutTime;
runtime[__localThreadId][1] = kernelTime;
runtime[__localThreadId][2] = copyinTime;
runtime[__localThreadId][3] = copyoutTime;
runtime[__localThreadId][4] = hostcompTime;
intra_Barrier();
if(__localThreadId ==0){
memcpy(shortestPath, g_spath, sizeof(int)*total_graph_nodes);
      delete [] g_spath;
      delete [] spath_array;
      delete [] nextWave_array;
      delete [] g_frontWave;
      delete [] stopflag_array;
std::cout<<"task: "<<getCurrentTask()->getName()<<" thread "<<__localThreadId<<" finish runImpl.\n";
}
}
| 15c5f0db53977f4cf9cf0bf0ebfc081e856865f1.cu | /*
* bfs2_task_sgpu.cu
*
* Created on: Mar 28, 2017
* Author: chaoliu
*/
#include "bfs2_task_mgpu.h"
#include "bfs2_kernel.h"
#include "../../../common/helper_err.h"
thread_local int bfsMGPU::local_numNodes;
thread_local int bfsMGPU::local_startNodeIndex;
void bfsMGPU::initImpl(Node_t *graph_nodes,
Edge_t *graph_edges,
int *shortestPath,
int total_graph_nodes,
int total_graph_edges,
int src_node){
if(__localThreadId ==0){
std::cout<<"task: "<<getCurrentTask()->getName()<<" begin init ...\n";
this->graph_nodes = graph_nodes;
this->graph_edges = graph_edges;
this->shortestPath = shortestPath;
this->total_graph_edges = total_graph_edges;
this->total_graph_nodes = total_graph_nodes;
this->src_node = src_node;
this->g_spath = new int[total_graph_nodes];
memcpy(this->g_spath, shortestPath, total_graph_nodes*sizeof(int));
this->spath_array = new int[total_graph_nodes*__numLocalThreads];
memset(this->spath_array, 0, total_graph_nodes*__numLocalThreads*sizeof(int));
this->g_frontWave = new uint8_t[total_graph_nodes];
memset(this->g_frontWave, 0, total_graph_nodes*sizeof(uint8_t));
this->nextWave_array = new uint8_t[total_graph_nodes*__numLocalThreads];
memset(this->nextWave_array, 0, total_graph_nodes*__numLocalThreads*sizeof(uint8_t));
this->stopflag_array = new uint8_t[__numLocalThreads];
}
intra_Barrier();
int nodesPerThread = total_graph_nodes / __numLocalThreads;
if(__localThreadId<total_graph_nodes%__numLocalThreads){
local_numNodes = nodesPerThread+1;
local_startNodeIndex = __localThreadId*(nodesPerThread+1);
}
else{
local_numNodes = nodesPerThread;
local_startNodeIndex = __localThreadId*nodesPerThread + total_graph_nodes%__numLocalThreads;
}
intra_Barrier();
if(__localThreadId ==0){
std::cout<<"task: "<<getCurrentTask()->getName()<<" finish initImpl.\n";
}
}
void bfsMGPU::runImpl(double runtime[][5],
int blocksize,
int batch,
MemType memtype){
if(__localThreadId == 0){
std::cout<<getCurrentTask()->getName()<<" begin run ..."<<std::endl;
}
Timer timer, timer0;
double totaltime;
GpuData<uint8_t> graph_nextWaveFlag(total_graph_nodes, memtype);
graph_nextWaveFlag.initH(0);
GpuData<uint8_t> graph_frontWaveFlag(local_numNodes, memtype);
//graph_frontWaveFlag.initH(0);
stopflag_array[__localThreadId]=1;
bool stopflag = false;
if(src_node>=local_startNodeIndex &&
src_node<local_startNodeIndex+local_numNodes){
g_frontWave[src_node]=1;
stopflag_array[__localThreadId]=0;
}
GpuData<Node_t> nodes(total_graph_nodes, memtype);
GpuData<Edge_t> edges(total_graph_edges, memtype);
GpuData<int> spath(total_graph_nodes, memtype);
nodes.initH(graph_nodes);
edges.initH(graph_edges);
spath.initH(shortestPath);
intra_Barrier();
//std::cout<<total_graph_nodes<<" "<<total_graph_edges<<" "<<src_node<<std::endl;
/*
* copyin
*/
timer0.start();
double copyinTime = 0;
timer.start();
nodes.sync();
edges.sync();
//spath.sync();
//graph_frontWaveFlag.sync();
graph_nextWaveFlag.sync();
copyinTime = timer.stop();
/*
* do bfs search
*/
//std::cout<<"start bfs processing ..."<<std::endl;
double kernelTime=0;
double copyoutTime=0;
double hostcompTime = 0;
while(stopflag==false){
stopflag = true;
if(stopflag_array[__localThreadId]==0){
stopflag_array[__localThreadId] =1;
timer.start();
graph_frontWaveFlag.initD(g_frontWave+local_startNodeIndex);
spath.initD(g_spath);
graph_nextWaveFlag.initD(0);
copyinTime += timer.stop();
timer.start();
dim3 block(blocksize, 1, 1);
dim3 grid((local_numNodes+blocksize*batch-1)/(blocksize*batch),1,1);
bfs_findFront<<<grid, block, 0, __streamId>>>(
nodes.getD(),
edges.getD(),
spath.getD(true),
graph_frontWaveFlag.getD(true),
graph_nextWaveFlag.getD(true),
local_numNodes,
batch,
local_startNodeIndex);
checkCudaErr(cudaGetLastError());
checkCudaErr(cudaStreamSynchronize(__streamId));
//checkCudaErr(cudaDeviceSynchronize());
kernelTime += timer.stop();
timer.start();
graph_nextWaveFlag.fetchD(nextWave_array+__localThreadId*total_graph_nodes);
spath.fetchD(spath_array+__localThreadId*total_graph_nodes);
copyoutTime += timer.stop();
}
intra_Barrier();
timer.start();
//std::cout<<local_startNodeIndex<<" "<<local_numNodes<<std::endl;
for(int j=0; j<__numLocalThreads; j++){
for(int i=local_startNodeIndex; i<local_startNodeIndex+local_numNodes; i++){
//for(int j=0; j<__numLocalThreads; j++){
if(nextWave_array[j*total_graph_nodes+i]==1 && g_frontWave[i]==0){
g_frontWave[i] = 1;
g_spath[i] = spath_array[j*total_graph_nodes+i];
if(stopflag_array[__localThreadId]==1){
stopflag_array[__localThreadId]=0;
}
nextWave_array[j*total_graph_nodes+i] = 0;
}
}
}
hostcompTime += timer.stop();
intra_Barrier();
for(int i=0; i<__numLocalThreads; i++){
if(stopflag_array[i] == 0){
stopflag = false;
break;
}
}
intra_Barrier();
//std::cout<<__localThreadId<<": "<<stopflag<<std::endl;
/*if(__localThreadId==0){
std::cout<<__localThreadId<<": "<<(int)stopflag<<std::endl;
}*/
}
intra_Barrier();
totaltime = timer0.stop();
runtime[__localThreadId][0] = totaltime;
//runtime[0] = kernelTime + copyinTime + copyoutTime;
runtime[__localThreadId][1] = kernelTime;
runtime[__localThreadId][2] = copyinTime;
runtime[__localThreadId][3] = copyoutTime;
runtime[__localThreadId][4] = hostcompTime;
intra_Barrier();
if(__localThreadId ==0){
memcpy(shortestPath, g_spath, sizeof(int)*total_graph_nodes);
      delete [] g_spath;
      delete [] spath_array;
      delete [] nextWave_array;
      delete [] g_frontWave;
      delete [] stopflag_array;
std::cout<<"task: "<<getCurrentTask()->getName()<<" thread "<<__localThreadId<<" finish runImpl.\n";
}
}
|
707768da4e4dd111892b598f7c56610c39df714b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <thrust/device_vector.h>
#include <thrust/shared_algorithms.h>
using namespace cv;
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
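// Element-wise alpha blend of two grayscale images using a grid-stride loop.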
__global__ void blendKernel(uchar * input1,uchar * input2, uchar * output,float alpha, int size)
{
int index = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
while(index<size)
{
output[index] = input1[index]*alpha + input2[index]*(1-alpha);
index+=stride;
}
}
int main(int argc, char const *argv[]) {
int dim = 4096;
if(argc ==2)
{
dim = atoi(argv[1]);
}
hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop,0);
Mat input1 = imread("../opencv/shared/car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat input2 = imread("../opencv/shared/car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat temp1;
resize(input1,temp1,Size(dim,dim));
input1 = temp1;
Mat temp2;
resize(input2,temp2,Size(dim,dim));
input2 = temp2;
uchar * d_input1, *d_input2,*d_output;
hipMalloc((void **)&d_input1,sizeof(uchar)*dim*dim);
hipMalloc((void **)&d_input2,sizeof(uchar)*dim*dim);
hipMalloc((void **)&d_output,sizeof(uchar)*dim*dim);
uchar * h_output = new uchar[dim*dim];
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
for(int i = 0; i<10;i++)
{
hipMemcpy(d_input1,input1.ptr(),sizeof(uchar)*dim*dim,hipMemcpyHostToDevice);
hipMemcpy(d_input2,input2.ptr(),sizeof(uchar)*dim*dim,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( blendKernel), dim3(30),dim3(1024), 0, 0, d_input1,d_input2,d_output,0.3,dim*dim);
hipMemcpy(h_output,d_output,sizeof(uchar)*dim*dim,hipMemcpyDeviceToHost);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float time_in_ms;
hipEventElapsedTime(&time_in_ms,start,stop);
printf("%f\n",time_in_ms);
Mat output (Size(input1.cols,input1.rows),CV_8UC1,h_output);
#ifdef OWRITE
imwrite("blend-input1.png",input1);
imwrite("blend-input2.png",input2);
imwrite("blend-output.png",output);
#endif
#ifdef SHOW
Mat out;
resize(input1,temp1,Size(512,512));
imshow("blend-input1.png",temp1);
resize(input2,temp2,Size(512,512));
imshow("blend-input2.png",temp2);
resize(output,out,Size(512,512));
imshow("blend-output.png",out);
waitKey(0);
#endif
return 0;
}
| 707768da4e4dd111892b598f7c56610c39df714b.cu | #include <opencv2/opencv.hpp>
#include <thrust/device_vector.h>
#include <thrust/shared_algorithms.h>
using namespace cv;
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
__global__ void blendKernel(uchar * input1,uchar * input2, uchar * output,float alpha, int size)
{
int index = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
while(index<size)
{
output[index] = input1[index]*alpha + input2[index]*(1-alpha);
index+=stride;
}
}
int main(int argc, char const *argv[]) {
int dim = 4096;
if(argc ==2)
{
dim = atoi(argv[1]);
}
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop,0);
Mat input1 = imread("../opencv/shared/car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat input2 = imread("../opencv/shared/car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat temp1;
resize(input1,temp1,Size(dim,dim));
input1 = temp1;
Mat temp2;
resize(input2,temp2,Size(dim,dim));
input2 = temp2;
uchar * d_input1, *d_input2,*d_output;
cudaMalloc((void **)&d_input1,sizeof(uchar)*dim*dim);
cudaMalloc((void **)&d_input2,sizeof(uchar)*dim*dim);
cudaMalloc((void **)&d_output,sizeof(uchar)*dim*dim);
uchar * h_output = new uchar[dim*dim];
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for(int i = 0; i<10;i++)
{
cudaMemcpy(d_input1,input1.ptr(),sizeof(uchar)*dim*dim,cudaMemcpyHostToDevice);
cudaMemcpy(d_input2,input2.ptr(),sizeof(uchar)*dim*dim,cudaMemcpyHostToDevice);
blendKernel<<<30,1024>>>(d_input1,d_input2,d_output,0.3,dim*dim);
cudaMemcpy(h_output,d_output,sizeof(uchar)*dim*dim,cudaMemcpyDeviceToHost);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time_in_ms;
cudaEventElapsedTime(&time_in_ms,start,stop);
printf("%f\n",time_in_ms);
Mat output (Size(input1.cols,input1.rows),CV_8UC1,h_output);
#ifdef OWRITE
imwrite("blend-input1.png",input1);
imwrite("blend-input2.png",input2);
imwrite("blend-output.png",output);
#endif
#ifdef SHOW
Mat out;
resize(input1,temp1,Size(512,512));
imshow("blend-input1.png",temp1);
resize(input2,temp2,Size(512,512));
imshow("blend-input2.png",temp2);
resize(output,out,Size(512,512));
imshow("blend-output.png",out);
waitKey(0);
#endif
return 0;
}
|
497fcec47d13b8f82dc25b7aed30d6a7db2ba097.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "head.h"
#include "config.h"
#include "helper_cuda.h"
#include "triplet.h"
#include "matrix_form_cuda_jacobi_solver.h"
using namespace std;
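// Element-wise product of the inverse diagonal of A with the right-hand side b.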
template<typename T>
__global__ void set_Dinverseb(const T* dig_A_coefficient_inverse_device,const T* b_device,T* dig_inverse_b_device,const int N)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<N)
{
dig_inverse_b_device[i] = dig_A_coefficient_inverse_device[i]*b_device[i];
}
}
template<typename T>
__global__ void init(T* x_device_iteration,int N_2)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<N_2)
{
x_device_iteration[i]=0;
}
}
template<typename T>
__global__ void get_tol_csr(T* x_device_iteration,T* residual_device,int N)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<N)
{
residual_device[i]=x_device_iteration[i]-x_device_iteration[i+N];
}
}
namespace wtyatzoo
{
template<typename T>
matrix_form_cuda_jacobi_solver<T>::matrix_form_cuda_jacobi_solver(const T* val_A,const int* col_index_A,const int* row_offset_A,const int num_non_zero_A,const int N)
{
int i,j;
//need to free
T* dig_A_coefficient_inverse=(T*)malloc(sizeof(T)*N);
this->num_non_zero_T=num_non_zero_A-N;
this->N=N;
// printf("num_non_zero_T :%d \n N :%d\n num_non_zero_A :%d\n",num_non_zero_T,N,num_non_zero_A);
// need to free!
int * row_offset_T=(int *)malloc(sizeof(int )*(N+1));
int * col_index_T=(int *)malloc(sizeof(int )*num_non_zero_T);
T* val_T=(T*)malloc(sizeof(T)*num_non_zero_T);
int offset_now=0;
int row_now=0;
row_offset_T[0]=0;
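    // Copy the off-diagonal entries of A into CSR arrays for T and record 1/diag(A);
    // the loop after this one rescales the values to T = -D^{-1}(A - D).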
for(i=0;i<N;++i)
{
for(j=row_offset_A[i];j<row_offset_A[i+1];++j)
{
if(col_index_A[j]==i)
{
dig_A_coefficient_inverse[i]=1.0/val_A[j];
}
else if(col_index_A[j]!=i)
{
val_T[offset_now]=val_A[j];
col_index_T[offset_now]=col_index_A[j];
offset_now++;
}
}
row_offset_T[i+1]=offset_now;
}
for(i=0;i<N;++i)
{
for(j=row_offset_T[i];j<row_offset_T[i+1];++j)
{
val_T[j]=val_T[j]*dig_A_coefficient_inverse[i]*-1.0;
}
}
checkCudaErrors(hipMalloc((void**)&val_T_device,sizeof(T)*num_non_zero_T));
checkCudaErrors(hipMalloc((void**)&col_index_T_device,sizeof(int )*num_non_zero_T));
checkCudaErrors(hipMalloc((void**)&row_offset_T_device,sizeof(int )*(N+1)));
checkCudaErrors(hipMalloc((void**)&dig_A_coefficient_inverse_device,sizeof(T)*N));
checkCudaErrors(hipMemcpy(val_T_device,val_T,sizeof(T)*num_non_zero_T,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(col_index_T_device,col_index_T,sizeof(int )*num_non_zero_T,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(row_offset_T_device,row_offset_T,sizeof(int )*(N+1),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dig_A_coefficient_inverse_device,dig_A_coefficient_inverse,sizeof(T)*N,hipMemcpyHostToDevice));
//cpu's used memory
{
free(dig_A_coefficient_inverse);
free(row_offset_T);
free(col_index_T);
free(val_T);
}
my_cublas_handle=0;
checkCudaErrors(hipblasCreate(&my_cublas_handle));
my_cusparse_handle=0;
checkCudaErrors(hipsparseCreate(& my_cusparse_handle));
my_descr=0;
checkCudaErrors(hipsparseCreateMatDescr(& my_descr));
hipsparseSetMatType(my_descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(my_descr,HIPSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(hipMalloc((void**)&b_device,sizeof(T)*N));
checkCudaErrors(hipMalloc((void**)&x_device_iteration,sizeof(T)*2*N));
checkCudaErrors(hipMalloc((void**)&residual_device,sizeof(T)*N));
checkCudaErrors(hipMalloc((void**)&dig_inverse_b_device,sizeof(T)*N));
}
// assume that the vector's i & j pair is without repetition and in order, so before this function, the user need to make the data be satisfied with the assumption.
template<typename T>
matrix_form_cuda_jacobi_solver<T>::matrix_form_cuda_jacobi_solver(const std::vector<triplet<T> > &mytriplets_A,const int N)
{
int i,j;
vector<triplet<T> > mytriplets;
while(!mytriplets.empty())
{
mytriplets.pop_back();
}
//need to free
T* dig_A_coefficient_inverse=(T*)malloc(sizeof(T)*N);
int num_non_zero_A=mytriplets_A.size();
for(i=0;i<num_non_zero_A;++i)
{
triplet<T> tri_now=mytriplets_A[i];
if(tri_now.col!=tri_now.row)
{
mytriplets.push_back(tri_now);
}
else if(tri_now.col==tri_now.row)
{
dig_A_coefficient_inverse[tri_now.col]=1.0/tri_now.val;
}
}
this->num_non_zero_T=mytriplets.size();
this->N=N;
// printf("num_non_zero_T :%d \n N :%d\n num_non_zero_A :%d\n",num_non_zero_T,N,num_non_zero_A);
for(i=0;i<num_non_zero_T;++i)
{
int row_now=mytriplets[i].row;
mytriplets[i].val=mytriplets[i].val*dig_A_coefficient_inverse[row_now]*-1;
}
// need to free!
int * row_offset_T=(int *)malloc(sizeof(int )*(N+1));
int * col_index_T=(int *)malloc(sizeof(int )*num_non_zero_T);
T* val_T=(T*)malloc(sizeof(T)*num_non_zero_T);
int offset_now=0;
int row_now=0;
row_offset_T[0]=0;
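    // Convert the sorted, duplicate-free off-diagonal triplets to CSR; this assumes
    // every row contains at least one off-diagonal entry.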
for(i=0;i<num_non_zero_T;++i)
{
if(mytriplets[i].row==row_now)
{
row_offset_T[row_now]=offset_now;
row_now++; offset_now++;
}
else
{
offset_now++;
}
col_index_T[i]=mytriplets[i].col;
val_T[i]=mytriplets[i].val;
}
row_offset_T[N]=num_non_zero_T;
checkCudaErrors(hipMalloc((void**)&val_T_device,sizeof(T)*num_non_zero_T));
checkCudaErrors(hipMalloc((void**)&col_index_T_device,sizeof(int )*num_non_zero_T));
checkCudaErrors(hipMalloc((void**)&row_offset_T_device,sizeof(int )*(N+1)));
checkCudaErrors(hipMalloc((void**)&dig_A_coefficient_inverse_device,sizeof(T)*N));
checkCudaErrors(hipMemcpy(val_T_device,val_T,sizeof(T)*num_non_zero_T,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(col_index_T_device,col_index_T,sizeof(int )*num_non_zero_T,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(row_offset_T_device,row_offset_T,sizeof(int )*(N+1),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dig_A_coefficient_inverse_device,dig_A_coefficient_inverse,sizeof(T)*N,hipMemcpyHostToDevice));
//cpu's used memory
{
free(dig_A_coefficient_inverse);
free(row_offset_T);
free(col_index_T);
free(val_T);
}
my_cublas_handle=0;
checkCudaErrors(hipblasCreate(&my_cublas_handle));
my_cusparse_handle=0;
checkCudaErrors(hipsparseCreate(& my_cusparse_handle));
my_descr=0;
checkCudaErrors(hipsparseCreateMatDescr(& my_descr));
hipsparseSetMatType(my_descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(my_descr,HIPSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(hipMalloc((void**)&b_device,sizeof(T)*N));
checkCudaErrors(hipMalloc((void**)&x_device_iteration,sizeof(T)*2*N));
checkCudaErrors(hipMalloc((void**)&residual_device,sizeof(T)*N));
checkCudaErrors(hipMalloc((void**)&dig_inverse_b_device,sizeof(T)*N));
}
template<typename T>
matrix_form_cuda_jacobi_solver<T>::matrix_form_cuda_jacobi_solver( T**A,const T*dig_A_coefficient,const int N)
{
// assemble the sparse matrix structure on GPU based on the dese matrix input
int i,j;
vector<triplet<T> > mytriplets;
while(!mytriplets.empty())
{
mytriplets.pop_back();
}
//need to free
T* dig_A_coefficient_inverse=(T*)malloc(sizeof(T)*N);
T EPS=1e-10;
for(i=0;i<N;++i)
{
dig_A_coefficient_inverse[i] = 1.0/dig_A_coefficient[i];
for(j=0;j<N;++j)
{
// printf("%lf\n",A[i][j]);
if(fabs(A[i][j])>=EPS&&(i!=j))
{
mytriplets.push_back(triplet<T>(i,j,-1*A[i][j]*dig_A_coefficient_inverse[i]));
}
}
}
this->num_non_zero_T=mytriplets.size();
this->N=N;
// printf("num_non_zero_T :%d\n N :%d\n",num_non_zero_T,N);
// need to free!
int * row_offset_T=(int *)malloc(sizeof(int )*(N+1));
int * col_index_T=(int *)malloc(sizeof(int )*num_non_zero_T);
T* val_T=(T*)malloc(sizeof(T)*num_non_zero_T);
int offset_now=0;
int row_now=0;
row_offset_T[0]=0;
for(i=0;i<num_non_zero_T;++i)
{
if(mytriplets[i].row==row_now)
{
row_offset_T[row_now]=offset_now;
row_now++; offset_now++;
}
else
{
offset_now++;
}
col_index_T[i]=mytriplets[i].col;
val_T[i]=mytriplets[i].val;
}
row_offset_T[N]=num_non_zero_T;
checkCudaErrors(hipMalloc((void**)&val_T_device,sizeof(T)*num_non_zero_T));
checkCudaErrors(hipMalloc((void**)&col_index_T_device,sizeof(int )*num_non_zero_T));
checkCudaErrors(hipMalloc((void**)&row_offset_T_device,sizeof(int )*(N+1)));
checkCudaErrors(hipMalloc((void**)&dig_A_coefficient_inverse_device,sizeof(T)*N));
checkCudaErrors(hipMemcpy(val_T_device,val_T,sizeof(T)*num_non_zero_T,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(col_index_T_device,col_index_T,sizeof(int )*num_non_zero_T,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(row_offset_T_device,row_offset_T,sizeof(int )*(N+1),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dig_A_coefficient_inverse_device,dig_A_coefficient_inverse,sizeof(T)*N,hipMemcpyHostToDevice));
//cpu's used memory
{
free(dig_A_coefficient_inverse);
free(row_offset_T);
free(col_index_T);
free(val_T);
}
my_cublas_handle=0;
checkCudaErrors(hipblasCreate(&my_cublas_handle));
my_cusparse_handle=0;
checkCudaErrors(hipsparseCreate(& my_cusparse_handle));
my_descr=0;
checkCudaErrors(hipsparseCreateMatDescr(& my_descr));
hipsparseSetMatType(my_descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(my_descr,HIPSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(hipMalloc((void**)&b_device,sizeof(T)*N));
checkCudaErrors(hipMalloc((void**)&x_device_iteration,sizeof(T)*2*N));
checkCudaErrors(hipMalloc((void**)&residual_device,sizeof(T)*N));
checkCudaErrors(hipMalloc((void**)&dig_inverse_b_device,sizeof(T)*N));
}
template<typename T>
matrix_form_cuda_jacobi_solver<T>::~matrix_form_cuda_jacobi_solver()
{
// printf("GG\n");
checkCudaErrors(hipFree(dig_A_coefficient_inverse_device));
checkCudaErrors(hipFree(val_T_device));
checkCudaErrors(hipFree(col_index_T_device));
checkCudaErrors(hipFree(row_offset_T_device));
checkCudaErrors(hipFree(b_device));
checkCudaErrors(hipFree(dig_inverse_b_device));
checkCudaErrors(hipFree(x_device_iteration));
checkCudaErrors(hipFree(residual_device));
}
template<typename T>
int matrix_form_cuda_jacobi_solver<T>::apply(const T b[],T x[],const T tol,const int max_iteration_num,const bool given)
{
int block_size=1024;
int num_block=(int)ceil(N/(T)block_size);
int iteration_num_now=0;
// printf("apply\n");
checkCudaErrors(hipMemcpy(b_device,b,sizeof(T)*N,hipMemcpyHostToDevice));
if(given==0)
{
hipLaunchKernelGGL(( init), dim3(num_block*2),dim3(block_size), 0, 0, x_device_iteration,2*N);
}
else if(given==1)
{
checkCudaErrors(hipMemcpy(x_device_iteration,x,sizeof(T)*N,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(x_device_iteration+N,x,sizeof(T)*N,hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( set_Dinverseb), dim3(num_block),dim3(block_size) , 0, 0, dig_A_coefficient_inverse_device,b_device,dig_inverse_b_device,N); //no cuBlas API support
int new_iteration=1,old_iteration;
residual_sum_host=0;
const T alpha=1.0, beta=0;
T test_f_d;
const char* p_char=typeid(test_f_d).name();
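// Jacobi update x_{k+1} = T*x_k + D^{-1}*b, ping-ponging between the two halves of x_device_iteration.
// The typeid check below picks the double- or float-precision csrmv/axpy/nrm2 calls; iteration stops
// when the squared 2-norm of (x_new - x_old) drops below tol or max_iteration_num is reached.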
if((*p_char)=='d')
{
for(iteration_num_now=0;iteration_num_now<max_iteration_num;iteration_num_now++)
{
old_iteration=!new_iteration;
hipsparseDcsrmv(my_cusparse_handle,HIPSPARSE_OPERATION_NON_TRANSPOSE,N,N, num_non_zero_T,(const double*)&alpha,my_descr,(const double*)val_T_device,row_offset_T_device,col_index_T_device,(const double*)x_device_iteration+(old_iteration*N),(const double*)&beta,(double*)x_device_iteration+(new_iteration*N));
hipblasDaxpy(my_cublas_handle,N,(const double*)&alpha,(const double*)dig_inverse_b_device,1,(double*)x_device_iteration+(new_iteration*N),1);
hipLaunchKernelGGL(( get_tol_csr), dim3(num_block),dim3(block_size), 0, 0, x_device_iteration,residual_device,N);
hipblasDnrm2(my_cublas_handle,N,(const double*)residual_device,1,(double*)&residual_sum_host);
// printf("residual_sum_host :: %lf\n",residual_sum_host);
if(pow(residual_sum_host,2)<tol)
{
break;
}
new_iteration=!new_iteration;
}
}
else if((*p_char)=='f')
{
for(iteration_num_now=0;iteration_num_now<max_iteration_num;iteration_num_now++)
{
old_iteration=!new_iteration;
hipsparseScsrmv(my_cusparse_handle,HIPSPARSE_OPERATION_NON_TRANSPOSE,N,N, num_non_zero_T,(const float*)&alpha,my_descr,(const float*)val_T_device,row_offset_T_device,col_index_T_device,(const float*)x_device_iteration+(old_iteration*N),(const float*)&beta,(float*)x_device_iteration+(new_iteration*N));
hipblasSaxpy(my_cublas_handle,N,(const float*)&alpha,(const float*)dig_inverse_b_device,1,(float*)x_device_iteration+(new_iteration*N),1);
hipLaunchKernelGGL(( get_tol_csr), dim3(num_block),dim3(block_size), 0, 0, x_device_iteration,residual_device,N);
hipblasSnrm2(my_cublas_handle,N,(const float*)residual_device,1,(float*)&residual_sum_host);
// printf("residual_sum_host :: %lf\n",residual_sum_host);
if(pow(residual_sum_host,2)<tol)
{
break;
}
new_iteration=!new_iteration;
}
}
printf("iteration_num_now ::%d\n",iteration_num_now);
checkCudaErrors(hipMemcpy(x,x_device_iteration, N*sizeof(T), hipMemcpyDeviceToHost));
return 0;
}
}
| 497fcec47d13b8f82dc25b7aed30d6a7db2ba097.cu | #include "head.h"
#include "config.h"
#include "helper_cuda.h"
#include "triplet.h"
#include "matrix_form_cuda_jacobi_solver.h"
using namespace std;
template<typename T>
__global__ void set_Dinverseb(const T* dig_A_coefficient_inverse_device,const T* b_device,T* dig_inverse_b_device,const int N)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<N)
{
dig_inverse_b_device[i] = dig_A_coefficient_inverse_device[i]*b_device[i];
}
}
template<typename T>
__global__ void init(T* x_device_iteration,int N_2)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<N_2)
{
x_device_iteration[i]=0;
}
}
template<typename T>
__global__ void get_tol_csr(T* x_device_iteration,T* residual_device,int N)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<N)
{
residual_device[i]=x_device_iteration[i]-x_device_iteration[i+N];
}
}
namespace wtyatzoo
{
template<typename T>
matrix_form_cuda_jacobi_solver<T>::matrix_form_cuda_jacobi_solver(const T* val_A,const int* col_index_A,const int* row_offset_A,const int num_non_zero_A,const int N)
{
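// Split the CSR input A into its diagonal D and off-diagonal part, build the Jacobi iteration
// matrix T = -D^{-1}(A - D) in CSR form together with D^{-1}, and upload both to the GPU.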
int i,j;
//need to free
T* dig_A_coefficient_inverse=(T*)malloc(sizeof(T)*N);
this->num_non_zero_T=num_non_zero_A-N;
this->N=N;
// printf("num_non_zero_T :%d \n N :%d\n num_non_zero_A :%d\n",num_non_zero_T,N,num_non_zero_A);
// need to free!
int * row_offset_T=(int *)malloc(sizeof(int )*(N+1));
int * col_index_T=(int *)malloc(sizeof(int )*num_non_zero_T);
T* val_T=(T*)malloc(sizeof(T)*num_non_zero_T);
int offset_now=0;
int row_now=0;
row_offset_T[0]=0;
for(i=0;i<N;++i)
{
for(j=row_offset_A[i];j<row_offset_A[i+1];++j)
{
if(col_index_A[j]==i)
{
dig_A_coefficient_inverse[i]=1.0/val_A[j];
}
else if(col_index_A[j]!=i)
{
val_T[offset_now]=val_A[j];
col_index_T[offset_now]=col_index_A[j];
offset_now++;
}
}
row_offset_T[i+1]=offset_now;
}
for(i=0;i<N;++i)
{
for(j=row_offset_T[i];j<row_offset_T[i+1];++j)
{
val_T[j]=val_T[j]*dig_A_coefficient_inverse[i]*-1.0;
}
}
checkCudaErrors(cudaMalloc((void**)&val_T_device,sizeof(T)*num_non_zero_T));
checkCudaErrors(cudaMalloc((void**)&col_index_T_device,sizeof(int )*num_non_zero_T));
checkCudaErrors(cudaMalloc((void**)&row_offset_T_device,sizeof(int )*(N+1)));
checkCudaErrors(cudaMalloc((void**)&dig_A_coefficient_inverse_device,sizeof(T)*N));
checkCudaErrors(cudaMemcpy(val_T_device,val_T,sizeof(T)*num_non_zero_T,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(col_index_T_device,col_index_T,sizeof(int )*num_non_zero_T,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(row_offset_T_device,row_offset_T,sizeof(int )*(N+1),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dig_A_coefficient_inverse_device,dig_A_coefficient_inverse,sizeof(T)*N,cudaMemcpyHostToDevice));
// free the CPU-side scratch buffers
{
free(dig_A_coefficient_inverse);
free(row_offset_T);
free(col_index_T);
free(val_T);
}
my_cublas_handle=0;
checkCudaErrors(cublasCreate(&my_cublas_handle));
my_cusparse_handle=0;
checkCudaErrors(cusparseCreate(& my_cusparse_handle));
my_descr=0;
checkCudaErrors(cusparseCreateMatDescr(& my_descr));
cusparseSetMatType(my_descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(my_descr,CUSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(cudaMalloc((void**)&b_device,sizeof(T)*N));
checkCudaErrors(cudaMalloc((void**)&x_device_iteration,sizeof(T)*2*N));
checkCudaErrors(cudaMalloc((void**)&residual_device,sizeof(T)*N));
checkCudaErrors(cudaMalloc((void**)&dig_inverse_b_device,sizeof(T)*N));
}
// Assumes the triplets' (i, j) pairs are unique and sorted; the caller must ensure the input satisfies this before calling.
template<typename T>
matrix_form_cuda_jacobi_solver<T>::matrix_form_cuda_jacobi_solver(const std::vector<triplet<T> > &mytriplets_A,const int N)
{
int i,j;
vector<triplet<T> > mytriplets;
while(!mytriplets.empty())
{
mytriplets.pop_back();
}
//need to free
T* dig_A_coefficient_inverse=(T*)malloc(sizeof(T)*N);
int num_non_zero_A=mytriplets_A.size();
for(i=0;i<num_non_zero_A;++i)
{
triplet<T> tri_now=mytriplets_A[i];
if(tri_now.col!=tri_now.row)
{
mytriplets.push_back(tri_now);
}
else if(tri_now.col==tri_now.row)
{
dig_A_coefficient_inverse[tri_now.col]=1.0/tri_now.val;
}
}
this->num_non_zero_T=mytriplets.size();
this->N=N;
// printf("num_non_zero_T :%d \n N :%d\n num_non_zero_A :%d\n",num_non_zero_T,N,num_non_zero_A);
for(i=0;i<num_non_zero_T;++i)
{
int row_now=mytriplets[i].row;
mytriplets[i].val=mytriplets[i].val*dig_A_coefficient_inverse[row_now]*-1;
}
// need to free!
int * row_offset_T=(int *)malloc(sizeof(int )*(N+1));
int * col_index_T=(int *)malloc(sizeof(int )*num_non_zero_T);
T* val_T=(T*)malloc(sizeof(T)*num_non_zero_T);
int offset_now=0;
int row_now=0;
row_offset_T[0]=0;
for(i=0;i<num_non_zero_T;++i)
{
if(mytriplets[i].row==row_now)
{
row_offset_T[row_now]=offset_now;
row_now++; offset_now++;
}
else
{
offset_now++;
}
col_index_T[i]=mytriplets[i].col;
val_T[i]=mytriplets[i].val;
}
row_offset_T[N]=num_non_zero_T;
checkCudaErrors(cudaMalloc((void**)&val_T_device,sizeof(T)*num_non_zero_T));
checkCudaErrors(cudaMalloc((void**)&col_index_T_device,sizeof(int )*num_non_zero_T));
checkCudaErrors(cudaMalloc((void**)&row_offset_T_device,sizeof(int )*(N+1)));
checkCudaErrors(cudaMalloc((void**)&dig_A_coefficient_inverse_device,sizeof(T)*N));
checkCudaErrors(cudaMemcpy(val_T_device,val_T,sizeof(T)*num_non_zero_T,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(col_index_T_device,col_index_T,sizeof(int )*num_non_zero_T,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(row_offset_T_device,row_offset_T,sizeof(int )*(N+1),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dig_A_coefficient_inverse_device,dig_A_coefficient_inverse,sizeof(T)*N,cudaMemcpyHostToDevice));
// free the CPU-side scratch buffers
{
free(dig_A_coefficient_inverse);
free(row_offset_T);
free(col_index_T);
free(val_T);
}
my_cublas_handle=0;
checkCudaErrors(cublasCreate(&my_cublas_handle));
my_cusparse_handle=0;
checkCudaErrors(cusparseCreate(& my_cusparse_handle));
my_descr=0;
checkCudaErrors(cusparseCreateMatDescr(& my_descr));
cusparseSetMatType(my_descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(my_descr,CUSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(cudaMalloc((void**)&b_device,sizeof(T)*N));
checkCudaErrors(cudaMalloc((void**)&x_device_iteration,sizeof(T)*2*N));
checkCudaErrors(cudaMalloc((void**)&residual_device,sizeof(T)*N));
checkCudaErrors(cudaMalloc((void**)&dig_inverse_b_device,sizeof(T)*N));
}
template<typename T>
matrix_form_cuda_jacobi_solver<T>::matrix_form_cuda_jacobi_solver( T**A,const T*dig_A_coefficient,const int N)
{
// assemble the sparse matrix structure on the GPU from the dense matrix input
int i,j;
vector<triplet<T> > mytriplets;
while(!mytriplets.empty())
{
mytriplets.pop_back();
}
//need to free
T* dig_A_coefficient_inverse=(T*)malloc(sizeof(T)*N);
T EPS=1e-10;
for(i=0;i<N;++i)
{
dig_A_coefficient_inverse[i] = 1.0/dig_A_coefficient[i];
for(j=0;j<N;++j)
{
// printf("%lf\n",A[i][j]);
if(fabs(A[i][j])>=EPS&&(i!=j))
{
mytriplets.push_back(triplet<T>(i,j,-1*A[i][j]*dig_A_coefficient_inverse[i]));
}
}
}
this->num_non_zero_T=mytriplets.size();
this->N=N;
// printf("num_non_zero_T :%d\n N :%d\n",num_non_zero_T,N);
// need to free!
int * row_offset_T=(int *)malloc(sizeof(int )*(N+1));
int * col_index_T=(int *)malloc(sizeof(int )*num_non_zero_T);
T* val_T=(T*)malloc(sizeof(T)*num_non_zero_T);
int offset_now=0;
int row_now=0;
row_offset_T[0]=0;
for(i=0;i<num_non_zero_T;++i)
{
if(mytriplets[i].row==row_now)
{
row_offset_T[row_now]=offset_now;
row_now++; offset_now++;
}
else
{
offset_now++;
}
col_index_T[i]=mytriplets[i].col;
val_T[i]=mytriplets[i].val;
}
row_offset_T[N]=num_non_zero_T;
checkCudaErrors(cudaMalloc((void**)&val_T_device,sizeof(T)*num_non_zero_T));
checkCudaErrors(cudaMalloc((void**)&col_index_T_device,sizeof(int )*num_non_zero_T));
checkCudaErrors(cudaMalloc((void**)&row_offset_T_device,sizeof(int )*(N+1)));
checkCudaErrors(cudaMalloc((void**)&dig_A_coefficient_inverse_device,sizeof(T)*N));
checkCudaErrors(cudaMemcpy(val_T_device,val_T,sizeof(T)*num_non_zero_T,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(col_index_T_device,col_index_T,sizeof(int )*num_non_zero_T,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(row_offset_T_device,row_offset_T,sizeof(int )*(N+1),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dig_A_coefficient_inverse_device,dig_A_coefficient_inverse,sizeof(T)*N,cudaMemcpyHostToDevice));
// free the CPU-side scratch buffers
{
free(dig_A_coefficient_inverse);
free(row_offset_T);
free(col_index_T);
free(val_T);
}
my_cublas_handle=0;
checkCudaErrors(cublasCreate(&my_cublas_handle));
my_cusparse_handle=0;
checkCudaErrors(cusparseCreate(& my_cusparse_handle));
my_descr=0;
checkCudaErrors(cusparseCreateMatDescr(& my_descr));
cusparseSetMatType(my_descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(my_descr,CUSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(cudaMalloc((void**)&b_device,sizeof(T)*N));
checkCudaErrors(cudaMalloc((void**)&x_device_iteration,sizeof(T)*2*N));
checkCudaErrors(cudaMalloc((void**)&residual_device,sizeof(T)*N));
checkCudaErrors(cudaMalloc((void**)&dig_inverse_b_device,sizeof(T)*N));
}
template<typename T>
matrix_form_cuda_jacobi_solver<T>::~matrix_form_cuda_jacobi_solver()
{
// printf("GG\n");
checkCudaErrors(cudaFree(dig_A_coefficient_inverse_device));
checkCudaErrors(cudaFree(val_T_device));
checkCudaErrors(cudaFree(col_index_T_device));
checkCudaErrors(cudaFree(row_offset_T_device));
checkCudaErrors(cudaFree(b_device));
checkCudaErrors(cudaFree(dig_inverse_b_device));
checkCudaErrors(cudaFree(x_device_iteration));
checkCudaErrors(cudaFree(residual_device));
}
template<typename T>
int matrix_form_cuda_jacobi_solver<T>::apply(const T b[],T x[],const T tol,const int max_iteration_num,const bool given)
{
int block_size=1024;
int num_block=(int)ceil(N/(T)block_size);
int iteration_num_now=0;
// printf("apply\n");
checkCudaErrors(cudaMemcpy(b_device,b,sizeof(T)*N,cudaMemcpyHostToDevice));
if(given==0)
{
init<<<num_block*2,block_size>>>(x_device_iteration,2*N);
}
else if(given==1)
{
checkCudaErrors(cudaMemcpy(x_device_iteration,x,sizeof(T)*N,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(x_device_iteration+N,x,sizeof(T)*N,cudaMemcpyHostToDevice));
}
set_Dinverseb<<< num_block,block_size >>>(dig_A_coefficient_inverse_device,b_device,dig_inverse_b_device,N); //no cuBlas API support
int new_iteration=1,old_iteration;
residual_sum_host=0;
const T alpha=1.0, beta=0;
T test_f_d;
const char* p_char=typeid(test_f_d).name();
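// Jacobi update x_{k+1} = T*x_k + D^{-1}*b, ping-ponging between the two halves of x_device_iteration.
// The typeid check below picks the double- or float-precision csrmv/axpy/nrm2 calls; iteration stops
// when the squared 2-norm of (x_new - x_old) drops below tol or max_iteration_num is reached.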
if((*p_char)=='d')
{
for(iteration_num_now=0;iteration_num_now<max_iteration_num;iteration_num_now++)
{
old_iteration=!new_iteration;
cusparseDcsrmv(my_cusparse_handle,CUSPARSE_OPERATION_NON_TRANSPOSE,N,N, num_non_zero_T,(const double*)&alpha,my_descr,(const double*)val_T_device,row_offset_T_device,col_index_T_device,(const double*)x_device_iteration+(old_iteration*N),(const double*)&beta,(double*)x_device_iteration+(new_iteration*N));
cublasDaxpy(my_cublas_handle,N,(const double*)&alpha,(const double*)dig_inverse_b_device,1,(double*)x_device_iteration+(new_iteration*N),1);
get_tol_csr<<<num_block,block_size>>>(x_device_iteration,residual_device,N);
cublasDnrm2(my_cublas_handle,N,(const double*)residual_device,1,(double*)&residual_sum_host);
// printf("residual_sum_host :: %lf\n",residual_sum_host);
if(pow(residual_sum_host,2)<tol)
{
break;
}
new_iteration=!new_iteration;
}
}
else if((*p_char)=='f')
{
for(iteration_num_now=0;iteration_num_now<max_iteration_num;iteration_num_now++)
{
old_iteration=!new_iteration;
cusparseScsrmv(my_cusparse_handle,CUSPARSE_OPERATION_NON_TRANSPOSE,N,N, num_non_zero_T,(const float*)&alpha,my_descr,(const float*)val_T_device,row_offset_T_device,col_index_T_device,(const float*)x_device_iteration+(old_iteration*N),(const float*)&beta,(float*)x_device_iteration+(new_iteration*N));
cublasSaxpy(my_cublas_handle,N,(const float*)&alpha,(const float*)dig_inverse_b_device,1,(float*)x_device_iteration+(new_iteration*N),1);
get_tol_csr<<<num_block,block_size>>>(x_device_iteration,residual_device,N);
cublasSnrm2(my_cublas_handle,N,(const float*)residual_device,1,(float*)&residual_sum_host);
// printf("residual_sum_host :: %lf\n",residual_sum_host);
if(pow(residual_sum_host,2)<tol)
{
break;
}
new_iteration=!new_iteration;
}
}
printf("iteration_num_now ::%d\n",iteration_num_now);
checkCudaErrors(cudaMemcpy(x,x_device_iteration, N*sizeof(T), cudaMemcpyDeviceToHost));
return 0;
}
}
|
2c9b1f86d0ef87ff194b9ba21814733d199b1b1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "ctranslate2/primitives/primitives.h"
#include <cmath>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <hipcub/hipcub.hpp>
#include "../cuda/utils.h"
namespace ctranslate2 {
template <typename T, typename UnaryFunction>
void unary_transform(const T* x, T* y, dim_t size, UnaryFunction op) {
THRUST_CALL(thrust::transform, x, x + size, y, op);
}
template <typename T, typename BinaryFunction>
void binary_transform(const T* a, const T* b, T* c, dim_t size, BinaryFunction op) {
THRUST_CALL(thrust::transform, a, a + size, b, c, op);
}
template <typename T1, typename T2, typename T3, typename BinaryFunction, typename IndexFunction>
void binary_transform(T1 a, T2 b, T3 c, dim_t size,
BinaryFunction op, IndexFunction index_a) {
auto index_it = thrust::make_transform_iterator(thrust::counting_iterator<dim_t>(0), index_a);
auto a_it = thrust::make_permutation_iterator(a, index_it);
THRUST_CALL(thrust::transform, a_it, a_it + size, b, c, op);
}
// perm_fun is a functor that takes the index in the permuted iterator and
// returns the index in the original iterator.
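// e.g. transpose_2d/3d/4d pass a perm_indices_* functor here so that y[i] = x[perm_fun(i)].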
template <typename T, typename PermFunction>
void permute(const T* x, T* y, dim_t size, PermFunction perm_fun) {
auto ind_it = thrust::counting_iterator<dim_t>(0);
auto perm_ind_it = thrust::make_transform_iterator(ind_it, perm_fun);
auto perm_it = thrust::make_permutation_iterator(x, perm_ind_it);
THRUST_CALL(thrust::copy_n, perm_it, size, y);
}
static const cuda::CachingAllocatorConfig allocator_config = cuda::get_caching_allocator_config();
static hipcub::CachingDeviceAllocator allocator(
allocator_config.bin_growth,
allocator_config.min_bin,
allocator_config.max_bin,
allocator_config.max_cached_bytes);
template<>
void primitives<Device::CUDA>::set_device(int index) {
CUDA_CHECK(hipSetDevice(index));
}
template<>
int primitives<Device::CUDA>::get_device() {
int index;
CUDA_CHECK(hipGetDevice(&index));
return index;
}
template<>
void* primitives<Device::CUDA>::alloc_data(dim_t size) {
void* data = nullptr;
CUDA_CHECK(allocator.DeviceAllocate(&data, size, cuda::get_cuda_stream()));
return data;
}
template<>
void primitives<Device::CUDA>::free_data(void* data) {
CUDA_CHECK(allocator.DeviceFree(data));
}
template<>
void primitives<Device::CUDA>::clear_cache() {
CUDA_CHECK(allocator.FreeAllCached());
}
template<>
template <typename T>
T primitives<Device::CUDA>::deref(const T* x, dim_t index) {
T val = T();
cross_device_primitives<Device::CUDA, Device::CPU>::copy(x + index, &val, 1);
return val;
}
template<>
template <typename T>
void primitives<Device::CUDA>::fill(T* x, T a, dim_t size) {
THRUST_CALL(thrust::fill_n, x, size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size) {
auto it = thrust::make_permutation_iterator(
x, thrust::make_transform_iterator(thrust::counting_iterator<dim_t>(0),
thrust::placeholders::_1 * inc_x));
THRUST_CALL(thrust::fill_n, it, size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T),
hipMemcpyDeviceToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename T>
T primitives<Device::CUDA>::sum(const T* array, dim_t size) {
return THRUST_CALL(thrust::reduce, array, array + size);
}
template<>
template <typename T>
dim_t primitives<Device::CUDA>::max_element(const T* array, dim_t size) {
const auto* max = THRUST_CALL(thrust::max_element, array, array + size);
return static_cast<dim_t>(max - array);
}
template<>
template <typename T>
T primitives<Device::CUDA>::max(const T* array, dim_t size) {
const auto* max = THRUST_CALL(thrust::max_element, array, array + size);
return deref(max, 0);
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size) {
unary_transform(x, y, size, thrust::placeholders::_1 + a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size) {
binary_transform(a, b, c, size, thrust::plus<T>());
}
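// Index functors used for broadcasting: repeat_vec tiles a length-`size` vector across the output
// (i % size), while repeat_vec_depth repeats each element `size` times (i / size), e.g. to expand
// per-batch scales over the depth dimension.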
template <typename T>
struct repeat_vec : thrust::unary_function<T, T> {
T _size;
repeat_vec(T size)
: _size(size) {
}
__host__ __device__
T operator()(const T& i) {
return i % _size;
}
};
template <typename T>
struct repeat_vec_depth : thrust::unary_function<T, T> {
T _size;
repeat_vec_depth(T size)
: _size(size) {
}
__host__ __device__
T operator()(const T& i) {
return i / _size;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
binary_transform(a, b, c, b_size, thrust::plus<T>(), repeat_vec<dim_t>(a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
binary_transform(a, b, c, b_size, thrust::plus<T>(), repeat_vec_depth<dim_t>(b_size / a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size) {
binary_transform(a, b, c, size, thrust::minus<T>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size) {
unary_transform(x, y, size, thrust::placeholders::_1 * a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size) {
binary_transform(a, b, c, size, thrust::multiplies<T>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
binary_transform(a, b, c, b_size, thrust::multiplies<T>(), repeat_vec<dim_t>(a_size));
}
struct absolute_maximum_func : public thrust::binary_function<float, float, float> {
__host__ __device__
float operator()(float a, float b) {
return fmaxf(fabsf(a), fabsf(b));
}
};
template <typename T>
struct quantize_func : public thrust::binary_function<float, float, T> {
__host__ __device__
T operator()(float scale, float x) {
return static_cast<T>(x * scale);
}
};
template<>
void primitives<Device::CUDA>::quantize_batch(const float* x, float* scales, int8_t* qx,
dim_t batch_size, dim_t depth) {
const dim_t size = batch_size * depth;
// Assign 1 key per batch.
auto keys_it = thrust::make_transform_iterator(thrust::counting_iterator<int>(0),
repeat_vec_depth<int>(depth));
// scales = 127.0 / reduce_max(abs(x), axis=1)
THRUST_CALL(thrust::reduce_by_key,
keys_it, keys_it + size,
x,
thrust::make_discard_iterator(),
thrust::make_transform_output_iterator(
scales, static_cast<float>(127) / thrust::placeholders::_1),
thrust::equal_to<int>(),
absolute_maximum_func());
// qx = x * expand_dims(scales, 1)
binary_transform(scales, x, qx, size,
quantize_func<int8_t>(),
repeat_vec_depth<dim_t>(depth));
}
template <typename T>
struct dequantize_func : public thrust::binary_function<float, T, float> {
__device__
float operator()(float scale, T x) {
return __fdividef(static_cast<float>(x), scale);
}
};
template<>
template<>
void primitives<Device::CUDA>::dequantize_batch(const int8_t* x, const float* scale, float* y,
dim_t x_size, dim_t scale_size) {
binary_transform(scale, x, y, x_size,
dequantize_func<int8_t>(),
repeat_vec_depth<dim_t>(x_size / scale_size));
}
struct rescale_func : public thrust::binary_function<int32_t, thrust::tuple<float, float>, float> {
__device__
float operator()(int32_t x, const thrust::tuple<float, float>& scales) {
return __fdividef(__int2float_rn(x), (thrust::get<0>(scales) * thrust::get<1>(scales)));
}
};
template<>
void primitives<Device::CUDA>::rescale_output(const int32_t* x,
const float* input_scales,
const float* weight_scales,
float* y,
dim_t batch_size,
dim_t depth) {
const dim_t size = batch_size * depth;
// y = x / (expand_dims(input_scales, 1) * expand_dims(weight_scales, 0))
auto input_scales_it = thrust::make_permutation_iterator(
input_scales,
thrust::make_transform_iterator(thrust::counting_iterator<int>(0),
repeat_vec_depth<int>(depth)));
auto weight_scales_it = thrust::make_permutation_iterator(
weight_scales,
thrust::make_transform_iterator(thrust::counting_iterator<int>(0),
repeat_vec<int>(depth)));
auto scales_it = thrust::make_zip_iterator(thrust::make_tuple(input_scales_it, weight_scales_it));
THRUST_CALL(thrust::transform,
x, x + size,
scales_it,
y,
rescale_func());
}
struct relu_func : public thrust::unary_function<float, float> {
__host__ __device__
float operator()(float x) { return fmaxf(x, 0); }
};
template<>
void primitives<Device::CUDA>::relu(const float* x, float* y, dim_t size) {
unary_transform(x, y, size, relu_func());
}
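// Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).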
struct gelu_func : public thrust::unary_function<float, float> {
float _scale;
gelu_func(float scale)
: _scale(scale) {
}
__host__ __device__
float operator()(float x) {
return 0.5f * x * (1.f + tanhf(_scale * (x + 0.044715f * powf(x, 3.f))));
}
};
template<>
void primitives<Device::CUDA>::gelu(const float* x, float* y, dim_t size) {
static const float pi = std::acos(-1.f);
static const float scale = std::sqrt(2.f / pi);
unary_transform(x, y, size, gelu_func(scale));
}
template <typename T>
struct perm_indices_2d : public thrust::unary_function<T, T> {
T _rows, _cols;
perm_indices_2d(T rows, T cols)
: _rows(rows)
, _cols(cols) {
}
__host__ __device__
T operator()(const T& i) const {
const T i0 = i / _rows;
const T i1 = i % _rows;
return i1 * _cols + i0;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_2d(const T* a, const dim_t* dims, T* b) {
permute(a, b, dims[0] * dims[1], perm_indices_2d<dim_t>(dims[0], dims[1]));
}
template <typename T>
struct perm_indices_3d : public thrust::unary_function<T, T> {
T _a_ps0, _a_ps1, _a_ps2; // Permuted strides of the original array.
T _b_d0, _b_d1, _b_d2; // Dimensions of the permuted array.
T _b_s0, _b_s1, _b_s2; // Strides of the permuted array.
perm_indices_3d(const T* dims, const T* perm) {
const T a_stride[3] = {dims[1] * dims[2], dims[2], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_s0 = _b_d1 * _b_d2;
_b_s1 = _b_d2;
_b_s2 = 1;
}
__host__ __device__
T operator()(const T& i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i % _b_d2;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_3d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
permute(a, b, dims[0] * dims[1] * dims[2], perm_indices_3d<dim_t>(dims, perm));
}
template <typename T>
struct perm_indices_4d : public thrust::unary_function<T, T> {
T _a_ps0, _a_ps1, _a_ps2, _a_ps3; // Permuted strides of the original array.
T _b_d0, _b_d1, _b_d2, _b_d3; // Dimensions of the permuted array.
T _b_s0, _b_s1, _b_s2, _b_s3; // Strides of the permuted array.
perm_indices_4d(const T* dims, const T* perm) {
const T a_stride[4] = {dims[1] * dims[2] * dims[3], dims[2] * dims[3], dims[3], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_a_ps3 = a_stride[perm[3]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_d3 = dims[perm[3]];
_b_s0 = _b_d1 * _b_d2 * _b_d3;
_b_s1 = _b_d2 * _b_d3;
_b_s2 = _b_d3;
_b_s3 = 1;
}
__host__ __device__
T operator()(const T& i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i / _b_s2 % _b_d2;
const T i3 = i % _b_d3;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2 + i3 * _a_ps3;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_4d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
permute(a, b, dims[0] * dims[1] * dims[2] * dims[3], perm_indices_4d<dim_t>(dims, perm));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const float* a, const float* b,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemm(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb,
a, lda,
&beta,
c, ldc));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const int8_t* a, const int8_t* b,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
int32_t* c) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int32_t alpha_i = alpha;
int32_t beta_i = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(hipblasGemmEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_i,
b, HIP_R_8I, ldb,
a, HIP_R_8I, lda,
&beta_i,
c, HIP_R_32I, ldc,
HIP_R_32I,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
template<>
template<>
void primitives<Device::CUDA>::gemm_batch(const float* a, const float* b,
bool transpose_a, bool transpose_b,
dim_t batch_size,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const long long int stridea = m * k;
const long long int strideb = k * n;
const long long int stridec = m * n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemmStridedBatched(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb, strideb,
a, lda, stridea,
&beta,
c, ldc, stridec,
batch_size));
}
struct exp_func : public thrust::unary_function<float, float> {
__host__ __device__
float operator()(float x) { return expf(x); }
};
template<>
void primitives<Device::CUDA>::exp(const float* x, float* y, dim_t size) {
unary_transform(x, y, size, exp_func());
}
struct log_func : public thrust::unary_function<float, float> {
__host__ __device__
float operator()(float x) { return logf(x); }
};
template<>
void primitives<Device::CUDA>::log(const float* x, float* y, dim_t size) {
unary_transform(x, y, size, log_func());
}
struct pow_func : public thrust::unary_function<float, float> {
float _power;
pow_func(float power)
: _power(power) {
}
__host__ __device__
float operator()(float x) { return powf(x, _power); }
};
template<>
void primitives<Device::CUDA>::pow(const float* x, float* y, float power, dim_t size) {
unary_transform(x, y, size, pow_func(power));
}
template<>
template <typename T>
void cross_device_primitives<Device::CPU, Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T), hipMemcpyHostToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename T>
void cross_device_primitives<Device::CUDA, Device::CPU>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T), hipMemcpyDeviceToHost, cuda::get_cuda_stream()));
}
#define DECLARE_IMPL(T) \
template T \
primitives<Device::CUDA>::deref(const T* x, dim_t index); \
template void \
primitives<Device::CUDA>::fill(T* x, T a, dim_t size); \
template void \
primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size); \
template void \
primitives<Device::CUDA>::copy<T>(const T* x, T* y, dim_t size); \
template T \
primitives<Device::CUDA>::sum(const T* array, dim_t size); \
template dim_t \
primitives<Device::CUDA>::max_element(const T* array, dim_t size); \
template T \
primitives<Device::CUDA>::max(const T* array, dim_t size); \
template void \
primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::transpose_2d(const T* a, \
const dim_t* dims, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_3d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_4d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
cross_device_primitives<Device::CPU, Device::CUDA>::copy<T>(const T*, T*, dim_t); \
template void \
cross_device_primitives<Device::CUDA, Device::CPU>::copy<T>(const T*, T*, dim_t);
DECLARE_ALL_TYPES(DECLARE_IMPL)
}
| 2c9b1f86d0ef87ff194b9ba21814733d199b1b1a.cu | #include "ctranslate2/primitives/primitives.h"
#include <cmath>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <cub/util_allocator.cuh>
#include "../cuda/utils.h"
namespace ctranslate2 {
template <typename T, typename UnaryFunction>
void unary_transform(const T* x, T* y, dim_t size, UnaryFunction op) {
THRUST_CALL(thrust::transform, x, x + size, y, op);
}
template <typename T, typename BinaryFunction>
void binary_transform(const T* a, const T* b, T* c, dim_t size, BinaryFunction op) {
THRUST_CALL(thrust::transform, a, a + size, b, c, op);
}
template <typename T1, typename T2, typename T3, typename BinaryFunction, typename IndexFunction>
void binary_transform(T1 a, T2 b, T3 c, dim_t size,
BinaryFunction op, IndexFunction index_a) {
auto index_it = thrust::make_transform_iterator(thrust::counting_iterator<dim_t>(0), index_a);
auto a_it = thrust::make_permutation_iterator(a, index_it);
THRUST_CALL(thrust::transform, a_it, a_it + size, b, c, op);
}
// perm_fun is a functor that takes the index in the permuted iterator and
// returns the index in the original iterator.
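// e.g. transpose_2d/3d/4d pass a perm_indices_* functor here so that y[i] = x[perm_fun(i)].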
template <typename T, typename PermFunction>
void permute(const T* x, T* y, dim_t size, PermFunction perm_fun) {
auto ind_it = thrust::counting_iterator<dim_t>(0);
auto perm_ind_it = thrust::make_transform_iterator(ind_it, perm_fun);
auto perm_it = thrust::make_permutation_iterator(x, perm_ind_it);
THRUST_CALL(thrust::copy_n, perm_it, size, y);
}
static const cuda::CachingAllocatorConfig allocator_config = cuda::get_caching_allocator_config();
static cub::CachingDeviceAllocator allocator(
allocator_config.bin_growth,
allocator_config.min_bin,
allocator_config.max_bin,
allocator_config.max_cached_bytes);
template<>
void primitives<Device::CUDA>::set_device(int index) {
CUDA_CHECK(cudaSetDevice(index));
}
template<>
int primitives<Device::CUDA>::get_device() {
int index;
CUDA_CHECK(cudaGetDevice(&index));
return index;
}
template<>
void* primitives<Device::CUDA>::alloc_data(dim_t size) {
void* data = nullptr;
CUDA_CHECK(allocator.DeviceAllocate(&data, size, cuda::get_cuda_stream()));
return data;
}
template<>
void primitives<Device::CUDA>::free_data(void* data) {
CUDA_CHECK(allocator.DeviceFree(data));
}
template<>
void primitives<Device::CUDA>::clear_cache() {
CUDA_CHECK(allocator.FreeAllCached());
}
template<>
template <typename T>
T primitives<Device::CUDA>::deref(const T* x, dim_t index) {
T val = T();
cross_device_primitives<Device::CUDA, Device::CPU>::copy(x + index, &val, 1);
return val;
}
template<>
template <typename T>
void primitives<Device::CUDA>::fill(T* x, T a, dim_t size) {
THRUST_CALL(thrust::fill_n, x, size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size) {
auto it = thrust::make_permutation_iterator(
x, thrust::make_transform_iterator(thrust::counting_iterator<dim_t>(0),
thrust::placeholders::_1 * inc_x));
THRUST_CALL(thrust::fill_n, it, size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T),
cudaMemcpyDeviceToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename T>
T primitives<Device::CUDA>::sum(const T* array, dim_t size) {
return THRUST_CALL(thrust::reduce, array, array + size);
}
template<>
template <typename T>
dim_t primitives<Device::CUDA>::max_element(const T* array, dim_t size) {
const auto* max = THRUST_CALL(thrust::max_element, array, array + size);
return static_cast<dim_t>(max - array);
}
template<>
template <typename T>
T primitives<Device::CUDA>::max(const T* array, dim_t size) {
const auto* max = THRUST_CALL(thrust::max_element, array, array + size);
return deref(max, 0);
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size) {
unary_transform(x, y, size, thrust::placeholders::_1 + a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size) {
binary_transform(a, b, c, size, thrust::plus<T>());
}
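// Index functors used for broadcasting: repeat_vec tiles a length-`size` vector across the output
// (i % size), while repeat_vec_depth repeats each element `size` times (i / size), e.g. to expand
// per-batch scales over the depth dimension.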
template <typename T>
struct repeat_vec : thrust::unary_function<T, T> {
T _size;
repeat_vec(T size)
: _size(size) {
}
__host__ __device__
T operator()(const T& i) {
return i % _size;
}
};
template <typename T>
struct repeat_vec_depth : thrust::unary_function<T, T> {
T _size;
repeat_vec_depth(T size)
: _size(size) {
}
__host__ __device__
T operator()(const T& i) {
return i / _size;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
binary_transform(a, b, c, b_size, thrust::plus<T>(), repeat_vec<dim_t>(a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
binary_transform(a, b, c, b_size, thrust::plus<T>(), repeat_vec_depth<dim_t>(b_size / a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size) {
binary_transform(a, b, c, size, thrust::minus<T>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size) {
unary_transform(x, y, size, thrust::placeholders::_1 * a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size) {
binary_transform(a, b, c, size, thrust::multiplies<T>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
binary_transform(a, b, c, b_size, thrust::multiplies<T>(), repeat_vec<dim_t>(a_size));
}
struct absolute_maximum_func : public thrust::binary_function<float, float, float> {
__host__ __device__
float operator()(float a, float b) {
return fmaxf(fabsf(a), fabsf(b));
}
};
template <typename T>
struct quantize_func : public thrust::binary_function<float, float, T> {
__host__ __device__
T operator()(float scale, float x) {
return static_cast<T>(x * scale);
}
};
template<>
void primitives<Device::CUDA>::quantize_batch(const float* x, float* scales, int8_t* qx,
dim_t batch_size, dim_t depth) {
const dim_t size = batch_size * depth;
// Assign 1 key per batch.
auto keys_it = thrust::make_transform_iterator(thrust::counting_iterator<int>(0),
repeat_vec_depth<int>(depth));
// scales = 127.0 / reduce_max(abs(x), axis=1)
THRUST_CALL(thrust::reduce_by_key,
keys_it, keys_it + size,
x,
thrust::make_discard_iterator(),
thrust::make_transform_output_iterator(
scales, static_cast<float>(127) / thrust::placeholders::_1),
thrust::equal_to<int>(),
absolute_maximum_func());
// qx = x * expand_dims(scales, 1)
binary_transform(scales, x, qx, size,
quantize_func<int8_t>(),
repeat_vec_depth<dim_t>(depth));
}
template <typename T>
struct dequantize_func : public thrust::binary_function<float, T, float> {
__device__
float operator()(float scale, T x) {
return __fdividef(static_cast<float>(x), scale);
}
};
template<>
template<>
void primitives<Device::CUDA>::dequantize_batch(const int8_t* x, const float* scale, float* y,
dim_t x_size, dim_t scale_size) {
binary_transform(scale, x, y, x_size,
dequantize_func<int8_t>(),
repeat_vec_depth<dim_t>(x_size / scale_size));
}
struct rescale_func : public thrust::binary_function<int32_t, thrust::tuple<float, float>, float> {
__device__
float operator()(int32_t x, const thrust::tuple<float, float>& scales) {
return __fdividef(__int2float_rn(x), (thrust::get<0>(scales) * thrust::get<1>(scales)));
}
};
template<>
void primitives<Device::CUDA>::rescale_output(const int32_t* x,
const float* input_scales,
const float* weight_scales,
float* y,
dim_t batch_size,
dim_t depth) {
const dim_t size = batch_size * depth;
// y = x / (expand_dims(input_scales, 1) * expand_dims(weight_scales, 0))
auto input_scales_it = thrust::make_permutation_iterator(
input_scales,
thrust::make_transform_iterator(thrust::counting_iterator<int>(0),
repeat_vec_depth<int>(depth)));
auto weight_scales_it = thrust::make_permutation_iterator(
weight_scales,
thrust::make_transform_iterator(thrust::counting_iterator<int>(0),
repeat_vec<int>(depth)));
auto scales_it = thrust::make_zip_iterator(thrust::make_tuple(input_scales_it, weight_scales_it));
THRUST_CALL(thrust::transform,
x, x + size,
scales_it,
y,
rescale_func());
}
struct relu_func : public thrust::unary_function<float, float> {
__host__ __device__
float operator()(float x) { return fmaxf(x, 0); }
};
template<>
void primitives<Device::CUDA>::relu(const float* x, float* y, dim_t size) {
unary_transform(x, y, size, relu_func());
}
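// Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).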
struct gelu_func : public thrust::unary_function<float, float> {
float _scale;
gelu_func(float scale)
: _scale(scale) {
}
__host__ __device__
float operator()(float x) {
return 0.5f * x * (1.f + tanhf(_scale * (x + 0.044715f * powf(x, 3.f))));
}
};
template<>
void primitives<Device::CUDA>::gelu(const float* x, float* y, dim_t size) {
static const float pi = std::acos(-1.f);
static const float scale = std::sqrt(2.f / pi);
unary_transform(x, y, size, gelu_func(scale));
}
template <typename T>
struct perm_indices_2d : public thrust::unary_function<T, T> {
T _rows, _cols;
perm_indices_2d(T rows, T cols)
: _rows(rows)
, _cols(cols) {
}
__host__ __device__
T operator()(const T& i) const {
const T i0 = i / _rows;
const T i1 = i % _rows;
return i1 * _cols + i0;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_2d(const T* a, const dim_t* dims, T* b) {
permute(a, b, dims[0] * dims[1], perm_indices_2d<dim_t>(dims[0], dims[1]));
}
template <typename T>
struct perm_indices_3d : public thrust::unary_function<T, T> {
T _a_ps0, _a_ps1, _a_ps2; // Permuted strides of the original array.
T _b_d0, _b_d1, _b_d2; // Dimensions of the permuted array.
T _b_s0, _b_s1, _b_s2; // Strides of the permuted array.
perm_indices_3d(const T* dims, const T* perm) {
const T a_stride[3] = {dims[1] * dims[2], dims[2], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_s0 = _b_d1 * _b_d2;
_b_s1 = _b_d2;
_b_s2 = 1;
}
__host__ __device__
T operator()(const T& i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i % _b_d2;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_3d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
permute(a, b, dims[0] * dims[1] * dims[2], perm_indices_3d<dim_t>(dims, perm));
}
template <typename T>
struct perm_indices_4d : public thrust::unary_function<T, T> {
T _a_ps0, _a_ps1, _a_ps2, _a_ps3; // Permuted strides of the original array.
T _b_d0, _b_d1, _b_d2, _b_d3; // Dimensions of the permuted array.
T _b_s0, _b_s1, _b_s2, _b_s3; // Strides of the permuted array.
perm_indices_4d(const T* dims, const T* perm) {
const T a_stride[4] = {dims[1] * dims[2] * dims[3], dims[2] * dims[3], dims[3], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_a_ps3 = a_stride[perm[3]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_d3 = dims[perm[3]];
_b_s0 = _b_d1 * _b_d2 * _b_d3;
_b_s1 = _b_d2 * _b_d3;
_b_s2 = _b_d3;
_b_s3 = 1;
}
__host__ __device__
T operator()(const T& i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i / _b_s2 % _b_d2;
const T i3 = i % _b_d3;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2 + i3 * _a_ps3;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_4d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
permute(a, b, dims[0] * dims[1] * dims[2] * dims[3], perm_indices_4d<dim_t>(dims, perm));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const float* a, const float* b,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemm(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb,
a, lda,
&beta,
c, ldc));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const int8_t* a, const int8_t* b,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
int32_t* c) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
int32_t alpha_i = alpha;
int32_t beta_i = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(cublasGemmEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_i,
b, CUDA_R_8I, ldb,
a, CUDA_R_8I, lda,
&beta_i,
c, CUDA_R_32I, ldc,
CUDA_R_32I,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
template<>
template<>
void primitives<Device::CUDA>::gemm_batch(const float* a, const float* b,
bool transpose_a, bool transpose_b,
dim_t batch_size,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const long long int stridea = m * k;
const long long int strideb = k * n;
const long long int stridec = m * n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemmStridedBatched(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb, strideb,
a, lda, stridea,
&beta,
c, ldc, stridec,
batch_size));
}
struct exp_func : public thrust::unary_function<float, float> {
__host__ __device__
float operator()(float x) { return expf(x); }
};
template<>
void primitives<Device::CUDA>::exp(const float* x, float* y, dim_t size) {
unary_transform(x, y, size, exp_func());
}
struct log_func : public thrust::unary_function<float, float> {
__host__ __device__
float operator()(float x) { return logf(x); }
};
template<>
void primitives<Device::CUDA>::log(const float* x, float* y, dim_t size) {
unary_transform(x, y, size, log_func());
}
struct pow_func : public thrust::unary_function<float, float> {
float _power;
pow_func(float power)
: _power(power) {
}
__host__ __device__
float operator()(float x) { return powf(x, _power); }
};
template<>
void primitives<Device::CUDA>::pow(const float* x, float* y, float power, dim_t size) {
unary_transform(x, y, size, pow_func(power));
}
template<>
template <typename T>
void cross_device_primitives<Device::CPU, Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T), cudaMemcpyHostToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename T>
void cross_device_primitives<Device::CUDA, Device::CPU>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T), cudaMemcpyDeviceToHost, cuda::get_cuda_stream()));
}
#define DECLARE_IMPL(T) \
template T \
primitives<Device::CUDA>::deref(const T* x, dim_t index); \
template void \
primitives<Device::CUDA>::fill(T* x, T a, dim_t size); \
template void \
primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size); \
template void \
primitives<Device::CUDA>::copy<T>(const T* x, T* y, dim_t size); \
template T \
primitives<Device::CUDA>::sum(const T* array, dim_t size); \
template dim_t \
primitives<Device::CUDA>::max_element(const T* array, dim_t size); \
template T \
primitives<Device::CUDA>::max(const T* array, dim_t size); \
template void \
primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::transpose_2d(const T* a, \
const dim_t* dims, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_3d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_4d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
cross_device_primitives<Device::CPU, Device::CUDA>::copy<T>(const T*, T*, dim_t); \
template void \
cross_device_primitives<Device::CUDA, Device::CPU>::copy<T>(const T*, T*, dim_t);
DECLARE_ALL_TYPES(DECLARE_IMPL)
}
|
019a10645b8bdc29fa2bc33fa4f2f2e7f70be76e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include "stdafx.h"
#include <unordered_map>
#include <vector>
#include <algorithm>
#include <iostream>
#include <string>
#include <tchar.h>
#include <thrust/device_vector.h>
using namespace std;
struct block {
int color ;
int bTNum ;
int amount;
};
__device__
bool inArray(int block[], int size, int numOfElem)
{
for (int i = 0; i < numOfElem ; ++i)
{
if (block[i] >= size)
{
return false;
}
}
__syncthreads();
return true;
}
__device__
bool equivalentBlockOnebyN(int *block, int row, int col, int blC, int blR, int **str, bool isRowCompare)
{
if (isRowCompare)
{
for (int b = 0; b < blC; b++)
{
if (str[row][b] != str[row][col])
{
return false;
}
}
}
else
{
for (int b = 0; b < blR; b++)
{
if (str[b][col] != str[row][col])
{
return false;
}
}
}
return true;
}
__device__
bool notInQueue(int *queue, int *block, int row, int col, int blC, int blR, bool isRowCompare)
{
if (isRowCompare)
{
for (int b = 0; b < blC; b++)
{
int oneDArr = row + block[b] ;
if (queue[oneDArr] == 1)
{
return true;
}
}
}
else
{
for (int b = 0; b < blR; b++)
{
int oneDArr = row * block[b] + col ;
if (queue[oneDArr]== 1)
{
return true;
}
}
}
return false;
}
__device__
void addToQueue(int *queue, int *block, int row, int col, int blC, int blR, bool isRowCompare)
{
if (isRowCompare)
{
for (int b = 0; b < blC; b++)
{
int oneDArr = row * block[b] ;
atomicAdd(&queue[oneDArr], 1);
}
}
else
{
for (int b = 0; b < blR; b++)
{
int oneDArr = row * block[b] + col ;
atomicAdd(&queue[oneDArr], 1);
}
}
}
__device__
bool isInQueue ( int *queue, int item ) {
if ( queue[item] == 1 ) {
return true;
}
else{
return false;
}
}
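// For each cell not yet marked in `queue`, tries to claim a horizontal 1xblC or vertical blRx1 run
// of the same color, marking the claimed cells in `queue` and bumping the matching per-color count
// in `blockStruct`.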
__global__
void general(int *queue, int **str, int rowP, int colP, int blR, int blC, int **blockType, int *colors, block *blockStruct)
{
extern __shared__ int shared[];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
bool truth = false;
bool badRow = false;
bool badCol = false;
int blockC[4] ;
int blockR[4] ;
int oneDArr = row * rowP + col;
if (row >= rowP || col >= colP){ return; }
if (!isInQueue(queue, oneDArr) )
{
for (int bl = 0; bl < blC; bl++)
{
if (bl < colP)
{
blockC[bl] = col + bl;
}
else
{
badCol = true;
}
}
for (int bl = 0; bl < blR; bl++)
{
if (bl < rowP)
{
blockR[bl] = row + bl;
}
else
{
badRow = true;
}
}
__syncthreads();
if (inArray(blockC, col, blC))
{
if (equivalentBlockOnebyN(blockC, row, col, blC, blR, str, true))
{
if (!notInQueue(queue, blockC, row, col, blC, blR, true) & !badCol)
{
addToQueue(queue, blockC, row, col, blC, blR, true);
truth = true;
__syncthreads();
}
}
}
if (inArray(blockR, row, blR))
{
if (equivalentBlockOnebyN(blockR, row, col,blC, blR, str, false))
{
if (!notInQueue(queue, blockR, row, col, blC, blR, false) & !badRow)
{
addToQueue(queue, blockR, row, col, blC, blR, false);
truth = true;
__syncthreads();
}
}
}
if (truth == true)
{
for ( int i = 0 ; i < 5; i++)
{
if (blockStruct[i].color == colors[str[row][col]])
{
atomicAdd(&blockStruct[i].amount, 1 ) ;
}
}
}
}
}
void optimize(int str[][], int rows, int cols)
{
int n = 1024;
block block1x4[5];
block block1x3[5];
block block1x2[5];
block block1x1[5];
for (int i = 0; i < 5; i++)
{
block1x4[i].color = i;
block1x4[i].bTNum = 0;
block1x4[i].amount = 0 ;
block1x3[i].color = i;
block1x3[i].bTNum = 1;
block1x3[i].amount = 0 ;
block1x2[i].color = i;
block1x2[i].bTNum = 2;
block1x2[i].amount = 0 ;
block1x1[i].color = i;
block1x1[i].bTNum = 3;
block1x1[i].amount = 0 ;
}
const dim3 block_dim(((rows * cols) + n - 1) / n);
const dim3 thread_dim(n);
char *colors[] = { "black", "white", "purple", "yellow", "blue" } ;
char *bT[] = { "1x4", "1x3", "1x2", "1x1" };
int colorsNum[] = { 0, 1, 2, 3, 4 };
int bTNum[] = { 0, 1, 2, 3, 4, 5, 6 } ;
block *dBlock1x4, *dBlock1x3, *dBlock1x2, *dBlock1x1;
checkCudaErrors(hipMalloc((void **)&dBlock1x4, sizeof(block) * 5));
checkCudaErrors(hipMalloc(&dBlock1x3, sizeof(block) * 5));
checkCudaErrors(hipMalloc(&dBlock1x2, sizeof(block) * 5));
checkCudaErrors(hipMalloc(&dBlock1x1, sizeof(block) * 5));
checkCudaErrors(hipMemcpy(dBlock1x4, block1x4, sizeof(block) * 5, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dBlock1x3, block1x3, sizeof(block) * 5, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dBlock1x2, block1x2, sizeof(block) * 5, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dBlock1x1, block1x1, sizeof(block) * 5, hipMemcpyHostToDevice));
int **dStr, *dqueue, *dColorsNum, **dbTNum;
checkCudaErrors(hipMalloc(&dColorsNum, sizeof(int) * 5));
checkCudaErrors(hipMalloc(&dbTNum, sizeof(int) * 7));
checkCudaErrors(hipMalloc(&dStr, sizeof(int) * (rows* cols) ));
checkCudaErrors(hipMalloc(&dqueue, sizeof(int) * (rows * cols )));
checkCudaErrors(hipMemcpy(dColorsNum, colorsNum, sizeof(int) * 5, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dbTNum, bTNum, sizeof(int) * 7, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dStr, str, sizeof(int) * (rows* cols), hipMemcpyHostToDevice));
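// Greedy covering passes: largest blocks first (1x4), then 1x3, 1x2 and finally 1x1; cells claimed
// in an earlier pass stay marked in dqueue and are skipped by later passes.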
hipLaunchKernelGGL(( general), dim3(block_dim), dim3(thread_dim), sizeof(int) * 72, 0, dqueue, dStr, rows, cols, 4, 4, dbTNum, dColorsNum, dBlock1x4);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( general), dim3(block_dim), dim3(thread_dim), sizeof(int) * 72, 0, dqueue, dStr, rows, cols, 3, 3, dbTNum, dColorsNum, dBlock1x3);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( general), dim3(block_dim), dim3(thread_dim), sizeof(int) * 72, 0, dqueue, dStr, rows, cols, 2, 2, dbTNum, dColorsNum, dBlock1x2);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( general), dim3(block_dim), dim3(thread_dim), sizeof(int) * 72, 0, dqueue, dStr, rows, cols, 1, 1, dbTNum, dColorsNum, dBlock1x1);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(block1x4, dBlock1x4, sizeof(block) * 5, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(block1x3, dBlock1x3, sizeof(block) * 5, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(block1x2, dBlock1x2, sizeof(block) * 5, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(block1x1, dBlock1x1, sizeof(block) * 5, hipMemcpyDeviceToHost));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
int bcount = 0 ;
for (int i = 0 ; i < 5; i++)
{
printf ("Block Color: %c Block Type: %c Amount: %d \n", colors[block1x4[i].color] , bT[block1x4[i].bTNum], block1x4[i].amount );
printf ("Block Color: %c Block Type: %c Amount: %d \n", colors[block1x3[i].color] , bT[block1x3[i].bTNum], block1x3[i].amount );
printf ("Block Color: %c Block Type: %c Amount: %d \n", colors[block1x2[i].color] , bT[block1x2[i].bTNum], block1x2[i].amount );
printf ("Block Color: %c Block Type: %c Amount: %d \n", colors[block1x1[i].color] , bT[block1x1[i].bTNum], block1x1[i].amount );
bcount = bcount + block1x1[i].amount + block1x2[i].amount + block1x3[i].amount + block1x4[i].amount ;
}
printf ("Total Blocks: %d", bcount) ;
} | 019a10645b8bdc29fa2bc33fa4f2f2e7f70be76e.cu | #include "utils.h"
#include "stdafx.h"
#include <unordered_map>
#include <vector>
#include <algorithm>
#include <iostream>
#include <string>
#include <tchar.h>
#include <thrust/device_vector.h>
using namespace std;
struct block {
int color ;
int bTNum ;
int amount;
};
__device__
bool inArray(int block[], int size, int numOfElem)
{
for (int i = 0; i < numOfElem ; ++i)
{
if (block[i] >= size)
{
return false;
}
}
__syncthreads();
return true;
}
__device__
bool equivalentBlockOnebyN(int *block, int row, int col, int blC, int blR, int **str, bool isRowCompare)
{
if (isRowCompare)
{
for (int b = 0; b < blC; b++)
{
if (str[row][b] != str[row][col])
{
return false;
}
}
}
else
{
for (int b = 0; b < blR; b++)
{
if (str[b][col] != str[row][col])
{
return false;
}
}
}
return true;
}
__device__
bool notInQueue(int *queue, int *block, int row, int col, int blC, int blR, bool isRowCompare)
{
if (isRowCompare)
{
for (int b = 0; b < blC; b++)
{
int oneDArr = row + block[b] ;
if (queue[oneDArr] == 1)
{
return true;
}
}
}
else
{
for (int b = 0; b < blR; b++)
{
int oneDArr = row * block[b] + col ;
if (queue[oneDArr]== 1)
{
return true;
}
}
}
return false;
}
__device__
void addToQueue(int *queue, int *block, int row, int col, int blC, int blR, bool isRowCompare)
{
if (isRowCompare)
{
for (int b = 0; b < blC; b++)
{
int oneDArr = row * block[b] ;
atomicAdd(&queue[oneDArr], 1);
}
}
else
{
for (int b = 0; b < blR; b++)
{
int oneDArr = row * block[b] + col ;
atomicAdd(&queue[oneDArr], 1);
}
}
}
__device__
bool isInQueue ( int *queue, int item ) {
if ( queue[item] == 1 ) {
return true;
}
else{
return false;
}
}
__global__
void general(int *queue, int **str, int rowP, int colP, int blR, int blC, int **blockType, int *colors, block *blockStruct)
{
extern __shared__ int shared[];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
bool truth = false;
bool badRow = false;
bool badCol = false;
int blockC[4] ;
int blockR[4] ;
int oneDArr = row * rowP + col;
if (row >= rowP || col >= colP){ return; }
if (!isInQueue(queue, oneDArr) )
{
for (int bl = 0; bl < blC; bl++)
{
if (bl < colP)
{
blockC[bl] = col + bl;
}
else
{
badCol = true;
}
}
for (int bl = 0; bl < blR; bl++)
{
if (bl < rowP)
{
blockR[bl] = row + bl;
}
else
{
badRow = true;
}
}
__syncthreads();
if (inArray(blockC, col, blC))
{
if (equivalentBlockOnebyN(blockC, row, col, blC, blR, str, true))
{
if (!notInQueue(queue, blockC, row, col, blC, blR, true) & !badCol)
{
addToQueue(queue, blockC, row, col, blC, blR, true);
truth = true;
__syncthreads();
}
}
}
if (inArray(blockR, row, blR))
{
if (equivalentBlockOnebyN(blockR, row, col,blC, blR, str, false))
{
if (!notInQueue(queue, blockR, row, col, blC, blR, false) & !badRow)
{
addToQueue(queue, blockR, row, col, blC, blR, false);
truth = true;
__syncthreads();
}
}
}
if (truth == true)
{
for ( int i = 0 ; i < 5; i++)
{
if (blockStruct[i].color == colors[str[row][col]])
{
atomicAdd(&blockStruct[i].amount, 1 ) ;
}
}
}
}
}
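// Tallies per-color block counts: launches the `general` kernel once per block
// size (1x4, 1x3, 1x2, 1x1); each pass atomically accumulates match counts into
// its block table, which is then copied back and printed along with a grand total.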
void optimize(int **str, int rows, int cols)
{
int n = 1024;
block block1x4[5];
block block1x3[5];
block block1x2[5];
block block1x1[5];
for (int i = 0; i < 5; i++)
{
block1x4[i].color = i;
block1x4[i].bTNum = 0;
block1x4[i].amount = 0 ;
block1x3[i].color = i;
block1x3[i].bTNum = 1;
block1x3[i].amount = 0 ;
block1x2[i].color = i;
block1x2[i].bTNum = 2;
block1x2[i].amount = 0 ;
block1x1[i].color = i;
block1x1[i].bTNum = 3;
block1x1[i].amount = 0 ;
}
const dim3 block_dim(((rows * cols) + n - 1) / n);
const dim3 thread_dim(n);
char *colors[] = { "black", "white", "purple", "yellow", "blue" } ;
char *bT[] = { "1x4", "1x3", "1x2", "1x1" };
int colorsNum[] = { 0, 1, 2, 3, 4 };
int bTNum[] = { 0, 1, 2, 3, 4, 5, 6 } ;
block *dBlock1x4, *dBlock1x3, *dBlock1x2, *dBlock1x1;
checkCudaErrors(cudaMalloc((void **)&dBlock1x4, sizeof(block) * 5));
checkCudaErrors(cudaMalloc(&dBlock1x3, sizeof(block) * 5));
checkCudaErrors(cudaMalloc(&dBlock1x2, sizeof(block) * 5));
checkCudaErrors(cudaMalloc(&dBlock1x1, sizeof(block) * 5));
checkCudaErrors(cudaMemcpy(dBlock1x4, block1x4, sizeof(block) * 5, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dBlock1x3, block1x3, sizeof(block) * 5, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dBlock1x2, block1x2, sizeof(block) * 5, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dBlock1x1, block1x1, sizeof(block) * 5, cudaMemcpyHostToDevice));
int **dStr, *dqueue, *dColorsNum, **dbTNum;
checkCudaErrors(cudaMalloc(&dColorsNum, sizeof(int) * 5));
checkCudaErrors(cudaMalloc(&dbTNum, sizeof(int) * 7));
checkCudaErrors(cudaMalloc(&dStr, sizeof(int) * (rows* cols) ));
checkCudaErrors(cudaMalloc(&dqueue, sizeof(int) * (rows * cols )));
checkCudaErrors(cudaMemcpy(dColorsNum, colorsNum, sizeof(int) * 5, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dbTNum, bTNum, sizeof(int) * 7, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dStr, str, sizeof(int) * (rows* cols), cudaMemcpyHostToDevice));
general<<<block_dim, thread_dim, sizeof(int) * 72>>>(dqueue, dStr, rows, cols, 4, 4, dbTNum, dColorsNum, dBlock1x4);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
general<<<block_dim, thread_dim, sizeof(int) * 72>>>(dqueue, dStr, rows, cols, 3, 3, dbTNum, dColorsNum, dBlock1x3);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
general<<<block_dim, thread_dim, sizeof(int) * 72>>>(dqueue, dStr, rows, cols, 2, 2, dbTNum, dColorsNum, dBlock1x2);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
general<<<block_dim, thread_dim, sizeof(int) * 72>>>(dqueue, dStr, rows, cols, 1, 1, dbTNum, dColorsNum, dBlock1x1);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(block1x4, dBlock1x4, sizeof(block) * 5, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(block1x3, dBlock1x3, sizeof(block) * 5, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(block1x2, dBlock1x2, sizeof(block) * 5, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(block1x1, dBlock1x1, sizeof(block) * 5, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
int bcount = 0 ;
for (int i = 0 ; i < 5; i++)
{
printf ("Block Color: %c Block Type: %c Amount: %d \n", colors[block1x4[i].color] , bT[block1x4[i].bTNum], block1x4[i].amount );
printf ("Block Color: %c Block Type: %c Amount: %d \n", colors[block1x3[i].color] , bT[block1x3[i].bTNum], block1x3[i].amount );
printf ("Block Color: %c Block Type: %c Amount: %d \n", colors[block1x2[i].color] , bT[block1x2[i].bTNum], block1x2[i].amount );
printf ("Block Color: %c Block Type: %c Amount: %d \n", colors[block1x1[i].color] , bT[block1x1[i].bTNum], block1x1[i].amount );
bcount = bcount + block1x1[i].amount + block1x2[i].amount + block1x3[i].amount + block1x4[i].amount ;
}
printf ("Total Blocks: %d", bcount) ;
} |
657a447a350160e3d9ca2a4d484603a9ba6e01b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaS_ssdToOutput_kernels.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
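// Benchmark sweep: for the first argv[1] entries of matrices_ and every launch
// configuration in blocks_, allocate dummy kernel arguments, warm up with 10
// launches, then time 1000 launches of cudaS_ssdToOutput_kernels and print
// [elapsed_microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].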
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int batchSize = 1;
unsigned int nbClass = 1;
unsigned int nbAnchors = 1;
unsigned int channelWidth = 1;
unsigned int channelHeight = 1;
unsigned int nbProposals = 1;
unsigned int *nbValidROIs = NULL;
hipMalloc(&nbValidROIs, XSIZE*YSIZE);
unsigned int cls = 1;
unsigned int totalParts = 1;
unsigned int totalTemplates = 1;
unsigned int maxParts = 1;
unsigned int maxTemplates = 1;
unsigned int cumulParts = 1;
unsigned int cumulTemplates = 1;
unsigned int nbParts = 1;
unsigned int nbTemplates = 1;
float xRatio = 1;
float yRatio = 1;
float xOutputRatio = 1;
float yOutputRatio = 1;
const float *roi_bbox = NULL;
hipMalloc(&roi_bbox, XSIZE*YSIZE);
const float *roi_anchors = NULL;
hipMalloc(&roi_anchors, XSIZE*YSIZE);
const float *anchors = NULL;
hipMalloc(&anchors, XSIZE*YSIZE);
const float *inputs_parts = NULL;
hipMalloc(&inputs_parts, XSIZE*YSIZE);
const float *inputs_templates = NULL;
hipMalloc(&inputs_templates, XSIZE*YSIZE);
float *outputs = NULL;
hipMalloc(&outputs, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((cudaS_ssdToOutput_kernels), dim3(gridBlock), dim3(threadBlock), 0, 0, batchSize, nbClass, nbAnchors, channelWidth, channelHeight, nbProposals, nbValidROIs, cls, totalParts, totalTemplates, maxParts, maxTemplates, cumulParts, cumulTemplates, nbParts, nbTemplates, xRatio, yRatio, xOutputRatio, yOutputRatio, roi_bbox, roi_anchors, anchors, inputs_parts, inputs_templates, outputs);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((cudaS_ssdToOutput_kernels), dim3(gridBlock), dim3(threadBlock), 0, 0, batchSize, nbClass, nbAnchors, channelWidth, channelHeight, nbProposals, nbValidROIs, cls, totalParts, totalTemplates, maxParts, maxTemplates, cumulParts, cumulTemplates, nbParts, nbTemplates, xRatio, yRatio, xOutputRatio, yOutputRatio, roi_bbox, roi_anchors, anchors, inputs_parts, inputs_templates, outputs);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((cudaS_ssdToOutput_kernels), dim3(gridBlock), dim3(threadBlock), 0, 0, batchSize, nbClass, nbAnchors, channelWidth, channelHeight, nbProposals, nbValidROIs, cls, totalParts, totalTemplates, maxParts, maxTemplates, cumulParts, cumulTemplates, nbParts, nbTemplates, xRatio, yRatio, xOutputRatio, yOutputRatio, roi_bbox, roi_anchors, anchors, inputs_parts, inputs_templates, outputs);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 657a447a350160e3d9ca2a4d484603a9ba6e01b4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaS_ssdToOutput_kernels.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
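// Benchmark sweep: for the first argv[1] entries of matrices_ and every launch
// configuration in blocks_, allocate dummy kernel arguments, warm up with 10
// launches, then time 1000 launches of cudaS_ssdToOutput_kernels and print
// [elapsed_microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].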
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int batchSize = 1;
unsigned int nbClass = 1;
unsigned int nbAnchors = 1;
unsigned int channelWidth = 1;
unsigned int channelHeight = 1;
unsigned int nbProposals = 1;
unsigned int *nbValidROIs = NULL;
cudaMalloc(&nbValidROIs, XSIZE*YSIZE);
unsigned int cls = 1;
unsigned int totalParts = 1;
unsigned int totalTemplates = 1;
unsigned int maxParts = 1;
unsigned int maxTemplates = 1;
unsigned int cumulParts = 1;
unsigned int cumulTemplates = 1;
unsigned int nbParts = 1;
unsigned int nbTemplates = 1;
float xRatio = 1;
float yRatio = 1;
float xOutputRatio = 1;
float yOutputRatio = 1;
const float *roi_bbox = NULL;
cudaMalloc(&roi_bbox, XSIZE*YSIZE);
const float *roi_anchors = NULL;
cudaMalloc(&roi_anchors, XSIZE*YSIZE);
const float *anchors = NULL;
cudaMalloc(&anchors, XSIZE*YSIZE);
const float *inputs_parts = NULL;
cudaMalloc(&inputs_parts, XSIZE*YSIZE);
const float *inputs_templates = NULL;
cudaMalloc(&inputs_templates, XSIZE*YSIZE);
float *outputs = NULL;
cudaMalloc(&outputs, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cudaS_ssdToOutput_kernels<<<gridBlock,threadBlock>>>(batchSize,nbClass,nbAnchors,channelWidth,channelHeight,nbProposals,nbValidROIs,cls,totalParts,totalTemplates,maxParts,maxTemplates,cumulParts,cumulTemplates,nbParts,nbTemplates,xRatio,yRatio,xOutputRatio,yOutputRatio,roi_bbox,roi_anchors,anchors,inputs_parts,inputs_templates,outputs);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaS_ssdToOutput_kernels<<<gridBlock,threadBlock>>>(batchSize,nbClass,nbAnchors,channelWidth,channelHeight,nbProposals,nbValidROIs,cls,totalParts,totalTemplates,maxParts,maxTemplates,cumulParts,cumulTemplates,nbParts,nbTemplates,xRatio,yRatio,xOutputRatio,yOutputRatio,roi_bbox,roi_anchors,anchors,inputs_parts,inputs_templates,outputs);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaS_ssdToOutput_kernels<<<gridBlock,threadBlock>>>(batchSize,nbClass,nbAnchors,channelWidth,channelHeight,nbProposals,nbValidROIs,cls,totalParts,totalTemplates,maxParts,maxTemplates,cumulParts,cumulTemplates,nbParts,nbTemplates,xRatio,yRatio,xOutputRatio,yOutputRatio,roi_bbox,roi_anchors,anchors,inputs_parts,inputs_templates,outputs);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
955254d01f419b6174fc7d7f807d5ef4c618340f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#define N (2048*2048)
#define THREAD_PER_BLOCK 512
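// Each thread computes one element c[i][j] of the 2048x2048 product:
// blockIdx.x/4 selects the row, (blockIdx.x%4)*blockDim.x + threadIdx.x the
// column, so four 512-thread blocks cover one 2048-element row.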
__global__ void mul( int *a, int *b, int *c) {
int i = blockIdx.x/4;
int j = (blockIdx.x%4) * blockDim.x + threadIdx.x;
c[i*2048+j] = 0;
for(int k=0; k<2048; ++k){
c[i*2048+j] += a[i*2048+k]*b[k*2048+j];
}
}
void random_ints(int *p, int n) {
int i;
for(i=0; i<n; i++) {
p[i]=rand();
}
}
int main( void ) {
int *a, *b, *c, *d; // host copies of a, b, c
int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
int size = N * sizeof( int ); // we need space for N
int i, j, k, errors = 0;
// allocate device copies of a, b, c
hipMalloc( (void**)&dev_a, size );
hipMalloc( (void**)&dev_b, size );
hipMalloc( (void**)&dev_c, size );
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
d = (int*)malloc( size );
random_ints( a, N );
random_ints( b, N );
// copy inputs to device
hipMemcpy( dev_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, size, hipMemcpyHostToDevice );
// launch the mul() kernel with one thread per output element (N threads total)
hipLaunchKernelGGL(( mul), dim3(N/THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK) , 0, 0, dev_a, dev_b, dev_c);
// copy device result back to host copy of c
hipMemcpy( c, dev_c, size, hipMemcpyDeviceToHost );
for(i=0; i<N; i++) {
d[i] = 0;
}
for(i=0; i<2048; i++) {
for(j=0; j<2048; j++) {
for(k=0; k<2048; k++) {
d[i*2048+j] += a[i*2048+k]*b[k*2048+j];
}
if(c[i*2048+j]!=d[i*2048+j]) {
printf("error: expected %d, got %d!\n",d[i*2048+j], c[i*2048+j]);
errors++;
break;
}
}
}
if(errors==0) {printf("correct! \n");}
free( a ); free( b ); free( c ); free( d );
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
return 0;
} | 955254d01f419b6174fc7d7f807d5ef4c618340f.cu | #include <stdio.h>
#include <math.h>
#define N (2048*2048)
#define THREAD_PER_BLOCK 512
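// Each thread computes one element c[i][j] of the 2048x2048 product:
// blockIdx.x/4 selects the row, (blockIdx.x%4)*blockDim.x + threadIdx.x the
// column, so four 512-thread blocks cover one 2048-element row.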
__global__ void mul( int *a, int *b, int *c) {
int i = blockIdx.x/4;
int j = (blockIdx.x%4) * blockDim.x + threadIdx.x;
c[i*2048+j] = 0;
for(int k=0; k<2048; ++k){
c[i*2048+j] += a[i*2048+k]*b[k*2048+j];
}
}
void random_ints(int *p, int n) {
int i;
for(i=0; i<n; i++) {
p[i]=rand();
}
}
int main( void ) {
int *a, *b, *c, *d; // host copies of a, b, c
int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
int size = N * sizeof( int ); // we need space for N
int i, j, k, errors = 0;
// allocate device copies of a, b, c
cudaMalloc( (void**)&dev_a, size );
cudaMalloc( (void**)&dev_b, size );
cudaMalloc( (void**)&dev_c, size );
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
d = (int*)malloc( size );
random_ints( a, N );
random_ints( b, N );
// copy inputs to device
cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );
// launch the mul() kernel with one thread per output element (N threads total)
mul<<< N/THREAD_PER_BLOCK, THREAD_PER_BLOCK >>>( dev_a, dev_b, dev_c);
// copy device result back to host copy of c
cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost );
for(i=0; i<N; i++) {
d[i] = 0;
}
for(i=0; i<2048; i++) {
for(j=0; j<2048; j++) {
for(k=0; k<2048; k++) {
d[i*2048+j] += a[i*2048+k]*b[k*2048+j];
}
if(c[i*2048+j]!=d[i*2048+j]) {
printf("error: expected %d, got %d!\n",d[i*2048+j], c[i*2048+j]);
errors++;
break;
}
}
}
if(errors==0) {printf("correct! \n");}
free( a ); free( b ); free( c ); free( d );
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
} |
de3e6dfa2249964b9d16ac9a81bde50fd1e47f90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13) {
float tmp_1 = (var_2 / var_3 * var_4);
comp = tmp_1 * (-1.5951E26f - (var_5 * (var_6 + (var_7 * var_8))));
for (int i=0; i < var_1; ++i) {
float tmp_2 = (var_9 * -1.6596E-42f * var_10 / (var_11 / (-1.8762E-35f + var_12)));
comp += tmp_2 + asinf(+1.1577E23f / -1.1409E35f * asinf(-1.4979E-43f / (-1.1623E36f + (var_13 * +1.2639E-25f))));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14);
hipDeviceSynchronize();
return 0;
}
| de3e6dfa2249964b9d16ac9a81bde50fd1e47f90.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13) {
float tmp_1 = (var_2 / var_3 * var_4);
comp = tmp_1 * (-1.5951E26f - (var_5 * (var_6 + (var_7 * var_8))));
for (int i=0; i < var_1; ++i) {
float tmp_2 = (var_9 * -1.6596E-42f * var_10 / (var_11 / (-1.8762E-35f + var_12)));
comp += tmp_2 + asinf(+1.1577E23f / -1.1409E35f * asinf(-1.4979E-43f / (-1.1623E36f + (var_13 * +1.2639E-25f))));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14);
cudaDeviceSynchronize();
return 0;
}
|
200cc9164dab5bcb60eaaed653ecdc880e72c87f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* bucket_hash.cu
*
* Created on: Apr 14, 2015
* Author: christian
*/
#include "structures.h"
/*
* Awesome routine from a PhD thesis from UC Davis.
* Title is "Efficient Hash Tables on the GPU" by
* Dan Anthony Feliciano Alcantara.
*/
__global__
void find_boundaries
(
const int num_keys,
const int num_buckets,
const int * __restrict__ which_bucket,
int * __restrict__ bucket_starts
)
{
const int thread_id = currThreadID();
for (int tid = thread_id; tid < num_keys; tid += grid_size())
{
// get start and end of each bucket
const int begin = (tid > 0 ? which_bucket[tid - 1] : 0);
const int end = which_bucket[tid];
// if bucket has length > 0...
if (begin != end)
{
for (int i = begin; i < end; ++i)
{
// sets bucket starts value to index of bucket
bucket_starts[i] = tid;
}
}
// last thread writes number of elements to the rest
// of the array
if (tid == num_keys - 1)
{
for (int i = end; i < num_buckets; ++i)
{
bucket_starts[i] = num_keys;
}
}
}
}
| 200cc9164dab5bcb60eaaed653ecdc880e72c87f.cu | /*
* bucket_hash.cu
*
* Created on: Apr 14, 2015
* Author: christian
*/
#include "structures.h"
/*
* Awesome routine from a PhD thesis from UC Davis.
* Title is "Efficient Hash Tables on the GPU" by
* Dan Anthony Feliciano Alcantara.
*/
__global__
void find_boundaries
(
const int num_keys,
const int num_buckets,
const int * __restrict__ which_bucket,
int * __restrict__ bucket_starts
)
{
const int thread_id = currThreadID();
for (int tid = thread_id; tid < num_keys; tid += grid_size())
{
// get start and end of each bucket
const int begin = (tid > 0 ? which_bucket[tid - 1] : 0);
const int end = which_bucket[tid];
// if bucket has length > 0...
if (begin != end)
{
for (int i = begin; i < end; ++i)
{
// sets bucket starts value to index of bucket
bucket_starts[i] = tid;
}
}
// last thread writes number of elements to the rest
// of the array
if (tid == num_keys - 1)
{
for (int i = end; i < num_buckets; ++i)
{
bucket_starts[i] = num_keys;
}
}
}
}
|
56aa6e7f1a8d9b5823954580479d4a1866c0b546.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
#include <algorithm>
#include <iomanip>
#include <limits>
#include "bfs.cuh"
#include "graph.hpp"
#include <utilities/error.hpp>
#include "bfs_kernels_hip.cuh"
#include "mg/bfs.cuh"
#include "mg/common_utils.cuh"
#include "traversal_common.cuh"
#include "utilities/graph_utils.cuh"
namespace cugraph {
namespace detail {
enum BFS_ALGO_STATE { TOPDOWN, BOTTOMUP };
template <typename IndexType>
void BFS<IndexType>::setup()
{
// Determinism flag, false by default
deterministic = false;
// Working data
// Each vertex can be in the frontier at most once
// We will update frontier during the execution
// We need the orig to reset frontier, or ALLOC_FREE_TRY
original_frontier.resize(number_of_vertices);
frontier = original_frontier.data().get();
// size of bitmaps for vertices
vertices_bmap_size = (number_of_vertices / (8 * sizeof(int)) + 1);
// ith bit of visited_bmap is set <=> ith vertex is visited
visited_bmap.resize(vertices_bmap_size);
// ith bit of isolated_bmap is set <=> degree of ith vertex = 0
isolated_bmap.resize(vertices_bmap_size);
// vertices_degree[i] = degree of vertex i
vertex_degree.resize(number_of_vertices);
// We will need an (n+1)-int buffer for two different things (bottom up or top down) - sharing it
// since those uses are mutually exclusive
buffer_np1_1.resize(number_of_vertices + 1);
buffer_np1_2.resize(number_of_vertices + 1);
// Using buffers : top down
// frontier_vertex_degree[i] is the degree of vertex frontier[i]
frontier_vertex_degree = buffer_np1_1.data().get();
// exclusive sum of frontier_vertex_degree
exclusive_sum_frontier_vertex_degree = buffer_np1_2.data().get();
// Using buffers : bottom up
// contains list of unvisited vertices
unvisited_queue = buffer_np1_1.data().get();
// size of the "last" unvisited queue : size_last_unvisited_queue
// refers to the size of unvisited_queue
// which may not be up to date (the queue may contains vertices that are now
// visited)
// We may leave vertices unvisited after bottom up main kernels - storing them
// here
left_unvisited_queue = buffer_np1_2.data().get();
// We use buckets of edges (32 edges per bucket for now, see exact macro in bfs_kernels).
// frontier_vertex_degree_buckets_offsets[i] is the index k such as frontier[k] is the source of
// the first edge of the bucket See top down kernels for more details
exclusive_sum_frontier_vertex_buckets_offsets.resize(
((number_of_edges / TOP_DOWN_EXPAND_DIMX + 1) * NBUCKETS_PER_BLOCK + 2));
// Init device-side counters
// Those counters must be/can be reset at each bfs iteration
// Keeping them adjacent in memory allows us to call only one hipMemset - launch latency is the
// current bottleneck
d_counters_pad.resize(4);
d_new_frontier_cnt = d_counters_pad.data().get();
d_mu = d_counters_pad.data().get() + 1;
d_unvisited_cnt = d_counters_pad.data().get() + 2;
d_left_unvisited_cnt = d_counters_pad.data().get() + 3;
// Lets use this int* for the next 3 lines
// Its dereferenced value is not initialized - so we dont care about what we
// put in it
IndexType *d_nisolated = d_new_frontier_cnt;
hipMemsetAsync(d_nisolated, 0, sizeof(IndexType), stream);
// Computing isolated_bmap
// Only dependent on graph - not source vertex - done once
traversal::flag_isolated_vertices(number_of_vertices,
isolated_bmap.data().get(),
row_offsets,
vertex_degree.data().get(),
d_nisolated,
stream);
hipMemcpyAsync(&nisolated, d_nisolated, sizeof(IndexType), hipMemcpyDeviceToHost, stream);
// We need nisolated to be ready to use
hipStreamSynchronize(stream);
}
template <typename IndexType>
void BFS<IndexType>::configure(IndexType *_distances,
IndexType *_predecessors,
double *_sp_counters,
int *_edge_mask)
{
distances = _distances;
predecessors = _predecessors;
edge_mask = _edge_mask;
sp_counters = _sp_counters;
useEdgeMask = (edge_mask != NULL);
computeDistances = (distances != NULL);
computePredecessors = (predecessors != NULL);
// We need distances to use bottom up
if (directed && !computeDistances) {
distances_vals.resize(number_of_vertices);
distances = distances_vals.data().get();
}
// In case the shortest path counters is required, previous_bmap has to be allocated
if (sp_counters) { previous_visited_bmap.resize(vertices_bmap_size); }
}
template <typename IndexType>
void BFS<IndexType>::traverse(IndexType source_vertex)
{
// Init visited_bmap
// If the graph is undirected, we note that
// we will never discover isolated vertices (in degree = out degree = 0)
// we avoid a lot of work by flagging them now
// in g500 graphs they represent ~25% of total vertices
// more than that for wiki and twitter graphs
if (directed) {
hipMemsetAsync(visited_bmap.data().get(), 0, vertices_bmap_size * sizeof(int), stream);
} else {
hipMemcpyAsync(visited_bmap.data().get(),
isolated_bmap.data().get(),
vertices_bmap_size * sizeof(int),
hipMemcpyDeviceToDevice,
stream);
}
// If needed, setting all vertices as undiscovered (inf distance)
// We dont use computeDistances here
// if the graph is undirected, we may need distances even if
// computeDistances is false
if (distances)
traversal::fill_vec(distances, number_of_vertices, traversal::vec_t<IndexType>::max, stream);
// If needed, setting all predecessors to non-existent (-1)
if (computePredecessors) {
hipMemsetAsync(predecessors, -1, number_of_vertices * sizeof(IndexType), stream);
}
if (sp_counters) {
hipMemsetAsync(sp_counters, 0, number_of_vertices * sizeof(double), stream);
double value = 1;
hipMemcpyAsync(sp_counters + source_vertex, &value, sizeof(double), hipMemcpyHostToDevice);
}
//
// Initial frontier
//
frontier = original_frontier.data().get();
if (distances) { hipMemsetAsync(&distances[source_vertex], 0, sizeof(IndexType), stream); }
// Setting source_vertex as visited
// There may be bit already set on that bmap (isolated vertices) - if the
// graph is undirected
int current_visited_bmap_source_vert = 0;
if (!directed) {
hipMemcpyAsync(¤t_visited_bmap_source_vert,
visited_bmap.data().get() + (source_vertex / INT_SIZE),
sizeof(int),
hipMemcpyDeviceToHost);
// We need current_visited_bmap_source_vert
hipStreamSynchronize(stream);
}
int m = (1 << (source_vertex % INT_SIZE));
// In that case, source is isolated, done now
if (!directed && (m & current_visited_bmap_source_vert)) {
// Init distances and predecessors are done, (cf Streamsync in previous if)
return;
}
m |= current_visited_bmap_source_vert;
hipMemcpyAsync(visited_bmap.data().get() + (source_vertex / INT_SIZE),
&m,
sizeof(int),
hipMemcpyHostToDevice,
stream);
// Adding source_vertex to init frontier
hipMemcpyAsync(&frontier[0], &source_vertex, sizeof(IndexType), hipMemcpyHostToDevice, stream);
// mf : edges in frontier
// nf : vertices in frontier
// mu : edges undiscovered
// nu : nodes undiscovered
// lvl : current frontier's depth
IndexType mf, nf, mu, nu;
bool growing;
IndexType lvl = 1;
// Frontier has one vertex
nf = 1;
// all edges are undiscovered (by def isolated vertices have 0 edges)
mu = number_of_edges;
// all non-isolated vertices are undiscovered (except the source vertex, which is in the frontier)
// That number is wrong if source_vertex is also isolated - but it's not important
nu = number_of_vertices - nisolated - nf;
// Last frontier was 0, now it is 1
growing = true;
IndexType size_last_left_unvisited_queue = number_of_vertices; // we just need value > 0
IndexType size_last_unvisited_queue = 0; // queue empty
// Typical pre-top down workflow. set_frontier_degree + exclusive-scan
traversal::set_frontier_degree(
frontier_vertex_degree, frontier, vertex_degree.data().get(), nf, stream);
traversal::exclusive_sum(
frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream);
hipMemcpyAsync(&mf,
&exclusive_sum_frontier_vertex_degree[nf],
sizeof(IndexType),
hipMemcpyDeviceToHost,
stream);
// We need mf
hipStreamSynchronize(stream);
// At first we know we have to use top down
BFS_ALGO_STATE algo_state = TOPDOWN;
// useDistances : we check if a vertex is a parent using distances in bottom up - distances become
// working data; undirected graph : need parents to be in children's neighbors
// In case the shortest path counters need to be computed, the bottom_up approach cannot be used
bool can_use_bottom_up = (!sp_counters && !directed && distances);
while (nf > 0) {
new_frontier = frontier + nf;
IndexType old_nf = nf;
resetDevicePointers();
if (can_use_bottom_up) {
// Choosing algo
// Finite state machine described in http://parlab.eecs.berkeley.edu/sites/all/parlab/files/main.pdf
switch (algo_state) {
case TOPDOWN:
if (mf > mu / alpha) algo_state = BOTTOMUP;
break;
case BOTTOMUP:
if (!growing && nf < number_of_vertices / beta) {
// We need to prepare the switch back to top down
// We couldnt keep track of mu during bottom up - because we dont know what mf is.
// Computing mu here
bfs_kernels::count_unvisited_edges(unvisited_queue,
size_last_unvisited_queue,
visited_bmap.data().get(),
vertex_degree.data().get(),
d_mu,
stream);
// Typical pre-top down workflow. set_frontier_degree + exclusive-scan
traversal::set_frontier_degree(
frontier_vertex_degree, frontier, vertex_degree.data().get(), nf, stream);
traversal::exclusive_sum(
frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream);
hipMemcpyAsync(&mf,
&exclusive_sum_frontier_vertex_degree[nf],
sizeof(IndexType),
hipMemcpyDeviceToHost,
stream);
hipMemcpyAsync(&mu, d_mu, sizeof(IndexType), hipMemcpyDeviceToHost, stream);
// We will need mf and mu
hipStreamSynchronize(stream);
algo_state = TOPDOWN;
}
break;
}
}
// Executing algo
switch (algo_state) {
case TOPDOWN:
// This step is only required if sp_counters is not nullptr
if (sp_counters) {
hipMemcpyAsync(previous_visited_bmap.data().get(),
visited_bmap.data().get(),
vertices_bmap_size * sizeof(int),
hipMemcpyDeviceToDevice,
stream);
// We need to copy the visited_bmap before doing the traversal
hipStreamSynchronize(stream);
}
traversal::compute_bucket_offsets(
exclusive_sum_frontier_vertex_degree,
exclusive_sum_frontier_vertex_buckets_offsets.data().get(),
nf,
mf,
stream);
bfs_kernels::frontier_expand(row_offsets,
col_indices,
frontier,
nf,
mf,
lvl,
new_frontier,
d_new_frontier_cnt,
exclusive_sum_frontier_vertex_degree,
exclusive_sum_frontier_vertex_buckets_offsets.data().get(),
previous_visited_bmap.data().get(),
visited_bmap.data().get(),
distances,
predecessors,
sp_counters,
edge_mask,
isolated_bmap.data().get(),
directed,
stream,
deterministic);
mu -= mf;
hipMemcpyAsync(&nf, d_new_frontier_cnt, sizeof(IndexType), hipMemcpyDeviceToHost, stream);
CHECK_CUDA(stream);
// We need nf
hipStreamSynchronize(stream);
if (nf) {
// Typical pre-top down workflow. set_frontier_degree + exclusive-scan
traversal::set_frontier_degree(
frontier_vertex_degree, new_frontier, vertex_degree.data().get(), nf, stream);
traversal::exclusive_sum(
frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream);
hipMemcpyAsync(&mf,
&exclusive_sum_frontier_vertex_degree[nf],
sizeof(IndexType),
hipMemcpyDeviceToHost,
stream);
// We need mf
hipStreamSynchronize(stream);
}
break;
case BOTTOMUP:
bfs_kernels::fill_unvisited_queue(visited_bmap.data().get(),
vertices_bmap_size,
number_of_vertices,
unvisited_queue,
d_unvisited_cnt,
stream,
deterministic);
size_last_unvisited_queue = nu;
bfs_kernels::bottom_up_main(unvisited_queue,
size_last_unvisited_queue,
left_unvisited_queue,
d_left_unvisited_cnt,
visited_bmap.data().get(),
row_offsets,
col_indices,
lvl,
new_frontier,
d_new_frontier_cnt,
distances,
predecessors,
edge_mask,
stream,
deterministic);
// The number of vertices left unvisited decreases
// If it wasnt necessary last time, it wont be this time
if (size_last_left_unvisited_queue) {
hipMemcpyAsync(&size_last_left_unvisited_queue,
d_left_unvisited_cnt,
sizeof(IndexType),
hipMemcpyDeviceToHost,
stream);
CHECK_CUDA(stream);
// We need last_left_unvisited_size
hipStreamSynchronize(stream);
bfs_kernels::bottom_up_large(left_unvisited_queue,
size_last_left_unvisited_queue,
visited_bmap.data().get(),
row_offsets,
col_indices,
lvl,
new_frontier,
d_new_frontier_cnt,
distances,
predecessors,
edge_mask,
stream,
deterministic);
}
hipMemcpyAsync(&nf, d_new_frontier_cnt, sizeof(IndexType), hipMemcpyDeviceToHost, stream);
CHECK_CUDA(stream);
// We will need nf
hipStreamSynchronize(stream);
break;
}
// Updating undiscovered edges count
nu -= nf;
// Using new frontier
frontier = new_frontier;
growing = (nf > old_nf);
++lvl;
}
}
template <typename IndexType>
void BFS<IndexType>::resetDevicePointers()
{
hipMemsetAsync(d_counters_pad.data().get(), 0, 4 * sizeof(IndexType), stream);
}
template <typename IndexType>
void BFS<IndexType>::clean()
{
// the vectors have a destructor that takes care of cleaning
}
// Explicit Instantiation
template class BFS<uint32_t>;
template class BFS<int>;
template class BFS<int64_t>;
} // namespace detail
// NOTE: SP counter increase extremely fast on large graph
// It can easily reach 1e40~1e70 on GAP-road.mtx
template <typename VT, typename ET, typename WT>
void bfs(raft::handle_t const &handle,
GraphCSRView<VT, ET, WT> const &graph,
VT *distances,
VT *predecessors,
double *sp_counters,
const VT start_vertex,
bool directed,
bool mg_batch)
{
static_assert(std::is_integral<VT>::value && sizeof(VT) >= sizeof(int32_t),
"Unsupported vertex id data type. Use integral types of size >= sizeof(int32_t)");
static_assert(std::is_same<VT, ET>::value,
"VT and ET should be the same time for the current BFS implementation");
static_assert(std::is_floating_point<WT>::value,
"Unsupported edge weight type. Use floating point types"); // actually, this is
// unnecessary for BFS
if (handle.comms_initialized() && !mg_batch) {
CUGRAPH_EXPECTS(sp_counters == nullptr,
"BFS Traversal shortest path is not supported in MG path");
mg::bfs<VT, ET, WT>(handle, graph, distances, predecessors, start_vertex);
} else {
VT number_of_vertices = graph.number_of_vertices;
ET number_of_edges = graph.number_of_edges;
const VT *indices_ptr = graph.indices;
const ET *offsets_ptr = graph.offsets;
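// Heuristic thresholds for the direction-optimizing traversal in detail::BFS:
// alpha triggers the top-down -> bottom-up switch (mf > mu / alpha),
// beta the switch back to top-down (nf < number_of_vertices / beta).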
int alpha = 15;
int beta = 18;
// FIXME: Use VT and ET in the BFS detail
cugraph::detail::BFS<VT> bfs(
number_of_vertices, number_of_edges, offsets_ptr, indices_ptr, directed, alpha, beta);
bfs.configure(distances, predecessors, sp_counters, nullptr);
bfs.traverse(start_vertex);
}
}
// Explicit Instantiation
template void bfs<uint32_t, uint32_t, float>(raft::handle_t const &handle,
GraphCSRView<uint32_t, uint32_t, float> const &graph,
uint32_t *distances,
uint32_t *predecessors,
double *sp_counters,
const uint32_t source_vertex,
bool directed,
bool mg_batch);
// Explicit Instantiation
template void bfs<uint32_t, uint32_t, double>(raft::handle_t const &handle,
GraphCSRView<uint32_t, uint32_t, double> const &graph,
uint32_t *distances,
uint32_t *predecessors,
double *sp_counters,
const uint32_t source_vertex,
bool directed,
bool mg_batch);
// Explicit Instantiation
template void bfs<int32_t, int32_t, float>(raft::handle_t const &handle,
GraphCSRView<int32_t, int32_t, float> const &graph,
int32_t *distances,
int32_t *predecessors,
double *sp_counters,
const int32_t source_vertex,
bool directed,
bool mg_batch);
// Explicit Instantiation
template void bfs<int32_t, int32_t, double>(raft::handle_t const &handle,
GraphCSRView<int32_t, int32_t, double> const &graph,
int32_t *distances,
int32_t *predecessors,
double *sp_counters,
const int32_t source_vertex,
bool directed,
bool mg_batch);
// Explicit Instantiation
template void bfs<int64_t, int64_t, float>(raft::handle_t const &handle,
GraphCSRView<int64_t, int64_t, float> const &graph,
int64_t *distances,
int64_t *predecessors,
double *sp_counters,
const int64_t source_vertex,
bool directed,
bool mg_batch);
// Explicit Instantiation
template void bfs<int64_t, int64_t, double>(raft::handle_t const &handle,
GraphCSRView<int64_t, int64_t, double> const &graph,
int64_t *distances,
int64_t *predecessors,
double *sp_counters,
const int64_t source_vertex,
bool directed,
bool mg_batch);
} // namespace cugraph
| 56aa6e7f1a8d9b5823954580479d4a1866c0b546.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
#include <algorithm>
#include <iomanip>
#include <limits>
#include "bfs.cuh"
#include "graph.hpp"
#include <utilities/error.hpp>
#include "bfs_kernels.cuh"
#include "mg/bfs.cuh"
#include "mg/common_utils.cuh"
#include "traversal_common.cuh"
#include "utilities/graph_utils.cuh"
namespace cugraph {
namespace detail {
enum BFS_ALGO_STATE { TOPDOWN, BOTTOMUP };
template <typename IndexType>
void BFS<IndexType>::setup()
{
// Determinism flag, false by default
deterministic = false;
// Working data
// Each vertex can be in the frontier at most once
// We will update frontier during the execution
// We need the orig to reset frontier, or ALLOC_FREE_TRY
original_frontier.resize(number_of_vertices);
frontier = original_frontier.data().get();
// size of bitmaps for vertices
vertices_bmap_size = (number_of_vertices / (8 * sizeof(int)) + 1);
// ith bit of visited_bmap is set <=> ith vertex is visited
visited_bmap.resize(vertices_bmap_size);
// ith bit of isolated_bmap is set <=> degree of ith vertex = 0
isolated_bmap.resize(vertices_bmap_size);
// vertices_degree[i] = degree of vertex i
vertex_degree.resize(number_of_vertices);
// We will need an (n+1)-int buffer for two different things (bottom up or top down) - sharing it
// since those uses are mutually exclusive
buffer_np1_1.resize(number_of_vertices + 1);
buffer_np1_2.resize(number_of_vertices + 1);
// Using buffers : top down
// frontier_vertex_degree[i] is the degree of vertex frontier[i]
frontier_vertex_degree = buffer_np1_1.data().get();
// exclusive sum of frontier_vertex_degree
exclusive_sum_frontier_vertex_degree = buffer_np1_2.data().get();
// Using buffers : bottom up
// contains list of unvisited vertices
unvisited_queue = buffer_np1_1.data().get();
// size of the "last" unvisited queue : size_last_unvisited_queue
// refers to the size of unvisited_queue
// which may not be up to date (the queue may contains vertices that are now
// visited)
// We may leave vertices unvisited after bottom up main kernels - storing them
// here
left_unvisited_queue = buffer_np1_2.data().get();
// We use buckets of edges (32 edges per bucket for now, see exact macro in bfs_kernels).
// frontier_vertex_degree_buckets_offsets[i] is the index k such as frontier[k] is the source of
// the first edge of the bucket See top down kernels for more details
exclusive_sum_frontier_vertex_buckets_offsets.resize(
((number_of_edges / TOP_DOWN_EXPAND_DIMX + 1) * NBUCKETS_PER_BLOCK + 2));
// Init device-side counters
// Those counters must be/can be reset at each bfs iteration
// Keeping them adjacent in memory allows us to call only one cudaMemset - launch latency is the
// current bottleneck
d_counters_pad.resize(4);
d_new_frontier_cnt = d_counters_pad.data().get();
d_mu = d_counters_pad.data().get() + 1;
d_unvisited_cnt = d_counters_pad.data().get() + 2;
d_left_unvisited_cnt = d_counters_pad.data().get() + 3;
// Lets use this int* for the next 3 lines
// Its dereferenced value is not initialized - so we dont care about what we
// put in it
IndexType *d_nisolated = d_new_frontier_cnt;
cudaMemsetAsync(d_nisolated, 0, sizeof(IndexType), stream);
// Computing isolated_bmap
// Only dependent on graph - not source vertex - done once
traversal::flag_isolated_vertices(number_of_vertices,
isolated_bmap.data().get(),
row_offsets,
vertex_degree.data().get(),
d_nisolated,
stream);
cudaMemcpyAsync(&nisolated, d_nisolated, sizeof(IndexType), cudaMemcpyDeviceToHost, stream);
// We need nisolated to be ready to use
cudaStreamSynchronize(stream);
}
template <typename IndexType>
void BFS<IndexType>::configure(IndexType *_distances,
IndexType *_predecessors,
double *_sp_counters,
int *_edge_mask)
{
distances = _distances;
predecessors = _predecessors;
edge_mask = _edge_mask;
sp_counters = _sp_counters;
useEdgeMask = (edge_mask != NULL);
computeDistances = (distances != NULL);
computePredecessors = (predecessors != NULL);
// We need distances to use bottom up
if (directed && !computeDistances) {
distances_vals.resize(number_of_vertices);
distances = distances_vals.data().get();
}
// In case the shortest path counters is required, previous_bmap has to be allocated
if (sp_counters) { previous_visited_bmap.resize(vertices_bmap_size); }
}
template <typename IndexType>
void BFS<IndexType>::traverse(IndexType source_vertex)
{
// Init visited_bmap
// If the graph is undirected, we note that
// we will never discover isolated vertices (in degree = out degree = 0)
// we avoid a lot of work by flagging them now
// in g500 graphs they represent ~25% of total vertices
// more than that for wiki and twitter graphs
if (directed) {
cudaMemsetAsync(visited_bmap.data().get(), 0, vertices_bmap_size * sizeof(int), stream);
} else {
cudaMemcpyAsync(visited_bmap.data().get(),
isolated_bmap.data().get(),
vertices_bmap_size * sizeof(int),
cudaMemcpyDeviceToDevice,
stream);
}
// If needed, setting all vertices as undiscovered (inf distance)
// We dont use computeDistances here
// if the graph is undirected, we may need distances even if
// computeDistances is false
if (distances)
traversal::fill_vec(distances, number_of_vertices, traversal::vec_t<IndexType>::max, stream);
// If needed, setting all predecessors to non-existent (-1)
if (computePredecessors) {
cudaMemsetAsync(predecessors, -1, number_of_vertices * sizeof(IndexType), stream);
}
if (sp_counters) {
cudaMemsetAsync(sp_counters, 0, number_of_vertices * sizeof(double), stream);
double value = 1;
cudaMemcpyAsync(sp_counters + source_vertex, &value, sizeof(double), cudaMemcpyHostToDevice);
}
//
// Initial frontier
//
frontier = original_frontier.data().get();
if (distances) { cudaMemsetAsync(&distances[source_vertex], 0, sizeof(IndexType), stream); }
// Setting source_vertex as visited
// There may be bit already set on that bmap (isolated vertices) - if the
// graph is undirected
int current_visited_bmap_source_vert = 0;
if (!directed) {
cudaMemcpyAsync(¤t_visited_bmap_source_vert,
visited_bmap.data().get() + (source_vertex / INT_SIZE),
sizeof(int),
cudaMemcpyDeviceToHost);
// We need current_visited_bmap_source_vert
cudaStreamSynchronize(stream);
}
int m = (1 << (source_vertex % INT_SIZE));
// In that case, source is isolated, done now
if (!directed && (m & current_visited_bmap_source_vert)) {
// Init distances and predecessors are done, (cf Streamsync in previous if)
return;
}
m |= current_visited_bmap_source_vert;
cudaMemcpyAsync(visited_bmap.data().get() + (source_vertex / INT_SIZE),
&m,
sizeof(int),
cudaMemcpyHostToDevice,
stream);
// Adding source_vertex to init frontier
cudaMemcpyAsync(&frontier[0], &source_vertex, sizeof(IndexType), cudaMemcpyHostToDevice, stream);
// mf : edges in frontier
// nf : vertices in frontier
// mu : edges undiscovered
// nu : nodes undiscovered
// lvl : current frontier's depth
IndexType mf, nf, mu, nu;
bool growing;
IndexType lvl = 1;
// Frontier has one vertex
nf = 1;
// all edges are undiscovered (by def isolated vertices have 0 edges)
mu = number_of_edges;
// all non isolated vertices are undiscovered (excepted source vertex, which is in frontier)
// That number is wrong if source_vertex is also isolated - but it's not important
nu = number_of_vertices - nisolated - nf;
// Last frontier was 0, now it is 1
growing = true;
IndexType size_last_left_unvisited_queue = number_of_vertices; // we just need value > 0
IndexType size_last_unvisited_queue = 0; // queue empty
// Typical pre-top down workflow. set_frontier_degree + exclusive-scan
traversal::set_frontier_degree(
frontier_vertex_degree, frontier, vertex_degree.data().get(), nf, stream);
traversal::exclusive_sum(
frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream);
cudaMemcpyAsync(&mf,
&exclusive_sum_frontier_vertex_degree[nf],
sizeof(IndexType),
cudaMemcpyDeviceToHost,
stream);
// We need mf
cudaStreamSynchronize(stream);
// At first we know we have to use top down
BFS_ALGO_STATE algo_state = TOPDOWN;
// useDistances : we check if a vertex is a parent using distances in bottom up - distances become
// working data; undirected graph : need parents to be in children's neighbors
// In case the shortest path counters need to be computed, the bottom_up approach cannot be used
bool can_use_bottom_up = (!sp_counters && !directed && distances);
while (nf > 0) {
new_frontier = frontier + nf;
IndexType old_nf = nf;
resetDevicePointers();
if (can_use_bottom_up) {
// Choosing algo
// Finite state machine described in http://parlab.eecs.berkeley.edu/sites/all/parlab/files/main.pdf
switch (algo_state) {
case TOPDOWN:
if (mf > mu / alpha) algo_state = BOTTOMUP;
break;
case BOTTOMUP:
if (!growing && nf < number_of_vertices / beta) {
// We need to prepare the switch back to top down
// We couldnt keep track of mu during bottom up - because we dont know what mf is.
// Computing mu here
bfs_kernels::count_unvisited_edges(unvisited_queue,
size_last_unvisited_queue,
visited_bmap.data().get(),
vertex_degree.data().get(),
d_mu,
stream);
// Typical pre-top down workflow. set_frontier_degree + exclusive-scan
traversal::set_frontier_degree(
frontier_vertex_degree, frontier, vertex_degree.data().get(), nf, stream);
traversal::exclusive_sum(
frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream);
cudaMemcpyAsync(&mf,
&exclusive_sum_frontier_vertex_degree[nf],
sizeof(IndexType),
cudaMemcpyDeviceToHost,
stream);
cudaMemcpyAsync(&mu, d_mu, sizeof(IndexType), cudaMemcpyDeviceToHost, stream);
// We will need mf and mu
cudaStreamSynchronize(stream);
algo_state = TOPDOWN;
}
break;
}
}
// Executing algo
switch (algo_state) {
case TOPDOWN:
// This step is only required if sp_counters is not nullptr
if (sp_counters) {
cudaMemcpyAsync(previous_visited_bmap.data().get(),
visited_bmap.data().get(),
vertices_bmap_size * sizeof(int),
cudaMemcpyDeviceToDevice,
stream);
// We need to copy the visited_bmap before doing the traversal
cudaStreamSynchronize(stream);
}
traversal::compute_bucket_offsets(
exclusive_sum_frontier_vertex_degree,
exclusive_sum_frontier_vertex_buckets_offsets.data().get(),
nf,
mf,
stream);
bfs_kernels::frontier_expand(row_offsets,
col_indices,
frontier,
nf,
mf,
lvl,
new_frontier,
d_new_frontier_cnt,
exclusive_sum_frontier_vertex_degree,
exclusive_sum_frontier_vertex_buckets_offsets.data().get(),
previous_visited_bmap.data().get(),
visited_bmap.data().get(),
distances,
predecessors,
sp_counters,
edge_mask,
isolated_bmap.data().get(),
directed,
stream,
deterministic);
mu -= mf;
cudaMemcpyAsync(&nf, d_new_frontier_cnt, sizeof(IndexType), cudaMemcpyDeviceToHost, stream);
CHECK_CUDA(stream);
// We need nf
cudaStreamSynchronize(stream);
if (nf) {
// Typical pre-top down workflow. set_frontier_degree + exclusive-scan
traversal::set_frontier_degree(
frontier_vertex_degree, new_frontier, vertex_degree.data().get(), nf, stream);
traversal::exclusive_sum(
frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream);
cudaMemcpyAsync(&mf,
&exclusive_sum_frontier_vertex_degree[nf],
sizeof(IndexType),
cudaMemcpyDeviceToHost,
stream);
// We need mf
cudaStreamSynchronize(stream);
}
break;
case BOTTOMUP:
bfs_kernels::fill_unvisited_queue(visited_bmap.data().get(),
vertices_bmap_size,
number_of_vertices,
unvisited_queue,
d_unvisited_cnt,
stream,
deterministic);
size_last_unvisited_queue = nu;
bfs_kernels::bottom_up_main(unvisited_queue,
size_last_unvisited_queue,
left_unvisited_queue,
d_left_unvisited_cnt,
visited_bmap.data().get(),
row_offsets,
col_indices,
lvl,
new_frontier,
d_new_frontier_cnt,
distances,
predecessors,
edge_mask,
stream,
deterministic);
// The number of vertices left unvisited decreases
// If it wasnt necessary last time, it wont be this time
if (size_last_left_unvisited_queue) {
cudaMemcpyAsync(&size_last_left_unvisited_queue,
d_left_unvisited_cnt,
sizeof(IndexType),
cudaMemcpyDeviceToHost,
stream);
CHECK_CUDA(stream);
// We need last_left_unvisited_size
cudaStreamSynchronize(stream);
bfs_kernels::bottom_up_large(left_unvisited_queue,
size_last_left_unvisited_queue,
visited_bmap.data().get(),
row_offsets,
col_indices,
lvl,
new_frontier,
d_new_frontier_cnt,
distances,
predecessors,
edge_mask,
stream,
deterministic);
}
cudaMemcpyAsync(&nf, d_new_frontier_cnt, sizeof(IndexType), cudaMemcpyDeviceToHost, stream);
CHECK_CUDA(stream);
// We will need nf
cudaStreamSynchronize(stream);
break;
}
// Updating undiscovered edges count
nu -= nf;
// Using new frontier
frontier = new_frontier;
growing = (nf > old_nf);
++lvl;
}
}
template <typename IndexType>
void BFS<IndexType>::resetDevicePointers()
{
cudaMemsetAsync(d_counters_pad.data().get(), 0, 4 * sizeof(IndexType), stream);
}
template <typename IndexType>
void BFS<IndexType>::clean()
{
// the vectors have a destructor that takes care of cleaning
}
// Explicit Instantiation
template class BFS<uint32_t>;
template class BFS<int>;
template class BFS<int64_t>;
} // namespace detail
// NOTE: SP counter increase extremely fast on large graph
// It can easily reach 1e40~1e70 on GAP-road.mtx
template <typename VT, typename ET, typename WT>
void bfs(raft::handle_t const &handle,
GraphCSRView<VT, ET, WT> const &graph,
VT *distances,
VT *predecessors,
double *sp_counters,
const VT start_vertex,
bool directed,
bool mg_batch)
{
static_assert(std::is_integral<VT>::value && sizeof(VT) >= sizeof(int32_t),
"Unsupported vertex id data type. Use integral types of size >= sizeof(int32_t)");
static_assert(std::is_same<VT, ET>::value,
"VT and ET should be the same time for the current BFS implementation");
static_assert(std::is_floating_point<WT>::value,
"Unsupported edge weight type. Use floating point types"); // actually, this is
// unnecessary for BFS
if (handle.comms_initialized() && !mg_batch) {
CUGRAPH_EXPECTS(sp_counters == nullptr,
"BFS Traversal shortest path is not supported in MG path");
mg::bfs<VT, ET, WT>(handle, graph, distances, predecessors, start_vertex);
} else {
VT number_of_vertices = graph.number_of_vertices;
ET number_of_edges = graph.number_of_edges;
const VT *indices_ptr = graph.indices;
const ET *offsets_ptr = graph.offsets;
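// Heuristic thresholds for the direction-optimizing traversal in detail::BFS:
// alpha triggers the top-down -> bottom-up switch (mf > mu / alpha),
// beta the switch back to top-down (nf < number_of_vertices / beta).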
int alpha = 15;
int beta = 18;
// FIXME: Use VT and ET in the BFS detail
cugraph::detail::BFS<VT> bfs(
number_of_vertices, number_of_edges, offsets_ptr, indices_ptr, directed, alpha, beta);
bfs.configure(distances, predecessors, sp_counters, nullptr);
bfs.traverse(start_vertex);
}
}
// Explicit Instantiation
template void bfs<uint32_t, uint32_t, float>(raft::handle_t const &handle,
GraphCSRView<uint32_t, uint32_t, float> const &graph,
uint32_t *distances,
uint32_t *predecessors,
double *sp_counters,
const uint32_t source_vertex,
bool directed,
bool mg_batch);
// Explicit Instantiation
template void bfs<uint32_t, uint32_t, double>(raft::handle_t const &handle,
GraphCSRView<uint32_t, uint32_t, double> const &graph,
uint32_t *distances,
uint32_t *predecessors,
double *sp_counters,
const uint32_t source_vertex,
bool directed,
bool mg_batch);
// Explicit Instantiation
template void bfs<int32_t, int32_t, float>(raft::handle_t const &handle,
GraphCSRView<int32_t, int32_t, float> const &graph,
int32_t *distances,
int32_t *predecessors,
double *sp_counters,
const int32_t source_vertex,
bool directed,
bool mg_batch);
// Explicit Instantiation
template void bfs<int32_t, int32_t, double>(raft::handle_t const &handle,
GraphCSRView<int32_t, int32_t, double> const &graph,
int32_t *distances,
int32_t *predecessors,
double *sp_counters,
const int32_t source_vertex,
bool directed,
bool mg_batch);
// Explicit Instantiation
template void bfs<int64_t, int64_t, float>(raft::handle_t const &handle,
GraphCSRView<int64_t, int64_t, float> const &graph,
int64_t *distances,
int64_t *predecessors,
double *sp_counters,
const int64_t source_vertex,
bool directed,
bool mg_batch);
// Explicit Instantiation
template void bfs<int64_t, int64_t, double>(raft::handle_t const &handle,
GraphCSRView<int64_t, int64_t, double> const &graph,
int64_t *distances,
int64_t *predecessors,
double *sp_counters,
const int64_t source_vertex,
bool directed,
bool mg_batch);
} // namespace cugraph
|
c077f867f48f441033c61b8b38c89724f7ac51be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip_runtime.h"
/**
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef HELPER_CUDA_H
#define HELPER_CUDA_H
#pragma once
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "helper_string.h"
/*
inline void __ExitInTime(int seconds)
{
fprintf(stdout, "> exiting in %d seconds: ", seconds);
fflush(stdout);
time_t t;
int count;
for (t=time(0)+seconds, count=seconds; time(0) < t; count--) {
fprintf(stdout, "%d...", count);
#if defined(WIN32)
Sleep(1000);
#else
sleep(1);
#endif
}
fprintf(stdout,"done!\n\n");
fflush(stdout);
}
#define EXIT_TIME_DELAY 2
inline void EXIT_DELAY(int return_code)
{
__ExitInTime(EXIT_TIME_DELAY);
exit(return_code);
}
*/
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note, it is required that your SDK sample to include the proper header files, please
// refer the CUDA examples for examples of the needed CUDA headers, which may change depending
// on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DRIVER_TYPES_H__
static const char *_cudaGetErrorEnum(hipError_t error)
{
switch (error)
{
case hipSuccess:
return "hipSuccess";
case hipErrorMissingConfiguration:
return "hipErrorMissingConfiguration";
case hipErrorMemoryAllocation:
return "hipErrorMemoryAllocation";
case hipErrorInitializationError:
return "hipErrorInitializationError";
case hipErrorLaunchFailure:
return "hipErrorLaunchFailure";
case hipErrorPriorLaunchFailure:
return "hipErrorPriorLaunchFailure";
case hipErrorLaunchTimeOut:
return "hipErrorLaunchTimeOut";
case hipErrorLaunchOutOfResources:
return "hipErrorLaunchOutOfResources";
case hipErrorInvalidDeviceFunction:
return "hipErrorInvalidDeviceFunction";
case hipErrorInvalidConfiguration:
return "hipErrorInvalidConfiguration";
case hipErrorInvalidDevice:
return "hipErrorInvalidDevice";
case hipErrorInvalidValue:
return "hipErrorInvalidValue";
case hipErrorInvalidPitchValue:
return "hipErrorInvalidPitchValue";
case hipErrorInvalidSymbol:
return "hipErrorInvalidSymbol";
case hipErrorMapFailed:
return "hipErrorMapFailed";
case hipErrorUnmapFailed:
return "hipErrorUnmapFailed";
case hipErrorInvalidHostPointer:
return "hipErrorInvalidHostPointer";
case hipErrorInvalidDevicePointer:
return "hipErrorInvalidDevicePointer";
case hipErrorInvalidTexture:
return "hipErrorInvalidTexture";
case hipErrorInvalidTextureBinding:
return "hipErrorInvalidTextureBinding";
case hipErrorInvalidChannelDescriptor:
return "hipErrorInvalidChannelDescriptor";
case hipErrorInvalidMemcpyDirection:
return "hipErrorInvalidMemcpyDirection";
case hipErrorAddressOfConstant:
return "hipErrorAddressOfConstant";
case hipErrorTextureFetchFailed:
return "hipErrorTextureFetchFailed";
case hipErrorTextureNotBound:
return "hipErrorTextureNotBound";
case hipErrorSynchronizationError:
return "hipErrorSynchronizationError";
case hipErrorInvalidFilterSetting:
return "hipErrorInvalidFilterSetting";
case hipErrorInvalidNormSetting:
return "hipErrorInvalidNormSetting";
case hipErrorMixedDeviceExecution:
return "hipErrorMixedDeviceExecution";
case hipErrorDeinitialized:
return "hipErrorDeinitialized";
case hipErrorUnknown:
return "hipErrorUnknown";
case hipErrorNotYetImplemented:
return "hipErrorNotYetImplemented";
case hipErrorMemoryValueTooLarge:
return "hipErrorMemoryValueTooLarge";
case hipErrorInvalidResourceHandle:
return "hipErrorInvalidResourceHandle";
case hipErrorNotReady:
return "hipErrorNotReady";
case hipErrorInsufficientDriver:
return "hipErrorInsufficientDriver";
case hipErrorSetOnActiveProcess:
return "hipErrorSetOnActiveProcess";
case hipErrorInvalidSurface:
return "hipErrorInvalidSurface";
case hipErrorNoDevice:
return "hipErrorNoDevice";
case hipErrorECCNotCorrectable:
return "hipErrorECCNotCorrectable";
case hipErrorSharedObjectSymbolNotFound:
return "hipErrorSharedObjectSymbolNotFound";
case hipErrorSharedObjectInitFailed:
return "hipErrorSharedObjectInitFailed";
case hipErrorUnsupportedLimit:
return "hipErrorUnsupportedLimit";
case hipErrorDuplicateVariableName:
return "hipErrorDuplicateVariableName";
case hipErrorDuplicateTextureName:
return "hipErrorDuplicateTextureName";
case hipErrorDuplicateSurfaceName:
return "hipErrorDuplicateSurfaceName";
case hipErrorDevicesUnavailable:
return "hipErrorDevicesUnavailable";
case hipErrorInvalidImage:
return "hipErrorInvalidImage";
case hipErrorNoBinaryForGpu:
return "hipErrorNoBinaryForGpu";
case hipErrorIncompatibleDriverContext:
return "hipErrorIncompatibleDriverContext";
case hipErrorPeerAccessAlreadyEnabled:
return "hipErrorPeerAccessAlreadyEnabled";
case hipErrorPeerAccessNotEnabled:
return "hipErrorPeerAccessNotEnabled";
case hipErrorDeviceAlreadyInUse:
return "hipErrorDeviceAlreadyInUse";
case hipErrorProfilerDisabled:
return "hipErrorProfilerDisabled";
case hipErrorProfilerNotInitialized:
return "hipErrorProfilerNotInitialized";
case hipErrorProfilerAlreadyStarted:
return "hipErrorProfilerAlreadyStarted";
case hipErrorProfilerAlreadyStopped:
return "hipErrorProfilerAlreadyStopped";
#if __CUDA_API_VERSION >= 0x4000
case hipErrorAssert:
return "hipErrorAssert";
case hipErrorTooManyPeers:
return "hipErrorTooManyPeers";
case hipErrorHostMemoryAlreadyRegistered:
return "hipErrorHostMemoryAlreadyRegistered";
case hipErrorHostMemoryNotRegistered:
return "hipErrorHostMemoryNotRegistered";
#endif
case hipErrorStartupFailure:
return "hipErrorStartupFailure";
case hipErrorApiFailureBase:
return "hipErrorApiFailureBase";
}
return "<unknown>";
}
#endif
#ifdef __cuda_cuda_h__
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(hipError_t error)
{
switch (error)
{
case hipSuccess:
return "hipSuccess";
case hipErrorInvalidValue:
return "hipErrorInvalidValue";
case hipErrorMemoryAllocation:
return "hipErrorMemoryAllocation";
case hipErrorNotInitialized:
return "hipErrorNotInitialized";
case hipErrorDeinitialized:
return "hipErrorDeinitialized";
case hipErrorProfilerDisabled:
return "hipErrorProfilerDisabled";
case hipErrorProfilerNotInitialized:
return "hipErrorProfilerNotInitialized";
case hipErrorProfilerAlreadyStarted:
return "hipErrorProfilerAlreadyStarted";
case hipErrorProfilerAlreadyStopped:
return "hipErrorProfilerAlreadyStopped";
case hipErrorNoDevice:
return "hipErrorNoDevice";
case hipErrorInvalidDevice:
return "hipErrorInvalidDevice";
case hipErrorInvalidImage:
return "hipErrorInvalidImage";
case hipErrorInvalidContext:
return "hipErrorInvalidContext";
case hipErrorContextAlreadyCurrent:
return "hipErrorContextAlreadyCurrent";
case hipErrorMapFailed:
return "hipErrorMapFailed";
case hipErrorUnmapFailed:
return "hipErrorUnmapFailed";
case hipErrorArrayIsMapped:
return "hipErrorArrayIsMapped";
case hipErrorAlreadyMapped:
return "hipErrorAlreadyMapped";
case hipErrorNoBinaryForGpu:
return "hipErrorNoBinaryForGpu";
case hipErrorAlreadyAcquired:
return "hipErrorAlreadyAcquired";
case hipErrorNotMapped:
return "hipErrorNotMapped";
case hipErrorNotMappedAsArray:
return "hipErrorNotMappedAsArray";
case hipErrorNotMappedAsPointer:
return "hipErrorNotMappedAsPointer";
case hipErrorECCNotCorrectable:
return "hipErrorECCNotCorrectable";
case hipErrorUnsupportedLimit:
return "hipErrorUnsupportedLimit";
case hipErrorContextAlreadyInUse:
return "hipErrorContextAlreadyInUse";
case hipErrorInvalidSource:
return "hipErrorInvalidSource";
case hipErrorFileNotFound:
return "hipErrorFileNotFound";
case hipErrorSharedObjectSymbolNotFound:
return "hipErrorSharedObjectSymbolNotFound";
case hipErrorSharedObjectInitFailed:
return "hipErrorSharedObjectInitFailed";
case hipErrorOperatingSystem:
return "hipErrorOperatingSystem";
case hipErrorInvalidResourceHandle:
return "hipErrorInvalidResourceHandle";
case hipErrorNotFound:
return "hipErrorNotFound";
case hipErrorNotReady:
return "hipErrorNotReady";
case hipErrorLaunchFailure:
return "hipErrorLaunchFailure";
case hipErrorLaunchOutOfResources:
return "hipErrorLaunchOutOfResources";
case hipErrorLaunchTimeOut:
return "hipErrorLaunchTimeOut";
case hipErrorLaunchIncompatibleTexturing:
return "hipErrorLaunchIncompatibleTexturing";
case hipErrorPeerAccessAlreadyEnabled:
return "hipErrorPeerAccessAlreadyEnabled";
case hipErrorPeerAccessNotEnabled:
return "hipErrorPeerAccessNotEnabled";
case hipErrorPrimaryContextActive:
return "hipErrorPrimaryContextActive";
case hipErrorContextIsDestroyed:
return "hipErrorContextIsDestroyed";
case hipErrorAssert:
return "hipErrorAssert";
case hipErrorTooManyPeers:
return "hipErrorTooManyPeers";
case hipErrorHostMemoryAlreadyRegistered:
return "hipErrorHostMemoryAlreadyRegistered";
case hipErrorHostMemoryNotRegistered:
return "hipErrorHostMemoryNotRegistered";
case hipErrorUnknown:
return "hipErrorUnknown";
}
return "<unknown>";
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(hipblasStatus_t error)
{
switch (error)
{
case HIPBLAS_STATUS_SUCCESS:
return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED:
return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED:
return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE:
return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH:
return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR:
return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED:
return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR:
return "HIPBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(hipfftResult error)
{
switch (error)
{
case HIPFFT_SUCCESS:
return "HIPFFT_SUCCESS";
case HIPFFT_INVALID_PLAN:
return "HIPFFT_INVALID_PLAN";
case HIPFFT_ALLOC_FAILED:
return "HIPFFT_ALLOC_FAILED";
case HIPFFT_INVALID_TYPE:
return "HIPFFT_INVALID_TYPE";
case HIPFFT_INVALID_VALUE:
return "HIPFFT_INVALID_VALUE";
case HIPFFT_INTERNAL_ERROR:
return "HIPFFT_INTERNAL_ERROR";
case HIPFFT_EXEC_FAILED:
return "HIPFFT_EXEC_FAILED";
case HIPFFT_SETUP_FAILED:
return "HIPFFT_SETUP_FAILED";
case HIPFFT_INVALID_SIZE:
return "HIPFFT_INVALID_SIZE";
case HIPFFT_UNALIGNED_DATA:
return "HIPFFT_UNALIGNED_DATA";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(hipsparseStatus_t error)
{
switch (error)
{
case HIPSPARSE_STATUS_SUCCESS:
return "HIPSPARSE_STATUS_SUCCESS";
case HIPSPARSE_STATUS_NOT_INITIALIZED:
return "HIPSPARSE_STATUS_NOT_INITIALIZED";
case HIPSPARSE_STATUS_ALLOC_FAILED:
return "HIPSPARSE_STATUS_ALLOC_FAILED";
case HIPSPARSE_STATUS_INVALID_VALUE:
return "HIPSPARSE_STATUS_INVALID_VALUE";
case HIPSPARSE_STATUS_ARCH_MISMATCH:
return "HIPSPARSE_STATUS_ARCH_MISMATCH";
case HIPSPARSE_STATUS_MAPPING_ERROR:
return "HIPSPARSE_STATUS_MAPPING_ERROR";
case HIPSPARSE_STATUS_EXECUTION_FAILED:
return "HIPSPARSE_STATUS_EXECUTION_FAILED";
case HIPSPARSE_STATUS_INTERNAL_ERROR:
return "HIPSPARSE_STATUS_INTERNAL_ERROR";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(hiprandStatus_t error)
{
switch (error)
{
case HIPRAND_STATUS_SUCCESS:
return "HIPRAND_STATUS_SUCCESS";
case HIPRAND_STATUS_VERSION_MISMATCH:
return "HIPRAND_STATUS_VERSION_MISMATCH";
case HIPRAND_STATUS_NOT_INITIALIZED:
return "HIPRAND_STATUS_NOT_INITIALIZED";
case HIPRAND_STATUS_ALLOCATION_FAILED:
return "HIPRAND_STATUS_ALLOCATION_FAILED";
case HIPRAND_STATUS_TYPE_ERROR:
return "HIPRAND_STATUS_TYPE_ERROR";
case HIPRAND_STATUS_OUT_OF_RANGE:
return "HIPRAND_STATUS_OUT_OF_RANGE";
case HIPRAND_STATUS_LENGTH_NOT_MULTIPLE:
return "HIPRAND_STATUS_LENGTH_NOT_MULTIPLE";
case HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED:
return "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case HIPRAND_STATUS_LAUNCH_FAILURE:
return "HIPRAND_STATUS_LAUNCH_FAILURE";
case HIPRAND_STATUS_PREEXISTING_FAILURE:
return "HIPRAND_STATUS_PREEXISTING_FAILURE";
case HIPRAND_STATUS_INITIALIZATION_FAILED:
return "HIPRAND_STATUS_INITIALIZATION_FAILED";
case HIPRAND_STATUS_ARCH_MISMATCH:
return "HIPRAND_STATUS_ARCH_MISMATCH";
case HIPRAND_STATUS_INTERNAL_ERROR:
return "HIPRAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error)
{
switch (error)
{
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
}
return "<unknown>";
}
#endif
#ifdef __DRIVER_TYPES_H__
#ifndef DEVICE_RESET
#define DEVICE_RESET hipDeviceReset();
#endif
#else
#ifndef DEVICE_RESET
#define DEVICE_RESET
#endif
#endif
template< typename T >
void check(T result, char const *const func, const char *const file, int const line)
{
if (result)
{
fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n",
file, line, static_cast<unsigned int>(result), _cudaGetErrorEnum(result), func);
DEVICE_RESET
// Make sure we call CUDA Device Reset before exiting
exit(EXIT_FAILURE);
}
}
#ifdef __DRIVER_TYPES_H__
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(val) check ( (val), #val, __FILE__, __LINE__ )
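// Example (illustrative names): checkCudaErrors(hipMalloc((void **)&d_buf, numBytes));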
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString(err));
DEVICE_RESET
exit(EXIT_FAILURE);
}
}
#endif
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
// Beginning of GPU Architecture definitions
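// Maps a compute capability (major/minor packed as 0xMm) to the number of cores per SM, e.g. SM 3.5 -> 0x35 -> 192 cores.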
inline int _ConvertSMVer2Cores(int major, int minor)
{
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM)
typedef struct
{
int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{
{ 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class
{ 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class
{ 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class
{ 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class
{ 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class
{ 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class
{ 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
{ 0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1)
{
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
{
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the value, fall back to a default cores/SM count so execution can continue
printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[7].Cores);
return nGpuArchCoresPerSM[7].Cores;
}
// end of GPU Architecture definitions
#ifdef __CUDA_RUNTIME_H__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID)
{
int device_count;
checkCudaErrors(hipGetDeviceCount(&device_count));
if (device_count == 0)
{
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0)
{
devID = 0;
}
if (devID > device_count-1)
{
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", device_count);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
return -1;
}
if (deviceProp.major < 1)
{
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipSetDevice(devID));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId()
{
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
unsigned long long max_compute_perf = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceCount(&device_count);
checkCudaErrors(hipGetDeviceCount(&device_count));
if (device_count == 0)
{
fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best major SM Architecture GPU device
while (current_device < device_count)
{
hipGetDeviceProperties(&deviceProp, current_device);
// If this GPU is not running on Compute Mode prohibited, then we can add it to the list
if (deviceProp.computeMode != hipComputeModeProhibited)
{
if (deviceProp.major > 0 && deviceProp.major < 9999)
{
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
}
current_device++;
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count)
{
hipGetDeviceProperties(&deviceProp, current_device);
// If this GPU is not running on Compute Mode prohibited, then we can add it to the list
if (deviceProp.computeMode != hipComputeModeProhibited)
{
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
{
sm_per_multiproc = 1;
}
else
{
sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
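// Rough performance estimate: multiprocessor count * cores per SM * clock rate (clockRate is reported in kHz).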
unsigned long long compute_perf = (unsigned long long) deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if (compute_perf > max_compute_perf)
{
// If we find GPU with SM major > 2, search only these
if (best_SM_arch > 2)
{
// If our device == best_SM_arch, choose this one; otherwise pass
if (deviceProp.major == best_SM_arch)
{
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
else
{
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
}
++current_device;
}
return max_perf_device;
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv)
{
hipDeviceProp_t deviceProp;
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device"))
{
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0)
{
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
}
else
{
devID = gpuDeviceInit(devID);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
}
else
{
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
checkCudaErrors(hipSetDevice(devID));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
return devID;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version)
{
hipDeviceProp_t deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev;
checkCudaErrors(hipGetDevice(&dev));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
if ((deviceProp.major > major_version) ||
(deviceProp.major == major_version && deviceProp.minor >= minor_version))
{
printf(" GPU Device %d: <%16s >, Compute SM %d.%d detected\n", dev, deviceProp.name, deviceProp.major, deviceProp.minor);
return true;
}
else
{
printf(" No GPU device was found that can support CUDA compute capability %d.%d.\n", major_version, minor_version);
return false;
}
}
#endif
// end of CUDA Helper Functions
#endif
| c077f867f48f441033c61b8b38c89724f7ac51be.cu | #include "hip_runtime.h"
/**
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef HELPER_CUDA_H
#define HELPER_CUDA_H
#pragma once
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "helper_string.h"
/*
inline void __ExitInTime(int seconds)
{
fprintf(stdout, "> exiting in %d seconds: ", seconds);
fflush(stdout);
time_t t;
int count;
for (t=time(0)+seconds, count=seconds; time(0) < t; count--) {
fprintf(stdout, "%d...", count);
#if defined(WIN32)
Sleep(1000);
#else
sleep(1);
#endif
}
fprintf(stdout,"done!\n\n");
fflush(stdout);
}
#define EXIT_TIME_DELAY 2
inline void EXIT_DELAY(int return_code)
{
__ExitInTime(EXIT_TIME_DELAY);
exit(return_code);
}
*/
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note, it is required that your SDK sample to include the proper header files, please
// refer the CUDA examples for examples of the needed CUDA headers, which may change depending
// on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DRIVER_TYPES_H__
static const char *_cudaGetErrorEnum(hipError_t error)
{
switch (error)
{
case hipSuccess:
return "hipSuccess";
case cudaErrorMissingConfiguration:
return "cudaErrorMissingConfiguration";
case cudaErrorMemoryAllocation:
return "cudaErrorMemoryAllocation";
case cudaErrorInitializationError:
return "cudaErrorInitializationError";
case cudaErrorLaunchFailure:
return "cudaErrorLaunchFailure";
case cudaErrorPriorLaunchFailure:
return "cudaErrorPriorLaunchFailure";
case cudaErrorLaunchTimeout:
return "cudaErrorLaunchTimeout";
case cudaErrorLaunchOutOfResources:
return "cudaErrorLaunchOutOfResources";
case cudaErrorInvalidDeviceFunction:
return "cudaErrorInvalidDeviceFunction";
case cudaErrorInvalidConfiguration:
return "cudaErrorInvalidConfiguration";
case cudaErrorInvalidDevice:
return "cudaErrorInvalidDevice";
case cudaErrorInvalidValue:
return "cudaErrorInvalidValue";
case cudaErrorInvalidPitchValue:
return "cudaErrorInvalidPitchValue";
case cudaErrorInvalidSymbol:
return "cudaErrorInvalidSymbol";
case cudaErrorMapBufferObjectFailed:
return "cudaErrorMapBufferObjectFailed";
case cudaErrorUnmapBufferObjectFailed:
return "cudaErrorUnmapBufferObjectFailed";
case cudaErrorInvalidHostPointer:
return "cudaErrorInvalidHostPointer";
case cudaErrorInvalidDevicePointer:
return "cudaErrorInvalidDevicePointer";
case cudaErrorInvalidTexture:
return "cudaErrorInvalidTexture";
case cudaErrorInvalidTextureBinding:
return "cudaErrorInvalidTextureBinding";
case cudaErrorInvalidChannelDescriptor:
return "cudaErrorInvalidChannelDescriptor";
case cudaErrorInvalidMemcpyDirection:
return "cudaErrorInvalidMemcpyDirection";
case cudaErrorAddressOfConstant:
return "cudaErrorAddressOfConstant";
case cudaErrorTextureFetchFailed:
return "cudaErrorTextureFetchFailed";
case cudaErrorTextureNotBound:
return "cudaErrorTextureNotBound";
case cudaErrorSynchronizationError:
return "cudaErrorSynchronizationError";
case cudaErrorInvalidFilterSetting:
return "cudaErrorInvalidFilterSetting";
case cudaErrorInvalidNormSetting:
return "cudaErrorInvalidNormSetting";
case cudaErrorMixedDeviceExecution:
return "cudaErrorMixedDeviceExecution";
case cudaErrorCudartUnloading:
return "cudaErrorCudartUnloading";
case cudaErrorUnknown:
return "cudaErrorUnknown";
case cudaErrorNotYetImplemented:
return "cudaErrorNotYetImplemented";
case cudaErrorMemoryValueTooLarge:
return "cudaErrorMemoryValueTooLarge";
case cudaErrorInvalidResourceHandle:
return "cudaErrorInvalidResourceHandle";
case cudaErrorNotReady:
return "cudaErrorNotReady";
case cudaErrorInsufficientDriver:
return "cudaErrorInsufficientDriver";
case cudaErrorSetOnActiveProcess:
return "cudaErrorSetOnActiveProcess";
case cudaErrorInvalidSurface:
return "cudaErrorInvalidSurface";
case cudaErrorNoDevice:
return "cudaErrorNoDevice";
case cudaErrorECCUncorrectable:
return "cudaErrorECCUncorrectable";
case cudaErrorSharedObjectSymbolNotFound:
return "cudaErrorSharedObjectSymbolNotFound";
case cudaErrorSharedObjectInitFailed:
return "cudaErrorSharedObjectInitFailed";
case cudaErrorUnsupportedLimit:
return "cudaErrorUnsupportedLimit";
case cudaErrorDuplicateVariableName:
return "cudaErrorDuplicateVariableName";
case cudaErrorDuplicateTextureName:
return "cudaErrorDuplicateTextureName";
case cudaErrorDuplicateSurfaceName:
return "cudaErrorDuplicateSurfaceName";
case cudaErrorDevicesUnavailable:
return "cudaErrorDevicesUnavailable";
case cudaErrorInvalidKernelImage:
return "cudaErrorInvalidKernelImage";
case cudaErrorNoKernelImageForDevice:
return "cudaErrorNoKernelImageForDevice";
case cudaErrorIncompatibleDriverContext:
return "cudaErrorIncompatibleDriverContext";
case cudaErrorPeerAccessAlreadyEnabled:
return "cudaErrorPeerAccessAlreadyEnabled";
case cudaErrorPeerAccessNotEnabled:
return "cudaErrorPeerAccessNotEnabled";
case cudaErrorDeviceAlreadyInUse:
return "cudaErrorDeviceAlreadyInUse";
case cudaErrorProfilerDisabled:
return "cudaErrorProfilerDisabled";
case cudaErrorProfilerNotInitialized:
return "cudaErrorProfilerNotInitialized";
case cudaErrorProfilerAlreadyStarted:
return "cudaErrorProfilerAlreadyStarted";
case cudaErrorProfilerAlreadyStopped:
return "cudaErrorProfilerAlreadyStopped";
#if __CUDA_API_VERSION >= 0x4000
case cudaErrorAssert:
return "cudaErrorAssert";
case cudaErrorTooManyPeers:
return "cudaErrorTooManyPeers";
case cudaErrorHostMemoryAlreadyRegistered:
return "cudaErrorHostMemoryAlreadyRegistered";
case cudaErrorHostMemoryNotRegistered:
return "cudaErrorHostMemoryNotRegistered";
#endif
case cudaErrorStartupFailure:
return "cudaErrorStartupFailure";
case cudaErrorApiFailureBase:
return "cudaErrorApiFailureBase";
}
return "<unknown>";
}
#endif
#ifdef __cuda_cuda_h__
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error)
{
switch (error)
{
case CUDA_SUCCESS:
return "CUDA_SUCCESS";
case CUDA_ERROR_INVALID_VALUE:
return "CUDA_ERROR_INVALID_VALUE";
case CUDA_ERROR_OUT_OF_MEMORY:
return "CUDA_ERROR_OUT_OF_MEMORY";
case CUDA_ERROR_NOT_INITIALIZED:
return "CUDA_ERROR_NOT_INITIALIZED";
case CUDA_ERROR_DEINITIALIZED:
return "CUDA_ERROR_DEINITIALIZED";
case CUDA_ERROR_PROFILER_DISABLED:
return "CUDA_ERROR_PROFILER_DISABLED";
case CUDA_ERROR_PROFILER_NOT_INITIALIZED:
return "CUDA_ERROR_PROFILER_NOT_INITIALIZED";
case CUDA_ERROR_PROFILER_ALREADY_STARTED:
return "CUDA_ERROR_PROFILER_ALREADY_STARTED";
case CUDA_ERROR_PROFILER_ALREADY_STOPPED:
return "CUDA_ERROR_PROFILER_ALREADY_STOPPED";
case CUDA_ERROR_NO_DEVICE:
return "CUDA_ERROR_NO_DEVICE";
case CUDA_ERROR_INVALID_DEVICE:
return "CUDA_ERROR_INVALID_DEVICE";
case CUDA_ERROR_INVALID_IMAGE:
return "CUDA_ERROR_INVALID_IMAGE";
case CUDA_ERROR_INVALID_CONTEXT:
return "CUDA_ERROR_INVALID_CONTEXT";
case CUDA_ERROR_CONTEXT_ALREADY_CURRENT:
return "CUDA_ERROR_CONTEXT_ALREADY_CURRENT";
case CUDA_ERROR_MAP_FAILED:
return "CUDA_ERROR_MAP_FAILED";
case CUDA_ERROR_UNMAP_FAILED:
return "CUDA_ERROR_UNMAP_FAILED";
case CUDA_ERROR_ARRAY_IS_MAPPED:
return "CUDA_ERROR_ARRAY_IS_MAPPED";
case CUDA_ERROR_ALREADY_MAPPED:
return "CUDA_ERROR_ALREADY_MAPPED";
case CUDA_ERROR_NO_BINARY_FOR_GPU:
return "CUDA_ERROR_NO_BINARY_FOR_GPU";
case CUDA_ERROR_ALREADY_ACQUIRED:
return "CUDA_ERROR_ALREADY_ACQUIRED";
case CUDA_ERROR_NOT_MAPPED:
return "CUDA_ERROR_NOT_MAPPED";
case CUDA_ERROR_NOT_MAPPED_AS_ARRAY:
return "CUDA_ERROR_NOT_MAPPED_AS_ARRAY";
case CUDA_ERROR_NOT_MAPPED_AS_POINTER:
return "CUDA_ERROR_NOT_MAPPED_AS_POINTER";
case CUDA_ERROR_ECC_UNCORRECTABLE:
return "CUDA_ERROR_ECC_UNCORRECTABLE";
case CUDA_ERROR_UNSUPPORTED_LIMIT:
return "CUDA_ERROR_UNSUPPORTED_LIMIT";
case CUDA_ERROR_CONTEXT_ALREADY_IN_USE:
return "CUDA_ERROR_CONTEXT_ALREADY_IN_USE";
case CUDA_ERROR_INVALID_SOURCE:
return "CUDA_ERROR_INVALID_SOURCE";
case CUDA_ERROR_FILE_NOT_FOUND:
return "CUDA_ERROR_FILE_NOT_FOUND";
case CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND:
return "CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND";
case CUDA_ERROR_SHARED_OBJECT_INIT_FAILED:
return "CUDA_ERROR_SHARED_OBJECT_INIT_FAILED";
case CUDA_ERROR_OPERATING_SYSTEM:
return "CUDA_ERROR_OPERATING_SYSTEM";
case CUDA_ERROR_INVALID_HANDLE:
return "CUDA_ERROR_INVALID_HANDLE";
case CUDA_ERROR_NOT_FOUND:
return "CUDA_ERROR_NOT_FOUND";
case CUDA_ERROR_NOT_READY:
return "CUDA_ERROR_NOT_READY";
case CUDA_ERROR_LAUNCH_FAILED:
return "CUDA_ERROR_LAUNCH_FAILED";
case CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES:
return "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES";
case CUDA_ERROR_LAUNCH_TIMEOUT:
return "CUDA_ERROR_LAUNCH_TIMEOUT";
case CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING:
return "CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING";
case CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED:
return "CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED";
case CUDA_ERROR_PEER_ACCESS_NOT_ENABLED:
return "CUDA_ERROR_PEER_ACCESS_NOT_ENABLED";
case CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE:
return "CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE";
case CUDA_ERROR_CONTEXT_IS_DESTROYED:
return "CUDA_ERROR_CONTEXT_IS_DESTROYED";
case CUDA_ERROR_ASSERT:
return "CUDA_ERROR_ASSERT";
case CUDA_ERROR_TOO_MANY_PEERS:
return "CUDA_ERROR_TOO_MANY_PEERS";
case CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED:
return "CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED";
case CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED:
return "CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED";
case CUDA_ERROR_UNKNOWN:
return "CUDA_ERROR_UNKNOWN";
}
return "<unknown>";
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error)
{
switch (error)
{
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(cufftResult error)
{
switch (error)
{
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error)
{
switch (error)
{
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(curandStatus_t error)
{
switch (error)
{
case CURAND_STATUS_SUCCESS:
return "CURAND_STATUS_SUCCESS";
case CURAND_STATUS_VERSION_MISMATCH:
return "CURAND_STATUS_VERSION_MISMATCH";
case CURAND_STATUS_NOT_INITIALIZED:
return "CURAND_STATUS_NOT_INITIALIZED";
case CURAND_STATUS_ALLOCATION_FAILED:
return "CURAND_STATUS_ALLOCATION_FAILED";
case CURAND_STATUS_TYPE_ERROR:
return "CURAND_STATUS_TYPE_ERROR";
case CURAND_STATUS_OUT_OF_RANGE:
return "CURAND_STATUS_OUT_OF_RANGE";
case CURAND_STATUS_LENGTH_NOT_MULTIPLE:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case CURAND_STATUS_LAUNCH_FAILURE:
return "CURAND_STATUS_LAUNCH_FAILURE";
case CURAND_STATUS_PREEXISTING_FAILURE:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case CURAND_STATUS_INITIALIZATION_FAILED:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case CURAND_STATUS_ARCH_MISMATCH:
return "CURAND_STATUS_ARCH_MISMATCH";
case CURAND_STATUS_INTERNAL_ERROR:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error)
{
switch (error)
{
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
}
return "<unknown>";
}
#endif
#ifdef __DRIVER_TYPES_H__
#ifndef DEVICE_RESET
#define DEVICE_RESET hipDeviceReset();
#endif
#else
#ifndef DEVICE_RESET
#define DEVICE_RESET
#endif
#endif
template< typename T >
void check(T result, char const *const func, const char *const file, int const line)
{
if (result)
{
fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n",
file, line, static_cast<unsigned int>(result), _cudaGetErrorEnum(result), func);
DEVICE_RESET
// Make sure we call CUDA Device Reset before exiting
exit(EXIT_FAILURE);
}
}
#ifdef __DRIVER_TYPES_H__
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(val) check ( (val), #val, __FILE__, __LINE__ )
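// Example (illustrative names): checkCudaErrors(hipMalloc((void **)&d_buf, numBytes));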
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString(err));
DEVICE_RESET
exit(EXIT_FAILURE);
}
}
#endif
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
// Beginning of GPU Architecture definitions
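// Maps a compute capability (major/minor packed as 0xMm) to the number of cores per SM, e.g. SM 3.5 -> 0x35 -> 192 cores.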
inline int _ConvertSMVer2Cores(int major, int minor)
{
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM)
typedef struct
{
int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{
{ 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class
{ 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class
{ 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class
{ 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class
{ 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class
{ 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class
{ 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
{ 0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1)
{
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
{
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the value, fall back to a default cores/SM count so execution can continue
printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[7].Cores);
return nGpuArchCoresPerSM[7].Cores;
}
// end of GPU Architecture definitions
#ifdef __CUDA_RUNTIME_H__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID)
{
int device_count;
checkCudaErrors(hipGetDeviceCount(&device_count));
if (device_count == 0)
{
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0)
{
devID = 0;
}
if (devID > device_count-1)
{
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", device_count);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
return -1;
}
if (deviceProp.major < 1)
{
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipSetDevice(devID));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId()
{
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
unsigned long long max_compute_perf = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceCount(&device_count);
checkCudaErrors(hipGetDeviceCount(&device_count));
if (device_count == 0)
{
fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best major SM Architecture GPU device
while (current_device < device_count)
{
hipGetDeviceProperties(&deviceProp, current_device);
// If this GPU is not running on Compute Mode prohibited, then we can add it to the list
if (deviceProp.computeMode != cudaComputeModeProhibited)
{
if (deviceProp.major > 0 && deviceProp.major < 9999)
{
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
}
current_device++;
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count)
{
hipGetDeviceProperties(&deviceProp, current_device);
// If this GPU is not running on Compute Mode prohibited, then we can add it to the list
if (deviceProp.computeMode != cudaComputeModeProhibited)
{
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
{
sm_per_multiproc = 1;
}
else
{
sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
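// Rough performance estimate: multiprocessor count * cores per SM * clock rate (clockRate is reported in kHz).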
unsigned long long compute_perf = (unsigned long long) deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if (compute_perf > max_compute_perf)
{
// If we find GPU with SM major > 2, search only these
if (best_SM_arch > 2)
{
// If our device == best_SM_arch, choose this one; otherwise pass
if (deviceProp.major == best_SM_arch)
{
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
else
{
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
}
++current_device;
}
return max_perf_device;
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv)
{
hipDeviceProp_t deviceProp;
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device"))
{
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0)
{
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
}
else
{
devID = gpuDeviceInit(devID);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
}
else
{
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
checkCudaErrors(hipSetDevice(devID));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
return devID;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version)
{
hipDeviceProp_t deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev;
checkCudaErrors(hipGetDevice(&dev));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
if ((deviceProp.major > major_version) ||
(deviceProp.major == major_version && deviceProp.minor >= minor_version))
{
printf(" GPU Device %d: <%16s >, Compute SM %d.%d detected\n", dev, deviceProp.name, deviceProp.major, deviceProp.minor);
return true;
}
else
{
printf(" No GPU device was found that can support CUDA compute capability %d.%d.\n", major_version, minor_version);
return false;
}
}
#endif
// end of CUDA Helper Functions
#endif
|
38a9f14350bd725cd275163bcf040c57d335def9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "student.hpp"
#include <algorithm>
#include "common.hpp"
#include "chronoGPU.hpp"
#include "chronoCPU.hpp"
namespace IMAC {
void configureKernelSize(const size_t arraySize, int& threadsByBlock, int& blockNumber) {
threadsByBlock = umin(nextPow2<uint>(arraySize), MAX_NB_THREADS);
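// Each block reduces up to 2*threadsByBlock elements; the "+2" rounds the division up (at most a couple of extra zero-padded blocks).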
blockNumber = arraySize/(2*threadsByBlock)+2;
}
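// Unrolled max-reduction for the last 32 threads: relies on warp-synchronous execution (no __syncthreads needed) and volatile shared memory to avoid stale reads.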
__device__ void warpReduce(volatile uint* shared, int i) {
shared[i] = umax(shared[i], shared[i + 32]);
shared[i] = umax(shared[i], shared[i + 16]);
shared[i] = umax(shared[i], shared[i + 8]);
shared[i] = umax(shared[i], shared[i + 4]);
shared[i] = umax(shared[i], shared[i + 2]);
shared[i] = umax(shared[i], shared[i + 1]);
}
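// Block-level max reduction: each block loads (and pre-reduces) up to 2*blockDim.x elements into shared memory, then reduces them to one partial maximum.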
__global__
void reduceKernel(const uint* array, const uint size, uint* partialOut) {
extern __shared__ uint shared[];
unsigned int localIdx = threadIdx.x;
unsigned int globalIdx = localIdx + blockIdx.x * blockDim.x * 2;
// copy data in shared memory and do first level of reduction
if(globalIdx < size) {
shared[localIdx] = array[globalIdx];
if(globalIdx + blockDim.x < size)
shared[localIdx] = umax(shared[localIdx], array[globalIdx + blockDim.x]);
} else {
shared[localIdx] = 0; // all other elements to 0
}
__syncthreads();
// reduction
for (unsigned int s=blockDim.x/2; s>32; s >>= 1) {
if (localIdx < s) {
shared[localIdx] = max(shared[localIdx], shared[localIdx + s]);
}
__syncthreads();
}
if (localIdx < 32) warpReduce(shared, localIdx);
// write result for this block
if (localIdx == 0) partialOut[blockIdx.x] = shared[0];
}
void studentJob(const std::vector<uint>& array, const uint resCPU, const uint nbIterations) {
// resCPU is only used for comparison with the GPU result
uint *dev_array = nullptr;
uint *dev_partialMax = nullptr;
HANDLE_ERROR(hipMalloc((void**)&dev_array, array.size() * sizeof(uint)));
HANDLE_ERROR(hipMemcpy(dev_array, array.data(), array.size() * sizeof(uint), hipMemcpyHostToDevice));
// Configure kernel
int threadsByBlock, blockNumber;
configureKernelSize(array.size(), threadsByBlock, blockNumber);
verifyDim(blockNumber, threadsByBlock);
// alloc host and dev partial array
std::vector<uint> host_partialMax(blockNumber, 0);
HANDLE_ERROR(hipMalloc((void**) &dev_partialMax, host_partialMax.size() * sizeof(uint) ) );
const size_t bytesSharedMem = threadsByBlock * sizeof(uint);
std::cout << "Process on GPU (" << nbIterations << " iterations Avg)" << std::endl;
std::cout << "Computing on " << blockNumber << " block(s) and " << threadsByBlock << " thread(s) " <<"- shared mem size = " << bytesSharedMem << std::endl;
float2 timing = { 0.f, 0.f }; // x: timing GPU, y: timing CPU
ChronoGPU chrGPU;
chrGPU.start();
for (size_t i = 0; i < nbIterations; ++i) { // Average timing on 'loop' iterations
hipLaunchKernelGGL(( reduceKernel), dim3(blockNumber), dim3(threadsByBlock), bytesSharedMem, 0, dev_array, array.size(), dev_partialMax);
}
chrGPU.stop();
timing.x = chrGPU.elapsedTime() / nbIterations;
// Retrieve partial result from device to host
HANDLE_ERROR(hipMemcpy(host_partialMax.data(), dev_partialMax, host_partialMax.size() * sizeof(uint), hipMemcpyDeviceToHost));
// Free array on GPU
hipFree(dev_partialMax);
hipFree(dev_array);
// Check for error
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
throw std::runtime_error(hipGetErrorString(err));
}
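// Final reduction stage: take the maximum of the per-block partial results on the CPU.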
uint result;
ChronoCPU chrCPU;
chrCPU.start();
for (uint i = 0; i < nbIterations; ++i) {
result = *std::max_element(host_partialMax.begin(), host_partialMax.end());
}
chrCPU.stop();
timing.y = chrCPU.elapsedTime() / nbIterations;
printTiming(timing);
compare(result, resCPU); // Compare results
}
}
| 38a9f14350bd725cd275163bcf040c57d335def9.cu | #include "student.hpp"
#include <algorithm>
#include "common.hpp"
#include "chronoGPU.hpp"
#include "chronoCPU.hpp"
namespace IMAC {
void configureKernelSize(const size_t arraySize, int& threadsByBlock, int& blockNumber) {
threadsByBlock = umin(nextPow2<uint>(arraySize), MAX_NB_THREADS);
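// Each block reduces up to 2*threadsByBlock elements; the "+2" rounds the division up (at most a couple of extra zero-padded blocks).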
blockNumber = arraySize/(2*threadsByBlock)+2;
}
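// Unrolled max-reduction for the last 32 threads: relies on warp-synchronous execution (no __syncthreads needed) and volatile shared memory to avoid stale reads.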
__device__ void warpReduce(volatile uint* shared, int i) {
shared[i] = umax(shared[i], shared[i + 32]);
shared[i] = umax(shared[i], shared[i + 16]);
shared[i] = umax(shared[i], shared[i + 8]);
shared[i] = umax(shared[i], shared[i + 4]);
shared[i] = umax(shared[i], shared[i + 2]);
shared[i] = umax(shared[i], shared[i + 1]);
}
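// Block-level max reduction: each block loads (and pre-reduces) up to 2*blockDim.x elements into shared memory, then reduces them to one partial maximum.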
__global__
void reduceKernel(const uint* array, const uint size, uint* partialOut) {
extern __shared__ uint shared[];
unsigned int localIdx = threadIdx.x;
unsigned int globalIdx = localIdx + blockIdx.x * blockDim.x * 2;
// copy data in shared memory and do first level of reduction
if(globalIdx < size) {
shared[localIdx] = array[globalIdx];
if(globalIdx + blockDim.x < size)
shared[localIdx] = umax(shared[localIdx], array[globalIdx + blockDim.x]);
} else {
shared[localIdx] = 0; // all other elements to 0
}
__syncthreads();
// reduction
for (unsigned int s=blockDim.x/2; s>32; s >>= 1) {
if (localIdx < s) {
shared[localIdx] = max(shared[localIdx], shared[localIdx + s]);
}
__syncthreads();
}
if (localIdx < 32) warpReduce(shared, localIdx);
// write result for this block
if (localIdx == 0) partialOut[blockIdx.x] = shared[0];
}
void studentJob(const std::vector<uint>& array, const uint resCPU, const uint nbIterations) {
// resCPU is only used for comparison with the GPU result
uint *dev_array = nullptr;
uint *dev_partialMax = nullptr;
HANDLE_ERROR(cudaMalloc((void**)&dev_array, array.size() * sizeof(uint)));
HANDLE_ERROR(cudaMemcpy(dev_array, array.data(), array.size() * sizeof(uint), cudaMemcpyHostToDevice));
// Configure kernel
int threadsByBlock, blockNumber;
configureKernelSize(array.size(), threadsByBlock, blockNumber);
verifyDim(blockNumber, threadsByBlock);
// alloc host and dev partial array
std::vector<uint> host_partialMax(blockNumber, 0);
HANDLE_ERROR(cudaMalloc((void**) &dev_partialMax, host_partialMax.size() * sizeof(uint) ) );
const size_t bytesSharedMem = threadsByBlock * sizeof(uint);
std::cout << "Process on GPU (" << nbIterations << " iterations Avg)" << std::endl;
std::cout << "Computing on " << blockNumber << " block(s) and " << threadsByBlock << " thread(s) " <<"- shared mem size = " << bytesSharedMem << std::endl;
float2 timing = { 0.f, 0.f }; // x: timing GPU, y: timing CPU
ChronoGPU chrGPU;
chrGPU.start();
for (size_t i = 0; i < nbIterations; ++i) { // Average timing on 'loop' iterations
reduceKernel<<<blockNumber, threadsByBlock, bytesSharedMem>>>(dev_array, array.size(), dev_partialMax);
}
chrGPU.stop();
timing.x = chrGPU.elapsedTime() / nbIterations;
// Retrieve partial result from device to host
HANDLE_ERROR(cudaMemcpy(host_partialMax.data(), dev_partialMax, host_partialMax.size() * sizeof(uint), cudaMemcpyDeviceToHost));
// Free array on GPU
cudaFree(dev_partialMax);
cudaFree(dev_array);
// Check for error
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
throw std::runtime_error(cudaGetErrorString(err));
}
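// Final reduction stage: take the maximum of the per-block partial results on the CPU.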
uint result;
ChronoCPU chrCPU;
chrCPU.start();
for (uint i = 0; i < nbIterations; ++i) {
result = *std::max_element(host_partialMax.begin(), host_partialMax.end());
}
chrCPU.stop();
timing.y = chrCPU.elapsedTime() / nbIterations;
printTiming(timing);
compare(result, resCPU); // Compare results
}
}
|
999f2fab99ec554ba538d5253c739f70751570e1.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/power_layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
template<typename Dtype>
void PowerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
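// Forward pass: computes y = (shift + scale * x) ^ power element-wise.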
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// Special case where we can ignore the input: scale or power is 0.
if (diff_scale_ == Dtype(0)) {
Dtype value = (power_ == 0) ? Dtype(1) : pow(shift_, power_);
caffe_gpu_set(count, value, top_data);
return;
}
const Dtype* bottom_data = bottom[0]->gpu_data();
caffe_copy(count, bottom_data, top_data);
if (scale_ != Dtype(1)) {
caffe_gpu_scal(count, scale_, top_data);
}
if (shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, shift_, top_data);
}
if (power_ != Dtype(1)) {
caffe_gpu_powx(count, top_data, power_, top_data);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (diff_scale_ == Dtype(0)) {
Dtype value = (power_ == 0) ? Dtype(1) : pow(shift_, power_);
greentea_gpu_set<Dtype>(this->device_->id(), count, value,
(cl_mem) top_data, 0);
return;
}
const Dtype* bottom_data = bottom[0]->gpu_data();
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
if (scale_ != Dtype(1)) {
greentea_gpu_scal(this->device_->id(), count, scale_,
(cl_mem) top_data, 0);
}
if (shift_ != Dtype(0)) {
greentea_gpu_add_scalar<Dtype>(this->device_->id(), count, shift_,
(cl_mem) top_data, 0);
}
if (power_ != Dtype(1)) {
greentea_gpu_powx<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, power_, (cl_mem) top_data,
0);
}
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void PowerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int_tp count = bottom[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) {
caffe_gpu_set(count, diff_scale_, bottom_diff);
} else {
const Dtype* bottom_data = bottom[0]->gpu_data();
// Compute dy/dx = scale * power * (shift + scale * x)^(power - 1)
// = diff_scale * y / (shift + scale * x)
if (power_ == Dtype(2)) {
// Special case for y = (shift + scale * x)^2
// -> dy/dx = 2 * scale * (shift + scale * x)
// = diff_scale * shift + diff_scale * scale * x
caffe_gpu_axpby(count, diff_scale_ * scale_, bottom_data, Dtype(0),
bottom_diff);
if (shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, diff_scale_ * shift_, bottom_diff);
}
} else if (shift_ == Dtype(0)) {
// Special case for y = (scale * x)^power
// -> dy/dx = scale * power * (scale * x)^(power - 1)
// = scale * power * (scale * x)^power * (scale * x)^(-1)
// = power * y / x
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
caffe_gpu_scal(count, power_, bottom_diff);
} else {
caffe_copy(count, bottom_data, bottom_diff);
if (scale_ != Dtype(1)) {
caffe_gpu_scal(count, scale_, bottom_diff);
}
if (shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, shift_, bottom_diff);
}
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_div<Dtype>(count, top_data, bottom_diff, bottom_diff);
if (diff_scale_ != Dtype(1)) {
caffe_gpu_scal(count, diff_scale_, bottom_diff);
}
}
}
caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) {
greentea_gpu_set<Dtype>(this->device_->id(), count, diff_scale_,
(cl_mem) bottom_diff, 0);
} else {
const Dtype* bottom_data = bottom[0]->gpu_data();
// Compute dy/dx = scale * power * (shift + scale * x)^(power - 1)
// = diff_scale * y / (shift + scale * x)
if (power_ == Dtype(2)) {
// Special case for y = (shift + scale * x)^2
// -> dy/dx = 2 * scale * (shift + scale * x)
// = diff_scale * shift + diff_scale * scale * x
greentea_gpu_axpby(this->device_->id(), count,
diff_scale_ * scale_, (cl_mem) bottom_data, 0,
Dtype(0), (cl_mem) bottom_diff, 0);
if (shift_ != Dtype(0)) {
greentea_gpu_add_scalar(this->device_->id(), count,
diff_scale_ * shift_, (cl_mem) bottom_diff,
0);
}
} else if (shift_ == Dtype(0)) {
// Special case for y = (scale * x)^power
// -> dy/dx = scale * power * (scale * x)^(power - 1)
// = scale * power * (scale * x)^power * (scale * x)^(-1)
// = power * y / x
const Dtype* top_data = top[0]->gpu_data();
greentea_gpu_div<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) bottom_data, 0,
(cl_mem) bottom_diff, 0);
greentea_gpu_scal<Dtype>(this->device_->id(), count, power_,
(cl_mem) bottom_diff, 0);
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0,
(cl_mem) bottom_diff, 0, &ctx);
if (scale_ != Dtype(1)) {
greentea_gpu_scal(this->device_->id(), count, scale_,
(cl_mem) bottom_diff, 0);
}
if (shift_ != Dtype(0)) {
greentea_gpu_add_scalar(this->device_->id(), count, shift_,
(cl_mem) bottom_diff, 0);
}
const Dtype* top_data = top[0]->gpu_data();
greentea_gpu_div<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
if (diff_scale_ != Dtype(1)) {
greentea_gpu_scal(this->device_->id(), count, diff_scale_,
(cl_mem) bottom_diff, 0);
}
}
}
greentea_gpu_mul<Dtype>(this->device_->id(), count,
(cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PowerLayer);
} // namespace caffe
| 999f2fab99ec554ba538d5253c739f70751570e1.cu | #include <vector>
#include "caffe/layers/power_layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
template<typename Dtype>
void PowerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// Special case where we can ignore the input: scale or power is 0.
if (diff_scale_ == Dtype(0)) {
Dtype value = (power_ == 0) ? Dtype(1) : pow(shift_, power_);
caffe_gpu_set(count, value, top_data);
return;
}
const Dtype* bottom_data = bottom[0]->gpu_data();
caffe_copy(count, bottom_data, top_data);
if (scale_ != Dtype(1)) {
caffe_gpu_scal(count, scale_, top_data);
}
if (shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, shift_, top_data);
}
if (power_ != Dtype(1)) {
caffe_gpu_powx(count, top_data, power_, top_data);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (diff_scale_ == Dtype(0)) {
Dtype value = (power_ == 0) ? Dtype(1) : pow(shift_, power_);
greentea_gpu_set<Dtype>(this->device_->id(), count, value,
(cl_mem) top_data, 0);
return;
}
const Dtype* bottom_data = bottom[0]->gpu_data();
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
if (scale_ != Dtype(1)) {
greentea_gpu_scal(this->device_->id(), count, scale_,
(cl_mem) top_data, 0);
}
if (shift_ != Dtype(0)) {
greentea_gpu_add_scalar<Dtype>(this->device_->id(), count, shift_,
(cl_mem) top_data, 0);
}
if (power_ != Dtype(1)) {
greentea_gpu_powx<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, power_, (cl_mem) top_data,
0);
}
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void PowerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int_tp count = bottom[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) {
caffe_gpu_set(count, diff_scale_, bottom_diff);
} else {
const Dtype* bottom_data = bottom[0]->gpu_data();
// Compute dy/dx = scale * power * (shift + scale * x)^(power - 1)
// = diff_scale * y / (shift + scale * x)
if (power_ == Dtype(2)) {
// Special case for y = (shift + scale * x)^2
// -> dy/dx = 2 * scale * (shift + scale * x)
// = diff_scale * shift + diff_scale * scale * x
caffe_gpu_axpby(count, diff_scale_ * scale_, bottom_data, Dtype(0),
bottom_diff);
if (shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, diff_scale_ * shift_, bottom_diff);
}
} else if (shift_ == Dtype(0)) {
// Special case for y = (scale * x)^power
// -> dy/dx = scale * power * (scale * x)^(power - 1)
// = scale * power * (scale * x)^power * (scale * x)^(-1)
// = power * y / x
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
caffe_gpu_scal(count, power_, bottom_diff);
} else {
caffe_copy(count, bottom_data, bottom_diff);
if (scale_ != Dtype(1)) {
caffe_gpu_scal(count, scale_, bottom_diff);
}
if (shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, shift_, bottom_diff);
}
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_div<Dtype>(count, top_data, bottom_diff, bottom_diff);
if (diff_scale_ != Dtype(1)) {
caffe_gpu_scal(count, diff_scale_, bottom_diff);
}
}
}
caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) {
greentea_gpu_set<Dtype>(this->device_->id(), count, diff_scale_,
(cl_mem) bottom_diff, 0);
} else {
const Dtype* bottom_data = bottom[0]->gpu_data();
// Compute dy/dx = scale * power * (shift + scale * x)^(power - 1)
// = diff_scale * y / (shift + scale * x)
if (power_ == Dtype(2)) {
// Special case for y = (shift + scale * x)^2
// -> dy/dx = 2 * scale * (shift + scale * x)
// = diff_scale * shift + diff_scale * scale * x
greentea_gpu_axpby(this->device_->id(), count,
diff_scale_ * scale_, (cl_mem) bottom_data, 0,
Dtype(0), (cl_mem) bottom_diff, 0);
if (shift_ != Dtype(0)) {
greentea_gpu_add_scalar(this->device_->id(), count,
diff_scale_ * shift_, (cl_mem) bottom_diff,
0);
}
} else if (shift_ == Dtype(0)) {
// Special case for y = (scale * x)^power
// -> dy/dx = scale * power * (scale * x)^(power - 1)
// = scale * power * (scale * x)^power * (scale * x)^(-1)
// = power * y / x
const Dtype* top_data = top[0]->gpu_data();
greentea_gpu_div<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) bottom_data, 0,
(cl_mem) bottom_diff, 0);
greentea_gpu_scal<Dtype>(this->device_->id(), count, power_,
(cl_mem) bottom_diff, 0);
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0,
(cl_mem) bottom_diff, 0, &ctx);
if (scale_ != Dtype(1)) {
greentea_gpu_scal(this->device_->id(), count, scale_,
(cl_mem) bottom_diff, 0);
}
if (shift_ != Dtype(0)) {
greentea_gpu_add_scalar(this->device_->id(), count, shift_,
(cl_mem) bottom_diff, 0);
}
const Dtype* top_data = top[0]->gpu_data();
greentea_gpu_div<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
if (diff_scale_ != Dtype(1)) {
greentea_gpu_scal(this->device_->id(), count, diff_scale_,
(cl_mem) bottom_diff, 0);
}
}
}
greentea_gpu_mul<Dtype>(this->device_->id(), count,
(cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PowerLayer);
} // namespace caffe
|
b32fe8bc0727c5986ce33fb8db90e3a4bd96306d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#define LSIZE 31
#define MIN_LIM 12.0
#define MAX_LIM 30.0
void check_input(int argc,char* argv[]);
__global__ void examine(float *d_coordinates,int *d_coords_within,int d_lines);
long calc_lines(char *filename);
int main(int argc,char * argv[])
{
check_input(argc,argv); // Check cmd inputs
char *filename=argv[3]; // Variable initialization
int coll = atoi(argv[1]);
int exec_time=atoi(argv[2]);
int threads=atoi(argv[4]);
int BLOCKSIZE = atoi(argv[5]);
long loop_count;
loop_count =calc_lines(filename); // Count the lines of input file
FILE *input=fopen(filename,"r"); // Open file with file descriptor
struct hipDeviceProp_t prop;
hipGetDeviceProperties(&prop,0); // Get gpu's properties information
if(coll != -1) // Handle max_collisions argument
{
if(coll>loop_count)
{
printf("[!] Warning: Specified collisions to be tested exceed the ones in input file\n");
printf("[!] Setting the number of collisions to the maximum (taken from input file)\n");
}
else
{
if (coll<0) return 1;
loop_count = coll;
}
}
if (BLOCKSIZE==-1) // Handle blocksize argument
{
BLOCKSIZE=512; // A default value
}
else
{
if (BLOCKSIZE%prop.warpSize!=0 || BLOCKSIZE<=0)
{
printf("[-]Block_size must be a positive multiple of gpu's warp_size %d \n",prop.warpSize );
return 5;
}
}
if (threads!=-1) // Handle threads argument
{
if (threads<=0) return 4;
if (threads%BLOCKSIZE!=0)
{
threads=(threads/BLOCKSIZE)*BLOCKSIZE;
}
}
else
{
threads=prop.maxThreadsPerMultiProcessor*prop.multiProcessorCount;
}
// Print some information [ Useful for debugging ]
printf("[+] GPU-model: %s\tTotal GPU memory %ld MB \n",prop.name,prop.totalGlobalMem/(1024*1024) );
printf("[!] You are trying to allocate %ld MBs of memmory on CPU-RAM and GPU-GlobalMem\n",threads*3*sizeof(float)/(1024*1024) );
printf("[+] Launching %d GPU-Threads with BlockSize %d\n",threads,BLOCKSIZE );
// Initialize CUDA WallClock-time counters as events
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
dim3 blockSize(BLOCKSIZE); // Declare CUDA Block size explicitly
dim3 gridSize(threads/BLOCKSIZE); // Declare CUDA Grid size explicitly
float *h_coordinates=(float * )malloc(3*threads*sizeof(float)); // allocate Host memory for elements to be read from file
float *d_coordinates;
int *d_coords_within,*h_coords_within=(int*)malloc(sizeof(int)); // allocate Host memory for the counter of coordinates in area of interest
*h_coords_within=0;
// Allocate memory on CUDA capable Device for:
hipMalloc(&d_coordinates,3*threads*sizeof(float)); // input file's coordinates
hipMalloc(&d_coords_within,sizeof(int)); // coordinates counter
hipMemcpy(d_coords_within,h_coords_within,sizeof(int),hipMemcpyHostToDevice); // Initialize the value of counter on Device
int i,j=0;
float time_elapsed = 0;
printf("[+] Working...\n" );
hipEventRecord(start); // Starting time reference
while(j<loop_count && (exec_time==-1?1:time_elapsed<exec_time)) // Main loop of the program
{
if (j+threads>loop_count)
{
threads=loop_count-j;
hipFree(d_coordinates);
hipMalloc(&d_coordinates,3*threads*sizeof(float));
}
for(i=0;i<threads;i++)
{
fscanf(input,"%f %f %f",&h_coordinates[i*3],&h_coordinates[i*3+1],&h_coordinates[i*3+2]); // Read cooordinates from file
}
hipMemcpy(d_coordinates,h_coordinates,3*threads*sizeof(float),hipMemcpyHostToDevice); // Copy read coordinates to Device
hipLaunchKernelGGL(( examine), dim3(gridSize),dim3(blockSize), 0, 0, d_coordinates,d_coords_within,3*threads); // Launch gpu kernel for calculations
hipEventRecord(stop); // Stop time reference
hipEventSynchronize(stop); // Block CPU until "stop" event is recorded
hipEventElapsedTime(&time_elapsed, start, stop); // Calculate the time elapsed in milliseconds
time_elapsed=time_elapsed/1000; // Convert milliseconds to seconds
j+=threads;
}
// Destroy CUDA timers
hipEventDestroy(start);
hipEventDestroy(stop);
hipMemcpy(h_coords_within,d_coords_within,sizeof(int),hipMemcpyDeviceToHost); // Copy results from Device to Host
//Printing results
printf("[+] Main part of the program was being executed for :: %.3f :: sec)\n", time_elapsed);
printf("[+] %ld coordinates have been analyzed\n[+] %d cooordinates were inside the area of interest\n[+] %ld coordinates read per second\n", loop_count, *h_coords_within, (time_elapsed<1?loop_count:loop_count/(int)time_elapsed));
// Free Host and Device memory
hipFree(d_coordinates);
hipFree(d_coords_within);
fclose(input);
free(h_coordinates);
free(h_coords_within);
return 0;
}
__global__ void examine(float *d_coordinates,int *d_coords_within,int d_lines)
{
int index=blockIdx.x*3*blockDim.x+3*threadIdx.x; // find the index of starting element for each thread on each block
float coord1=d_coordinates[index],coord2=d_coordinates[index+1],coord3=d_coordinates[index+2]; // Copy coordinates from GPU's global memory to thread's local memory
if(index>=d_lines) return;
if(coord1 >= MIN_LIM && coord1 <= MAX_LIM && coord2 >= MIN_LIM && coord2 <= MAX_LIM && coord3 >= MIN_LIM && coord3 <= MAX_LIM)
{
// If the current coordinate is within the accepted limits, count it;
atomicAdd((unsigned int*)d_coords_within,1); // the atomic add keeps concurrent threads from corrupting the counter
}
}
void check_input(int argc,char *argv[]) // Validate the argument count and show usage
{
if (argc<6 || argc>6)
{
printf("[-] Usage: ./examine [max_collisions] [max_exec_time] [input_file] [Threads] [1D_blockSize]\nUse \"-1\": for no boundies \n");
if (argc==2) if (!strcmp(argv[1],"--help"))
{
printf("max_collisions: Maximum number of collisions\nmax_exec_time: Maximum execution time\ninput_file: Filename to examine\nThreads: Number of gpu-threads to use / # Rows in memmory\n1D_blocksize: gpu-blocksize to use" );
printf("\t ======Usefull info!======\n");
printf("1) 1D_blockSize must be a multiple of 32. (or whatever warp_size is supported by your GPU)\n2) Threads should be a multiple of blockSize\n 3)These 2 parameters are important for performance\n" );
}
exit(2);
}
}
long calc_lines(char *filename) // Calculates the lines of input file
{
FILE *file=fopen(filename,"r");
fseek(file,0L,SEEK_END); //set file position indicator right to the end-of-file
long lines=ftell(file); //store the number of bytes since the beginning of the file
fseek(file,0L,SEEK_SET);
fclose(file);
return lines/LSIZE; //return lines count of the file
}
| b32fe8bc0727c5986ce33fb8db90e3a4bd96306d.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#define LSIZE 31
#define MIN_LIM 12.0
#define MAX_LIM 30.0
void check_input(int argc,char* argv[]);
__global__ void examine(float *d_coordinates,int *d_coords_within,int d_lines);
long calc_lines(char *filename);
int main(int argc,char * argv[])
{
check_input(argc,argv); // Check cmd inputs
char *filename=argv[3]; // Variable initialization
int coll = atoi(argv[1]);
int exec_time=atoi(argv[2]);
int threads=atoi(argv[4]);
int BLOCKSIZE = atoi(argv[5]);
long loop_count;
loop_count =calc_lines(filename); // Count the lines of input file
FILE *input=fopen(filename,"r"); // Open file with file descriptor
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,0); // Get gpu's properties information
if(coll != -1) // Handle max_collisions argument
{
if(coll>loop_count)
{
printf("[!] Warning: Specified collisions to be tested exceed the ones in input file\n");
printf("[!] Setting the number of collisions to the maximum (taken from input file)\n");
}
else
{
if (coll<0) return 1;
loop_count = coll;
}
}
if (BLOCKSIZE==-1) // Handle blocksize argument
{
BLOCKSIZE=512; // A default value
}
else
{
if (BLOCKSIZE%prop.warpSize!=0 || BLOCKSIZE<=0)
{
printf("[-]Block_size must be a positive multiple of gpu's warp_size %d \n",prop.warpSize );
return 5;
}
}
if (threads!=-1) // Handle threads argument
{
if (threads<=0) return 4;
if (threads%BLOCKSIZE!=0)
{
threads=(threads/BLOCKSIZE)*BLOCKSIZE;
}
}
else
{
threads=prop.maxThreadsPerMultiProcessor*prop.multiProcessorCount;
}
// Print some information [ Useful for debugging ]
printf("[+] GPU-model: %s\tTotal GPU memory %ld MB \n",prop.name,prop.totalGlobalMem/(1024*1024) );
printf("[!] You are trying to allocate %ld MBs of memmory on CPU-RAM and GPU-GlobalMem\n",threads*3*sizeof(float)/(1024*1024) );
printf("[+] Launching %d GPU-Threads with BlockSize %d\n",threads,BLOCKSIZE );
// Initialize CUDA WallClock-time counters as events
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 blockSize(BLOCKSIZE); // Declare CUDA Block size explicitly
dim3 gridSize(threads/BLOCKSIZE); // Declare CUDA Grid size explicitly
float *h_coordinates=(float * )malloc(3*threads*sizeof(float)); // allocate Host memory for elements to be read from file
float *d_coordinates;
int *d_coords_within,*h_coords_within=(int*)malloc(sizeof(int)); // allocate Host memory for the counter of coordinates in area of interest
*h_coords_within=0;
// Allocate memory on CUDA capable Device for:
cudaMalloc(&d_coordinates,3*threads*sizeof(float)); // input file's coordinates
cudaMalloc(&d_coords_within,sizeof(int)); // coordinates counter
cudaMemcpy(d_coords_within,h_coords_within,sizeof(int),cudaMemcpyHostToDevice); // Initialize the value of counter on Device
int i,j=0;
float time_elapsed = 0;
printf("[+] Working...\n" );
cudaEventRecord(start); // Starting time reference
while(j<loop_count && (exec_time==-1?1:time_elapsed<exec_time)) // Main loop of the program
{
if (j+threads>loop_count)
{
threads=loop_count-j;
cudaFree(d_coordinates);
cudaMalloc(&d_coordinates,3*threads*sizeof(float));
}
for(i=0;i<threads;i++)
{
fscanf(input,"%f %f %f",&h_coordinates[i*3],&h_coordinates[i*3+1],&h_coordinates[i*3+2]); // Read cooordinates from file
}
cudaMemcpy(d_coordinates,h_coordinates,3*threads*sizeof(float),cudaMemcpyHostToDevice); // Copy read coordinates to Device
examine<<<gridSize,blockSize>>>(d_coordinates,d_coords_within,3*threads); // Launch gpu kernel for calculations
cudaEventRecord(stop); // Stop time reference
cudaEventSynchronize(stop); // Block CPU until "stop" event is recorded
cudaEventElapsedTime(&time_elapsed, start, stop); // Calculate the time elapsed in milliseconds
time_elapsed=time_elapsed/1000; // Convert milliseconds to seconds
j+=threads;
}
// Destroy CUDA timers
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaMemcpy(h_coords_within,d_coords_within,sizeof(int),cudaMemcpyDeviceToHost); // Copy results from Device to Host
//Printing results
printf("[+] Main part of the program was being executed for :: %.3f :: sec)\n", time_elapsed);
printf("[+] %ld coordinates have been analyzed\n[+] %d cooordinates were inside the area of interest\n[+] %ld coordinates read per second\n", loop_count, *h_coords_within, (time_elapsed<1?loop_count:loop_count/(int)time_elapsed));
// Free Host and Device memory
cudaFree(d_coordinates);
cudaFree(d_coords_within);
fclose(input);
free(h_coordinates);
free(h_coords_within);
return 0;
}
__global__ void examine(float *d_coordinates,int *d_coords_within,int d_lines)
{
int index=blockIdx.x*3*blockDim.x+3*threadIdx.x; // find the index of starting element for each thread on each block
float coord1=d_coordinates[index],coord2=d_coordinates[index+1],coord3=d_coordinates[index+2]; // Copy coordinates from GPU's global memory to thread's local memory
if(index>=d_lines) return;
if(coord1 >= MIN_LIM && coord1 <= MAX_LIM && coord2 >= MIN_LIM && coord2 <= MAX_LIM && coord3 >= MIN_LIM && coord3 <= MAX_LIM)
{
// If the current coordinate is within the accepted limits, count it;
atomicAdd((unsigned int*)d_coords_within,1); // the atomic add keeps concurrent threads from corrupting the counter
}
}
void check_input(int argc,char *argv[]) // Validate the argument count and show usage
{
if (argc<6 || argc>6)
{
printf("[-] Usage: ./examine [max_collisions] [max_exec_time] [input_file] [Threads] [1D_blockSize]\nUse \"-1\": for no boundies \n");
if (argc==2) if (!strcmp(argv[1],"--help"))
{
printf("max_collisions: Maximum number of collisions\nmax_exec_time: Maximum execution time\ninput_file: Filename to examine\nThreads: Number of gpu-threads to use / # Rows in memmory\n1D_blocksize: gpu-blocksize to use" );
printf("\t ======Usefull info!======\n");
printf("1) 1D_blockSize must be a multiple of 32. (or whatever warp_size is supported by your GPU)\n2) Threads should be a multiple of blockSize\n 3)These 2 parameters are important for performance\n" );
}
exit(2);
}
}
long calc_lines(char *filename) // Calculates the lines of input file
{
FILE *file=fopen(filename,"r");
fseek(file,0L,SEEK_END); //set file position indicator right to the end-of-file
long lines=ftell(file); //store the number of bytes since the beginning of the file
fseek(file,0L,SEEK_SET);
fclose(file);
return lines/LSIZE; //return lines count of the file
}
|
7b3b6e9be4b0e215b1032f395cd0aed72fd0d992.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>
using namespace std;
// the second kernel repairs the address which is corrupted by the first kernel
static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define BUF_LEN 8
#define N 2
class B
{
public:
__device__ virtual unsigned long f1(unsigned int hash)
{return 0;}
__device__ virtual unsigned long f2(unsigned int hash)
{return 0;}
__device__ virtual unsigned long f3(unsigned int hash)
{return 0;}
__device__ virtual unsigned long f4(unsigned int hash)
{return 0;}
};
class D:public B
{
public:
__device__ __noinline__ unsigned long f1(unsigned int hash);
__device__ __noinline__ unsigned long f2(unsigned int hash);
__device__ __noinline__ unsigned long f3(unsigned int hash);
__device__ __noinline__ unsigned long f4(unsigned int hash);
};
__device__ __noinline__ unsigned long D::f1(unsigned int hash)
{return hash;}
__device__ __noinline__ unsigned long D::f2(unsigned int hash)
{return 2*hash;}
__device__ __noinline__ unsigned long D::f3(unsigned int hash)
{return 3*hash;}
__device__ __noinline__ unsigned long D::f4(unsigned int hash)
{return 4*hash;}
__device__ __noinline__ unsigned long secret()
{
printf("Hello Admin!\n");
return 0x9999999999999999;
}
__device__ __noinline__ unsigned long unsafe(unsigned long *input,unsigned int len)
{
unsigned long res=0;
unsigned long hash=5381;
unsigned long *buf;
if(blockDim.x==2)
buf=(unsigned long *)malloc(sizeof(unsigned long)*BUF_LEN);
D *objD=new D;
printf("blockdim %d,thread %d,buf %p\n",blockDim.x,threadIdx.x,buf);
printf("blockdim %d,thread %d,secret %p\n",blockDim.x,threadIdx.x,secret);
printf("blockdim %d,thread %d,objD %p\n",blockDim.x,threadIdx.x,objD);
if(blockDim.x==2)
for(int i=0;i<len;i++)
{
buf[i]=input[i];
}
for(int i=0;i<BUF_LEN;i++)
hash=((hash<<5)+hash)+buf[i];
res=objD->f1(hash);
res=objD->f2(res);
res=objD->f3(res);
res=objD->f4(res);
if(blockDim.x==1)
for(int i=0;i<11;i++)
{ printf("blockdim %d %lx\n",blockDim.x,buf[i]);}
return res;
}
__global__ void test_kernel(unsigned long *hashes,unsigned long *input,unsigned int len,int *admin)
{
unsigned long my_hash;
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(*admin)
my_hash=secret();
else
my_hash=unsafe(input,len);
hashes[idx]=my_hash;
}
__global__ void test_kernel2(unsigned long *hashes2,unsigned long *input,unsigned int len,int *admin)
{
unsigned long my_hash;
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(*admin)
my_hash=secret();
else
my_hash=unsafe(input,len);
hashes2[idx]=my_hash;
}
int main()
{
unsigned long input[100];
unsigned long input2[100];
unsigned int len=8;
int admin=0;
unsigned long hashes[N];
unsigned long hashes2[N];
unsigned long *dev_hashes;
unsigned long *dev_hashes2;
unsigned long *dev_input;
unsigned long *dev_input2;
int *dev_admin;
for(int i=0;i<4;i++)
input[i]=0x14;
for(int i=4;i<60;i++)
input[i]=0x50263f920;
for(int i=0;i<4;i++)
input2[i]=0x14;
for(int i=4;i<40;i++)
input2[i]=0x51111110;
CUDA_CHECK_RETURN(hipMalloc((void**)&dev_hashes,N*sizeof(unsigned long)));
CUDA_CHECK_RETURN(hipMalloc((void**)&dev_hashes2,N*sizeof(unsigned long)));
CUDA_CHECK_RETURN(hipMalloc((void**)&dev_input,100*sizeof(unsigned long)));
CUDA_CHECK_RETURN(hipMalloc((void**)&dev_input2,100*sizeof(unsigned long)));
CUDA_CHECK_RETURN(hipMalloc((void**)&dev_admin,sizeof(int)));
CUDA_CHECK_RETURN(hipMemcpy(dev_input,input,100*sizeof(unsigned long),hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(dev_input2,input2,100*sizeof(unsigned long),hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(dev_admin,&admin,sizeof(int),hipMemcpyHostToDevice));
// cout<<"start!"<<endl;
//0x50263f920
//0x1c0000001c0
hipLaunchKernelGGL(( test_kernel), dim3(1),dim3(N), 0, 0, dev_hashes,dev_input, len,dev_admin);
hipLaunchKernelGGL(( test_kernel2), dim3(1),dim3(1), 0, 0, dev_hashes2,dev_input2, len,dev_admin);
// CUDA_CHECK_RETURN(hipMemcpy(&hashes,dev_hashes,N*sizeof(unsigned long),hipMemcpyDeviceToHost));
CUDA_CHECK_RETURN(hipMemcpy(&hashes,dev_hashes,N*sizeof(unsigned long),hipMemcpyDeviceToHost));
CUDA_CHECK_RETURN(hipMemcpy(&hashes2,dev_hashes2,N*sizeof(unsigned long),hipMemcpyDeviceToHost));
for(int i=0;i<N;i++)
{
printf("%d, %lx\n",i,hashes[i]);
}
for(int i=0;i<N;i++)
{
printf("%d, %lx\n",i,hashes2[i]);
}
cout<<endl;
hipFree(dev_hashes);
hipFree(dev_hashes2);
hipFree(dev_admin);
hipFree(dev_input);
//CUDA_CHECK_RETURN(hipFree(dev_hashes));
//CUDA_CHECK_RETURN(hipFree(dev_admin));
//CUDA_CHECK_RETURN(hipFree(dev_len));
//CUDA_CHECK_RETURN(hipFree(dev_input));
return 0;
}
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err)
{
if (err == hipSuccess)
return;
std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
| 7b3b6e9be4b0e215b1032f395cd0aed72fd0d992.cu | #include <stdio.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>
using namespace std;
// the second kernel repairs the address which is corrupted by the first kernel
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define BUF_LEN 8
#define N 2
class B
{
public:
__device__ virtual unsigned long f1(unsigned int hash)
{return 0;}
__device__ virtual unsigned long f2(unsigned int hash)
{return 0;}
__device__ virtual unsigned long f3(unsigned int hash)
{return 0;}
__device__ virtual unsigned long f4(unsigned int hash)
{return 0;}
};
class D:public B
{
public:
__device__ __noinline__ unsigned long f1(unsigned int hash);
__device__ __noinline__ unsigned long f2(unsigned int hash);
__device__ __noinline__ unsigned long f3(unsigned int hash);
__device__ __noinline__ unsigned long f4(unsigned int hash);
};
__device__ __noinline__ unsigned long D::f1(unsigned int hash)
{return hash;}
__device__ __noinline__ unsigned long D::f2(unsigned int hash)
{return 2*hash;}
__device__ __noinline__ unsigned long D::f3(unsigned int hash)
{return 3*hash;}
__device__ __noinline__ unsigned long D::f4(unsigned int hash)
{return 4*hash;}
__device__ __noinline__ unsigned long secret()
{
printf("Hello Admin!\n");
return 0x9999999999999999;
}
__device__ __noinline__ unsigned long unsafe(unsigned long *input,unsigned int len)
{
unsigned long res=0;
unsigned long hash=5381;
unsigned long *buf;
if(blockDim.x==2)
buf=(unsigned long *)malloc(sizeof(unsigned long)*BUF_LEN);
D *objD=new D;
printf("blockdim %d,thread %d,buf %p\n",blockDim.x,threadIdx.x,buf);
printf("blockdim %d,thread %d,secret %p\n",blockDim.x,threadIdx.x,secret);
printf("blockdim %d,thread %d,objD %p\n",blockDim.x,threadIdx.x,objD);
if(blockDim.x==2)
for(int i=0;i<len;i++)
{
buf[i]=input[i];
}
for(int i=0;i<BUF_LEN;i++)
hash=((hash<<5)+hash)+buf[i];
res=objD->f1(hash);
res=objD->f2(res);
res=objD->f3(res);
res=objD->f4(res);
if(blockDim.x==1)
for(int i=0;i<11;i++)
{ printf("blockdim %d %lx\n",blockDim.x,buf[i]);}
return res;
}
__global__ void test_kernel(unsigned long *hashes,unsigned long *input,unsigned int len,int *admin)
{
unsigned long my_hash;
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(*admin)
my_hash=secret();
else
my_hash=unsafe(input,len);
hashes[idx]=my_hash;
}
__global__ void test_kernel2(unsigned long *hashes2,unsigned long *input,unsigned int len,int *admin)
{
unsigned long my_hash;
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(*admin)
my_hash=secret();
else
my_hash=unsafe(input,len);
hashes2[idx]=my_hash;
}
int main()
{
unsigned long input[100];
unsigned long input2[100];
unsigned int len=8;
int admin=0;
unsigned long hashes[N];
unsigned long hashes2[N];
unsigned long *dev_hashes;
unsigned long *dev_hashes2;
unsigned long *dev_input;
unsigned long *dev_input2;
int *dev_admin;
for(int i=0;i<4;i++)
input[i]=0x14;
for(int i=4;i<60;i++)
input[i]=0x50263f920;
for(int i=0;i<4;i++)
input2[i]=0x14;
for(int i=4;i<40;i++)
input2[i]=0x51111110;
CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_hashes,N*sizeof(unsigned long)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_hashes2,N*sizeof(unsigned long)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_input,100*sizeof(unsigned long)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_input2,100*sizeof(unsigned long)));
CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_admin,sizeof(int)));
CUDA_CHECK_RETURN(cudaMemcpy(dev_input,input,100*sizeof(unsigned long),cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(dev_input2,input2,100*sizeof(unsigned long),cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(dev_admin,&admin,sizeof(int),cudaMemcpyHostToDevice));
// cout<<"start!"<<endl;
//0x50263f920
//0x1c0000001c0
test_kernel<<<1,N>>>(dev_hashes,dev_input, len,dev_admin);
test_kernel2<<<1,1>>>(dev_hashes2,dev_input2, len,dev_admin);
// CUDA_CHECK_RETURN(cudaMemcpy(&hashes,dev_hashes,N*sizeof(unsigned long),cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaMemcpy(&hashes,dev_hashes,N*sizeof(unsigned long),cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaMemcpy(&hashes2,dev_hashes2,N*sizeof(unsigned long),cudaMemcpyDeviceToHost));
for(int i=0;i<N;i++)
{
printf("%d, %lx\n",i,hashes[i]);
}
for(int i=0;i<N;i++)
{
printf("%d, %lx\n",i,hashes2[i]);
}
cout<<endl;
cudaFree(dev_hashes);
cudaFree(dev_hashes2);
cudaFree(dev_admin);
cudaFree(dev_input);
//CUDA_CHECK_RETURN(cudaFree(dev_hashes));
//CUDA_CHECK_RETURN(cudaFree(dev_admin));
//CUDA_CHECK_RETURN(cudaFree(dev_len));
//CUDA_CHECK_RETURN(cudaFree(dev_input));
return 0;
}
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
if (err == cudaSuccess)
return;
std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
|
compute_scores.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compute_scores.cuh"
#include <catboost/cuda/methods/kernel/score_calcers.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/random_gen.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <contrib/libs/cub/hipcub/hipcub.hpp>
#include <cmath>
#include <exception>
#include <cfloat>
namespace NKernel {
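// ARGMAX: block-wide shared-memory reduction that keeps the lowest score (ties resolved towards the smaller index); thread 0 writes the winning bin feature into *result.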
#define ARGMAX() \
__shared__ float scores[BlockSize]; \
scores[tid] = bestScore; \
__shared__ int indices[BlockSize]; \
indices[tid] = bestIndex; \
__syncthreads();\
for (ui32 s = BlockSize >> 1; s > 0; s >>= 1) { \
if (tid < s) { \
if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { \
scores[tid] = scores[tid + s]; \
indices[tid] = indices[tid + s]; \
}\
}\
__syncthreads();\
} \
if (!tid) { \
const int index = indices[0];\
if (index != -1 && index < binFeatureCount) { \
result->FeatureId = bf[index].FeatureId;\
result->BinId = bf[index].BinId;\
result->Score = scores[0];\
} else {\
result->FeatureId = -1;\
result->BinId = -1;\
result->Score = FLT_MAX;\
}\
}
// histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
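// One thread per candidate binary feature: accumulate split statistics over the partitions in partIds (plus the fixed rest partitions), score the split, then let ARGMAX pick the block's best candidate.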
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplits(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int pCount,
const ui32* restPartIds, int restPartCount,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
partIds += blockIdx.y * pCount;
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
calcer.NextFeature(bf[binFeatureId]);
for (int i = 0; i < pCount; i++) {
const int leafId = __ldg(partIds + i);
const float weightLeft = max(__ldg(histograms + leafId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(__ldg(partStats + leafId * statCount) - weightLeft, 0.0f);
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + leafId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + leafId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
}
}
//add fixed leaves
for (int i = 0; i < restPartCount; i++) {
const int leafId = __ldg(restPartIds + i);
const float weight = max(__ldg(partStats + leafId * statCount), 0.0f);
double totalSum = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
double sum = __ldg(partStats + leafId * statCount + statId);
totalSumPart += sum;
calcer.AddLeaf(sum, weight);
totalSum += sum;
}
if (multiclassOptimization) {
calcer.AddLeaf(-totalSum, weight);
}
}
const float score = calcer.GetScore();
if (score < bestScore) {
bestScore = score;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
void ComputeOptimalSplits(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int partBlockSize, int partBlockCount,
const ui32* restPartIds, int restPartCount,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partBlockCount;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplits<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, partBlockSize, restPartIds, restPartCount, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
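// Grid-stride accumulation of sum(wt), sum(wt*wt/w) and sum(w); each block reduces its partial sums and atomically adds them into aggregatedStats[0..2].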
template <int BlockSize>
__global__ void ComputeTargetVarianceImpl(const float* stats,
ui32 size,
ui32 statCount,
ui64 statLineSize,
bool isMulticlass,
double* aggregatedStats) {
ui32 i = BlockSize * blockIdx.x + threadIdx.x;
float weightedSum = 0;
float weightedSum2 = 0;
float totalWeight = 0;
while (i < size) {
const float w = stats[i];
if (w > 1e-15f) {
float statSum = 0;
for (ui32 statId = 1; statId < statCount; ++statId) {
const float wt = stats[i + statLineSize * statId];
weightedSum += wt;
weightedSum2 += wt * wt / w; // because we need sum w * t * t
statSum += wt;
}
if (isMulticlass) {
weightedSum += -statSum;
weightedSum2 += statSum * statSum / w;
}
totalWeight += w;
}
i += gridDim.x * BlockSize;
}
using BlockReduce = typename hipcub::BlockReduce<double, BlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
double blockWeightedSum = weightedSum;
blockWeightedSum = BlockReduce(tempStorage).Sum(blockWeightedSum);
double blockWeightedSum2 = weightedSum2;
blockWeightedSum2 = BlockReduce(tempStorage).Sum(blockWeightedSum2);
double blockTotalWeight = totalWeight;
blockTotalWeight = BlockReduce(tempStorage).Sum(blockTotalWeight);
if (threadIdx.x == 0) {
TAtomicAdd<double>::Add(aggregatedStats, blockWeightedSum);
TAtomicAdd<double>::Add(aggregatedStats + 1, blockWeightedSum2);
TAtomicAdd<double>::Add(aggregatedStats + 2, blockTotalWeight);
}
}
void ComputeTargetVariance(const float* stats,
ui32 size,
ui32 statCount,
ui64 statLineSize,
bool isMulticlass,
double* aggregatedStats,
TCudaStream stream) {
const ui32 blockSize = 512;
const ui32 numBlocks = min(4 * TArchProps::SMCount(), CeilDivide(size, blockSize));
FillBuffer(aggregatedStats, 0.0, 3, stream);
if (numBlocks) {
hipLaunchKernelGGL(( ComputeTargetVarianceImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, stats, size, statCount, statLineSize, isMulticlass, aggregatedStats);
}
}
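// Region variant: each thread scores a single partition before and after the candidate split and keeps the best (most negative) gain.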
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplitsRegion(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
partIds += blockIdx.y;
const int thisPartId = partIds[0];
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
calcer.NextFeature(bf[binFeatureId]);
TScoreCalcer beforeSplitCalcer = calcer;
const double partWeight = __ldg(partStats + thisPartId * statCount);
const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(partWeight - weightLeft, 0.0f);
bool toZeroPartSplit = false;
if (weightLeft < 1e-20f || weightRight < 1e-20f) {
toZeroPartSplit = true;
}
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + thisPartId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
beforeSplitCalcer.AddLeaf(partStat, partWeight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight);
}
const bool skip = toZeroPartSplit;
const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX;
const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX;
//-10 - 0 = -10
//in gpu catboost all scores are inverse, lower is better
const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0;
if (gain < bestScore) {
bestScore = gain;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplit(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const int partId,
const int maybeSecondPartId,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
const int thisPartId = blockIdx.y == 0 ? partId : maybeSecondPartId;
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
calcer.NextFeature(bf[binFeatureId]);
TScoreCalcer beforeSplitCalcer = calcer;
const double partWeight = __ldg(partStats + thisPartId * statCount);
const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(partWeight - weightLeft, 0.0f);
bool toZeroPartSplit = false;
if (weightLeft < 1e-20f || weightRight < 1e-20f) {
toZeroPartSplit = true;
}
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + thisPartId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
beforeSplitCalcer.AddLeaf(partStat, partWeight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight);
}
const bool skip = toZeroPartSplit;
const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX;
const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX;
//-10 - 0 = -10
//in gpu catboost all scores are inverse, lower is better
const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0;
if (gain < bestScore) {
bestScore = gain;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
void ComputeOptimalSplitsRegion(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int partCount,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partCount;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplitsRegion<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
void ComputeOptimalSplit(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
ui32 partId, ui32 maybeSecondPartId,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partId == maybeSecondPartId ? 1 : 2;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplit<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partId, maybeSecondPartId, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
//seems like this'll be faster on CPU
template <class TScoreCalcer>
void ComputeTreeScoreImpl(const double* partStats, int statCount,
const ui32* allPartIds, int allPartCount,
bool multiclassOptimization,
TScoreCalcer calcer,
double* result) {
calcer.NextFeature(TCBinFeature({100500, 42}));
for (int i = 0; i < allPartCount; ++i) {
const int leafId = allPartIds[i];
const double weight = max(partStats[leafId * statCount], 0.0);
double totalSum = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
double sum = partStats[leafId * statCount + statId];
totalSumPart += sum;
calcer.AddLeaf(sum, weight);
totalSum += sum;
}
if (multiclassOptimization) {
calcer.AddLeaf(-totalSum, weight);
}
}
result[0] = calcer.GetScore();
}
void ComputeTreeScore(
const double* partStats,
int statCount,
const ui32* allPartIds,
int allPartCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
double* result,
TCudaStream) {
#define RUN() \
ComputeTreeScoreImpl(partStats, statCount, allPartIds, allPartCount, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
#undef ARGMAX
}
| compute_scores.cu | #include "compute_scores.cuh"
#include <catboost/cuda/methods/kernel/score_calcers.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/random_gen.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <contrib/libs/cub/cub/block/block_reduce.cuh>
#include <cmath>
#include <exception>
#include <cfloat>
namespace NKernel {
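// ARGMAX: block-wide shared-memory reduction that keeps the lowest score (ties resolved towards the smaller index); thread 0 writes the winning bin feature into *result.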
#define ARGMAX() \
__shared__ float scores[BlockSize]; \
scores[tid] = bestScore; \
__shared__ int indices[BlockSize]; \
indices[tid] = bestIndex; \
__syncthreads();\
for (ui32 s = BlockSize >> 1; s > 0; s >>= 1) { \
if (tid < s) { \
if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { \
scores[tid] = scores[tid + s]; \
indices[tid] = indices[tid + s]; \
}\
}\
__syncthreads();\
} \
if (!tid) { \
const int index = indices[0];\
if (index != -1 && index < binFeatureCount) { \
result->FeatureId = bf[index].FeatureId;\
result->BinId = bf[index].BinId;\
result->Score = scores[0];\
} else {\
result->FeatureId = -1;\
result->BinId = -1;\
result->Score = FLT_MAX;\
}\
}
// histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
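// One thread per candidate binary feature: accumulate split statistics over the partitions in partIds (plus the fixed rest partitions), score the split, then let ARGMAX pick the block's best candidate.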
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplits(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int pCount,
const ui32* restPartIds, int restPartCount,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
partIds += blockIdx.y * pCount;
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
calcer.NextFeature(bf[binFeatureId]);
for (int i = 0; i < pCount; i++) {
const int leafId = __ldg(partIds + i);
const float weightLeft = max(__ldg(histograms + leafId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(__ldg(partStats + leafId * statCount) - weightLeft, 0.0f);
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + leafId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + leafId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
}
}
//add fixed leaves
for (int i = 0; i < restPartCount; i++) {
const int leafId = __ldg(restPartIds + i);
const float weight = max(__ldg(partStats + leafId * statCount), 0.0f);
double totalSum = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
double sum = __ldg(partStats + leafId * statCount + statId);
totalSumPart += sum;
calcer.AddLeaf(sum, weight);
totalSum += sum;
}
if (multiclassOptimization) {
calcer.AddLeaf(-totalSum, weight);
}
}
const float score = calcer.GetScore();
if (score < bestScore) {
bestScore = score;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
void ComputeOptimalSplits(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int partBlockSize, int partBlockCount,
const ui32* restPartIds, int restPartCount,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partBlockCount;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplits<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, partBlockSize, restPartIds, restPartCount, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
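// Grid-stride accumulation of sum(wt), sum(wt*wt/w) and sum(w); each block reduces its partial sums and atomically adds them into aggregatedStats[0..2].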
template <int BlockSize>
__global__ void ComputeTargetVarianceImpl(const float* stats,
ui32 size,
ui32 statCount,
ui64 statLineSize,
bool isMulticlass,
double* aggregatedStats) {
ui32 i = BlockSize * blockIdx.x + threadIdx.x;
float weightedSum = 0;
float weightedSum2 = 0;
float totalWeight = 0;
while (i < size) {
const float w = stats[i];
if (w > 1e-15f) {
float statSum = 0;
for (ui32 statId = 1; statId < statCount; ++statId) {
const float wt = stats[i + statLineSize * statId];
weightedSum += wt;
weightedSum2 += wt * wt / w; // because we need sum w * t * t
statSum += wt;
}
if (isMulticlass) {
weightedSum += -statSum;
weightedSum2 += statSum * statSum / w;
}
totalWeight += w;
}
i += gridDim.x * BlockSize;
}
using BlockReduce = typename cub::BlockReduce<double, BlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
double blockWeightedSum = weightedSum;
blockWeightedSum = BlockReduce(tempStorage).Sum(blockWeightedSum);
double blockWeightedSum2 = weightedSum2;
blockWeightedSum2 = BlockReduce(tempStorage).Sum(blockWeightedSum2);
double blockTotalWeight = totalWeight;
blockTotalWeight = BlockReduce(tempStorage).Sum(blockTotalWeight);
if (threadIdx.x == 0) {
TAtomicAdd<double>::Add(aggregatedStats, blockWeightedSum);
TAtomicAdd<double>::Add(aggregatedStats + 1, blockWeightedSum2);
TAtomicAdd<double>::Add(aggregatedStats + 2, blockTotalWeight);
}
}
void ComputeTargetVariance(const float* stats,
ui32 size,
ui32 statCount,
ui64 statLineSize,
bool isMulticlass,
double* aggregatedStats,
TCudaStream stream) {
const ui32 blockSize = 512;
const ui32 numBlocks = min(4 * TArchProps::SMCount(), CeilDivide(size, blockSize));
FillBuffer(aggregatedStats, 0.0, 3, stream);
if (numBlocks) {
ComputeTargetVarianceImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(stats, size, statCount, statLineSize, isMulticlass, aggregatedStats);
}
}
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplitsRegion(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
partIds += blockIdx.y;
const int thisPartId = partIds[0];
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
calcer.NextFeature(bf[binFeatureId]);
TScoreCalcer beforeSplitCalcer = calcer;
const double partWeight = __ldg(partStats + thisPartId * statCount);
const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(partWeight - weightLeft, 0.0f);
bool toZeroPartSplit = false;
if (weightLeft < 1e-20f || weightRight < 1e-20f) {
toZeroPartSplit = true;
}
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + thisPartId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
beforeSplitCalcer.AddLeaf(partStat, partWeight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight);
}
const bool skip = toZeroPartSplit;
const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX;
const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX;
//-10 - 0 = -10
//in gpu catboost all scores are inverse, lower is better
const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0;
if (gain < bestScore) {
bestScore = gain;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplit(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const int partId,
const int maybeSecondPartId,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
const int thisPartId = blockIdx.y == 0 ? partId : maybeSecondPartId;
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
calcer.NextFeature(bf[binFeatureId]);
TScoreCalcer beforeSplitCalcer = calcer;
const double partWeight = __ldg(partStats + thisPartId * statCount);
const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(partWeight - weightLeft, 0.0f);
bool toZeroPartSplit = false;
if (weightLeft < 1e-20f || weightRight < 1e-20f) {
toZeroPartSplit = true;
}
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + thisPartId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
beforeSplitCalcer.AddLeaf(partStat, partWeight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight);
}
const bool skip = toZeroPartSplit;
const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX;
const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX;
//-10 - 0 = -10
//in gpu catboost all scores are inverse, lower is better
const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0;
if (gain < bestScore) {
bestScore = gain;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
void ComputeOptimalSplitsRegion(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int partCount,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partCount;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplitsRegion<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
void ComputeOptimalSplit(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
ui32 partId, ui32 maybeSecondPartId,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partId == maybeSecondPartId ? 1 : 2;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplit<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partId, maybeSecondPartId, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
//seems like this'll be faster on CPU
template <class TScoreCalcer>
void ComputeTreeScoreImpl(const double* partStats, int statCount,
const ui32* allPartIds, int allPartCount,
bool multiclassOptimization,
TScoreCalcer calcer,
double* result) {
calcer.NextFeature(TCBinFeature({100500, 42}));
for (int i = 0; i < allPartCount; ++i) {
const int leafId = allPartIds[i];
const double weight = max(partStats[leafId * statCount], 0.0);
double totalSum = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
double sum = partStats[leafId * statCount + statId];
totalSumPart += sum;
calcer.AddLeaf(sum, weight);
totalSum += sum;
}
if (multiclassOptimization) {
calcer.AddLeaf(-totalSum, weight);
}
}
result[0] = calcer.GetScore();
}
void ComputeTreeScore(
const double* partStats,
int statCount,
const ui32* allPartIds,
int allPartCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
double* result,
TCudaStream) {
#define RUN() \
ComputeTreeScoreImpl(partStats, statCount, allPartIds, allPartCount, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
#undef ARGMAX
}
|
compute_scores.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compute_scores.cuh"
#include <catboost/cuda/methods/kernel/score_calcers.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/random_gen.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <contrib/libs/cub/hipcub/hipcub.hpp>
#include <cmath>
#include <exception>
#include <cfloat>
namespace NKernel {
#define ARGMAX() \
__shared__ float scores[BlockSize]; \
scores[tid] = bestScore; \
__shared__ int indices[BlockSize]; \
indices[tid] = bestIndex; \
__syncthreads();\
for (ui32 s = BlockSize >> 1; s > 0; s >>= 1) { \
if (tid < s) { \
if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { \
scores[tid] = scores[tid + s]; \
indices[tid] = indices[tid + s]; \
}\
}\
__syncthreads();\
} \
if (!tid) { \
const int index = indices[0];\
if (index != -1 && index < binFeatureCount) { \
result->FeatureId = bf[index].FeatureId;\
result->BinId = bf[index].BinId;\
result->Score = scores[0];\
} else {\
result->FeatureId = -1;\
result->BinId = -1;\
result->Score = FLT_MAX;\
}\
}
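// ARGMAX() performs a block-wide tree reduction in shared memory: every thread
// contributes its best (lowest) score and candidate index, ties are broken
// towards the smaller index, and thread 0 writes the winning feature/bin/score
// (or FeatureId = BinId = -1 with Score = FLT_MAX) into *result.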
// histId * binFeatureCount * statCount + statId * binFeatureCount + features->FirstFoldIndex;
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplits(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int pCount,
const ui32* restPartIds, int restPartCount,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
partIds += blockIdx.y * pCount;
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
calcer.NextFeature(bf[binFeatureId]);
for (int i = 0; i < pCount; i++) {
const int leafId = __ldg(partIds + i);
const float weightLeft = max(__ldg(histograms + leafId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(__ldg(partStats + leafId * statCount) - weightLeft, 0.0f);
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + leafId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + leafId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
}
}
//add fixed leaves
for (int i = 0; i < restPartCount; i++) {
const int leafId = __ldg(restPartIds + i);
const float weight = max(__ldg(partStats + leafId * statCount), 0.0f);
double totalSum = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
double sum = __ldg(partStats + leafId * statCount + statId);
totalSumPart += sum;
calcer.AddLeaf(sum, weight);
totalSum += sum;
}
if (multiclassOptimization) {
calcer.AddLeaf(-totalSum, weight);
}
}
const float score = calcer.GetScore();
if (score < bestScore) {
bestScore = score;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
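// In the kernel above, blockIdx.y selects a block of partition ids and blockIdx.x
// strides over the candidate binary features; each thread scores one candidate
// split by accumulating left/right leaf statistics for every partition of its
// block plus the fixed "rest" leaves, and the block then argmax-reduces the best
// score into result[blockIdx.x + blockIdx.y * gridDim.x].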
void ComputeOptimalSplits(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int partBlockSize, int partBlockCount,
const ui32* restPartIds, int restPartCount,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 128;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partBlockCount;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplits<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, partBlockSize, restPartIds, restPartCount, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
template <int BlockSize>
__global__ void ComputeTargetVarianceImpl(const float* stats,
ui32 size,
ui32 statCount,
ui64 statLineSize,
bool isMulticlass,
double* aggregatedStats) {
ui32 i = BlockSize * blockIdx.x + threadIdx.x;
float weightedSum = 0;
float weightedSum2 = 0;
float totalWeight = 0;
while (i < size) {
const float w = stats[i];
if (w > 1e-15f) {
float statSum = 0;
for (ui32 statId = 1; statId < statCount; ++statId) {
const float wt = stats[i + statLineSize * statId];
weightedSum += wt;
weightedSum2 += wt * wt / w; //cause we need sum w * t * t
statSum += wt;
}
if (isMulticlass) {
weightedSum += -statSum;
weightedSum2 += statSum * statSum / w;
}
totalWeight += w;
}
i += gridDim.x * BlockSize;
}
using BlockReduce = typename hipcub::BlockReduce<double, BlockSize>;
__shared__ typename BlockReduce::TempStorage tempStorage;
double blockWeightedSum = weightedSum;
blockWeightedSum = BlockReduce(tempStorage).Sum(blockWeightedSum);
double blockWeightedSum2 = weightedSum2;
blockWeightedSum2 = BlockReduce(tempStorage).Sum(blockWeightedSum2);
double blockTotalWeight = totalWeight;
blockTotalWeight = BlockReduce(tempStorage).Sum(blockTotalWeight);
if (threadIdx.x == 0) {
TAtomicAdd<double>::Add(aggregatedStats, blockWeightedSum);
TAtomicAdd<double>::Add(aggregatedStats + 1, blockWeightedSum2);
TAtomicAdd<double>::Add(aggregatedStats + 2, blockTotalWeight);
}
}
void ComputeTargetVariance(const float* stats,
ui32 size,
ui32 statCount,
ui64 statLineSize,
bool isMulticlass,
double* aggregatedStats,
TCudaStream stream) {
const ui32 blockSize = 512;
const ui32 numBlocks = min(4 * TArchProps::SMCount(), CeilDivide(size, blockSize));
FillBuffer(aggregatedStats, 0.0, 3, stream);
if (numBlocks) {
hipLaunchKernelGGL(( ComputeTargetVarianceImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, stats, size, statCount, statLineSize, isMulticlass, aggregatedStats);
}
}
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplitsRegion(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
partIds += blockIdx.y;
const int thisPartId = partIds[0];
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
calcer.NextFeature(bf[binFeatureId]);
TScoreCalcer beforeSplitCalcer = calcer;
const double partWeight = __ldg(partStats + thisPartId * statCount);
const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(partWeight - weightLeft, 0.0f);
bool toZeroPartSplit = false;
if (weightLeft < 1e-20f || weightRight < 1e-20f) {
toZeroPartSplit = true;
}
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + thisPartId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
beforeSplitCalcer.AddLeaf(partStat, partWeight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight);
}
const bool skip = toZeroPartSplit;
const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX;
const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX;
//-10 - 0 = -10
//in gpu catboost all scores are inverse, lower is better
const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0;
if (gain < bestScore) {
bestScore = gain;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
template <int BlockSize,
class TScoreCalcer>
__global__ void ComputeOptimalSplit(const TCBinFeature* bf,
ui32 binFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const int partId,
const int maybeSecondPartId,
bool multiclassOptimization,
TScoreCalcer calcer,
TBestSplitProperties* result) {
float bestScore = FLT_MAX;
int bestIndex = -1;
int tid = threadIdx.x;
result += blockIdx.x + blockIdx.y * gridDim.x;
const int thisPartId = blockIdx.y == 0 ? partId : maybeSecondPartId;
for (int offset = blockIdx.x * BlockSize; offset < binFeatureCount; offset += BlockSize * gridDim.x) {
const int binFeatureId = offset + tid;
if (binFeatureId >= binFeatureCount) {
break;
}
calcer.NextFeature(bf[binFeatureId]);
TScoreCalcer beforeSplitCalcer = calcer;
const double partWeight = __ldg(partStats + thisPartId * statCount);
const float weightLeft = max(__ldg(histograms + thisPartId * statCount * binFeatureCount + binFeatureId), 0.0f);
const float weightRight = max(partWeight - weightLeft, 0.0f);
bool toZeroPartSplit = false;
if (weightLeft < 1e-20f || weightRight < 1e-20f) {
toZeroPartSplit = true;
}
double totalSumLeft = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
float sumLeft = __ldg(histograms + thisPartId * statCount * binFeatureCount + statId * binFeatureCount + binFeatureId);
double partStat = __ldg(partStats + thisPartId * statCount + statId);
totalSumPart += partStat;
float sumRight = static_cast<float>(partStat - sumLeft);
calcer.AddLeaf(sumLeft, weightLeft);
calcer.AddLeaf(sumRight, weightRight);
beforeSplitCalcer.AddLeaf(partStat, partWeight);
totalSumLeft += sumLeft;
}
if (multiclassOptimization) {
double totalSumRight = totalSumPart - totalSumLeft;
calcer.AddLeaf(-totalSumLeft, weightLeft);
calcer.AddLeaf(-totalSumRight, weightRight);
beforeSplitCalcer.AddLeaf(-totalSumPart, partWeight);
}
const bool skip = toZeroPartSplit;
const float scoreAfter = !skip ? calcer.GetScore() : FLT_MAX;
const float scoreBefore = !skip ? beforeSplitCalcer.GetScore() : FLT_MAX;
//-10 - 0 = -10
//in gpu catboost all scores are inverse, lower is better
const float gain = !skip ? abs(scoreAfter - scoreBefore) * (scoreAfter < scoreBefore ? -1 : 1) : 0;
if (gain < bestScore) {
bestScore = gain;
bestIndex = binFeatureId;
}
}
ARGMAX()
}
void ComputeOptimalSplitsRegion(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
const ui32* partIds, int partCount,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partCount;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplitsRegion<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partIds, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
void ComputeOptimalSplit(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
ui32 partId, ui32 maybeSecondPartId,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = argmaxBlockCount;
numBlocks.y = partId == maybeSecondPartId ? 1 : 2;
numBlocks.z = 1;
#define RUN() \
ComputeOptimalSplit<blockSize, TScoreCalcer> << < numBlocks, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, histograms, partStats, statCount, partId, maybeSecondPartId, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
//seems like this'll be faster on CPU
template <class TScoreCalcer>
void ComputeTreeScoreImpl(const double* partStats, int statCount,
const ui32* allPartIds, int allPartCount,
bool multiclassOptimization,
TScoreCalcer calcer,
double* result) {
calcer.NextFeature(TCBinFeature({100500, 42}));
for (int i = 0; i < allPartCount; ++i) {
const int leafId = allPartIds[i];
const double weight = max(partStats[leafId * statCount], 0.0);
double totalSum = 0;
double totalSumPart = 0;
for (int statId = 1; statId < statCount; ++statId) {
double sum = partStats[leafId * statCount + statId];
totalSumPart += sum;
calcer.AddLeaf(sum, weight);
totalSum += sum;
}
if (multiclassOptimization) {
calcer.AddLeaf(-totalSum, weight);
}
}
result[0] = calcer.GetScore();
}
void ComputeTreeScore(
const double* partStats,
int statCount,
const ui32* allPartIds,
int allPartCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
double* result,
TCudaStream) {
#define RUN() \
ComputeTreeScoreImpl(partStats, statCount, allPartIds, allPartCount, multiclassOptimization, scoreCalcer, result);
switch (scoreFunction)
{
case EScoreFunction::SolarL2: {
using TScoreCalcer = TSolarScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::SatL2: {
using TScoreCalcer = TSatL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::LOOL2: {
using TScoreCalcer = TLOOL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::L2:
case EScoreFunction::NewtonL2: {
using TScoreCalcer = TL2ScoreCalcer;
TScoreCalcer scoreCalcer(static_cast<float>(l2));
RUN()
break;
}
case EScoreFunction::Cosine:
case EScoreFunction::NewtonCosine: {
using TScoreCalcer = TCosineScoreCalcer;
TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
normalize,
static_cast<float>(scoreStdDev),
seed);
RUN()
break;
}
default: {
throw std::exception();
}
}
#undef RUN
}
#undef ARGMAX
}
| compute_scores.cuh | #pragma once
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_util/gpu_data/partitions.h>
#include <catboost/cuda/gpu_data/gpu_structures.h>
#include <catboost/libs/options/enums.h>
namespace NKernel {
void ComputeOptimalSplits(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
const float* histograms,
const double* partStats, int statCount,
ui32* partIds, int partBlockSize, int partBlockCount,
TBestSplitProperties* result, ui32 argmaxBlockCount,
EScoreFunction scoreFunction,
bool multiclassOptimization,
double l2,
bool normalize,
double scoreStdDev,
ui64 seed,
TCudaStream stream);
};
|
abbfb8c1feb998604a9d77328793365d8429a690.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define CUCHK(call) { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
fflush(stderr); \
exit(EXIT_FAILURE); \
} }
__global__ void vec_add(float *a, float *b, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += blockDim.x*gridDim.x) {
a[i] = a[i] + b[i];
}
}
int main(int argc, char *argv[])
{
int n = 64*1024*1024;
float *a, *b;
CUCHK(hipMallocManaged(&a, n*sizeof(float)));
CUCHK(hipMallocManaged(&b, n*sizeof(float)));
for (int i = 0; i < n; i++) {
a[i] = 1;
b[i] = 2;
}
for (int iter = 0; iter < 2; iter++) {
hipLaunchKernelGGL(( vec_add), dim3(n/128), dim3(128), 0, 0, a, b, n);
CUCHK(hipDeviceSynchronize());
for (int i = 0; i < n; i++) {
a[i] += 1;
}
}
CUCHK(hipFree(a));
CUCHK(hipFree(b));
}
| abbfb8c1feb998604a9d77328793365d8429a690.cu | #include <stdio.h>
#define CUCHK(call) { \
cudaError_t err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
fflush(stderr); \
exit(EXIT_FAILURE); \
} }
__global__ void vec_add(float *a, float *b, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
for (int i = tid; i < n; i += blockDim.x*gridDim.x) {
a[i] = a[i] + b[i];
}
}
int main(int argc, char *argv[])
{
int n = 64*1024*1024;
float *a, *b;
CUCHK(cudaMallocManaged(&a, n*sizeof(float)));
CUCHK(cudaMallocManaged(&b, n*sizeof(float)));
for (int i = 0; i < n; i++) {
a[i] = 1;
b[i] = 2;
}
for (int iter = 0; iter < 2; iter++) {
vec_add<<<n/128, 128>>>(a, b, n);
CUCHK(cudaDeviceSynchronize());
for (int i = 0; i < n; i++) {
a[i] += 1;
}
}
CUCHK(cudaFree(a));
CUCHK(cudaFree(b));
}
|
ad38f3bd2cbadf0eee36d05b468980f3501aca5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2015 by Contributors
* \file depth_dot.cu
* \brief
* \author Junyuan Xie
*/
#include "./depth_dot-inl.h"
#include "./mshadow_op.h"
namespace mxnet {
namespace op {
namespace depthdot {
__global__ void DepthDotForward(real_t *data, real_t *label, real_t *out, int s0, int s1) {
extern __shared__ real_t slabel[];
const int C = gridDim.y;
const int H = gridDim.x;
const int W = blockDim.x;
const int n = blockIdx.z;
const int c = blockIdx.y;
const int h = blockIdx.x;
const int w = threadIdx.x;
const int i = ((n*C+c)*H+h)*W+w;
const int stride = s1 - s0 + 1;
if (w < W) slabel[w] = label[i];
__syncthreads();
real_t o = 0.0f;
for (int j = s0; j < s1; ++j) {
real_t l;
if (w - j < 0) {
l = slabel[0];
} else if (w - j >= W) {
l = slabel[W-1];
} else {
l = slabel[w-j];
}
int i_data = ((n*stride + j - s0 + 1)*H + h)*W + w;
real_t d = data[i_data];
o += l*d;
}
out[i] = o;
}
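// DepthDotForward computes, for every (n, c, h, w), a dot product along the
// shift axis: out = sum_{j in [s0, s1)} data[n][j - s0 + 1][h][w] * label[n][c][h][w - j],
// with the shifted label index clamped to the row borders.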
__global__ void DepthDotBackward(real_t *label, real_t *out_grad, real_t *in_grad, int C, int s0, int s1) {
extern __shared__ real_t shared[];
const int H = gridDim.x;
const int W = blockDim.x;
real_t *slabel = shared;
real_t *sgrad = shared + C*W;
const int n = blockIdx.z;
const int h = blockIdx.x;
const int w = threadIdx.x;
const int stride = s1 - s0 + 1;
for (int c = 0; c < C; ++c) {
const int i = ((n*C+c)*H+h)*W+w;
if (w < W) {
slabel[c*W + w] = label[i];
sgrad[c*W + w] = out_grad[i];
}
}
__syncthreads();
for (int j = s0; j < s1; ++j) {
real_t o = 0.0f;
for (int c = 0; c < C; ++c) {
real_t l;
if (w - j < 0) {
l = slabel[c*W];
} else if (w - j >= W) {
l = slabel[c*W+W-1];
} else {
l = slabel[c*W+w-j];
}
real_t og = sgrad[c*W+w];
o += l*og;
}
in_grad[((n*stride+j-s0+1)*H+h)*W+w] = o;
}
in_grad[((n*stride)*H+h)*W+w] = 0;
}
template<int MAX_D>
__global__ void DepthDotForwardUpSample(real_t *data, real_t *label, real_t *out, int C, int H, int W, int s0, int s1, int upsample) {
extern __shared__ real_t shared[];
const int D = s1 - s0 + 1;
const int uH = H*upsample;
const int uW = W*upsample;
const int i = blockIdx.x;
const int h = i%H;
const int n = i/H;
const int w = threadIdx.x;
real_t *slabel = shared;
real_t *sout = shared + C*uW;
real_t sdata[MAX_D+1][2][2];
for (int d = 0; d < D; ++d) {
#pragma unroll
for (int hh = 0; hh < 2; ++hh) {
#pragma unroll
for (int ww = 0; ww < 2; ++ww) {
if (w+ww < W && h+hh < H && d < D) {
sdata[d][hh][ww] = data[((n*D+d)*H+h+hh)*W+w+ww];
} else {
sdata[d][hh][ww] = sdata[d][0][0];
}
}
}
}
sdata[D][0][0] = sdata[D-1][0][0];
sdata[D][0][1] = sdata[D-1][0][1];
sdata[D][1][0] = sdata[D-1][1][0];
sdata[D][1][1] = sdata[D-1][1][1];
for (int hh = 0; hh < upsample; ++hh) {
for (int c = 0; c < C; ++c) {
for (int u = 0; u < upsample; ++u) {
slabel[c*uW+u*W+w] = label[((n*C+c)*uH+h*upsample+hh)*uW+u*W+w];
sout[c*uW+u*W+w] = 0.f;
}
}
__syncthreads();
for (int ww = 0; ww < upsample; ++ww) {
int idx = w*upsample+ww;
real_t wd = static_cast<real_t>(ww)/upsample;
real_t hd = static_cast<real_t>(hh)/upsample;
real_t norm = (sdata[0][0][0]*(1.f-wd) + sdata[0][0][1]*wd) * (1.f-hd) + (sdata[0][1][0]*(1.f-wd) + sdata[0][1][1]*wd) * hd;
for (int d = 1; d < D; ++d) {
for (int dd = 0; dd < upsample; ++dd) {
int shift = (d-1+s0)*upsample + dd;
real_t zd = static_cast<real_t>(dd)/upsample;
real_t tri00 = sdata[d][0][0]*(1.f - wd) + sdata[d][0][1]*wd;
real_t tri01 = sdata[d][1][0]*(1.f - wd) + sdata[d][1][1]*wd;
real_t tri10 = sdata[d+1][0][0]*(1.f - wd) + sdata[d+1][0][1]*wd;
real_t tri11 = sdata[d+1][1][0]*(1.f - wd) + sdata[d+1][1][1]*wd;
real_t tri0 = tri00*(1.f - hd) + tri01*hd;
real_t tri1 = tri10*(1.f - hd) + tri11*hd;
real_t tri = tri0*(1.f - zd) + tri1*zd;
tri *= dd==0;
norm += tri;
if (idx - shift < 0) {
for (int c = 0; c < C; ++c) {
sout[c*uW+idx] += tri * slabel[c*uW];
}
} else if (idx - shift >= uW) {
for (int c = 0; c < C; ++c) {
sout[c*uW+idx] += tri * slabel[c*uW + uW - 1];
}
} else {
for (int c = 0; c < C; ++c) {
sout[c*uW+idx] += tri * slabel[c*uW + idx - shift];
}
}
}
}
for (int c = 0; c < C; ++c) sout[c*uW+idx] /= norm;
}
__syncthreads();
for (int c = 0; c < C; ++c) {
for (int u = 0; u < upsample; ++u) {
out[((n*C+c)*uH+h*upsample+hh)*uW+u*W+w] = sout[c*uW+u*W+w];
}
}
__syncthreads();
}
}
} // depthdot
template<typename xpu>
class DepthDotOp : public Operator {
public:
explicit DepthDotOp(DepthDotParam param) {
param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(req[depthdot::kOut], kWriteTo);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 2> data = in_data[depthdot::kData].FlatTo2D<xpu, real_t>(s);
Tensor<xpu, 2> label = in_data[depthdot::kLabel].FlatTo2D<xpu, real_t>(s);
Tensor<xpu, 2> out = out_data[depthdot::kOut].FlatTo2D<xpu, real_t>(s);
if (param_.upsample > 1) {
TShape dshape = in_data[depthdot::kData].shape_;
TShape lshape = in_data[depthdot::kLabel].shape_;
dim3 dimBlock(dshape[3]);
dim3 dimGrid(dshape[2]*dshape[0]);
CHECK_LE(param_.scale[1]-param_.scale[0]+1, 33);
hipLaunchKernelGGL(( mxnet::op::depthdot::DepthDotForwardUpSample<33>), dim3(dimGrid), dim3(dimBlock), 2*param_.upsample*lshape[1]*dshape[3]*sizeof(real_t), Stream<gpu>::GetStream(s),
data.dptr_, label.dptr_, out.dptr_, lshape[1], dshape[2], dshape[3], param_.scale[0], param_.scale[1], param_.upsample);
} else {
TShape oshape = out_data[depthdot::kOut].shape_;
dim3 dimBlock(oshape[3]);
dim3 dimGrid(oshape[2], oshape[1], oshape[0]);
hipLaunchKernelGGL(( mxnet::op::depthdot::DepthDotForward), dim3(dimGrid), dim3(dimBlock), oshape[3]*sizeof(real_t), Stream<gpu>::GetStream(s),
data.dptr_, label.dptr_, out.dptr_, param_.scale[0], param_.scale[1]);
}
hipStreamSynchronize(Stream<gpu>::GetStream(s));
}
virtual void Backward(const OpContext & ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(req[depthdot::kData], kWriteTo);
CHECK_EQ(req[depthdot::kLabel], kNullOp);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 2> label = in_data[depthdot::kLabel].FlatTo2D<xpu, real_t>(s);
Tensor<xpu, 2> igrad = in_grad[depthdot::kData].FlatTo2D<xpu, real_t>(s);
Tensor<xpu, 2> ograd = out_grad[depthdot::kOut].FlatTo2D<xpu, real_t>(s);
TShape oshape = out_grad[depthdot::kOut].shape_;
dim3 dimBlock(oshape[3]);
dim3 dimGrid(oshape[2], 1, oshape[0]);
hipLaunchKernelGGL(( mxnet::op::depthdot::DepthDotBackward), dim3(dimGrid), dim3(dimBlock), 2*oshape[3]*oshape[1]*sizeof(real_t), Stream<gpu>::GetStream(s),
label.dptr_, ograd.dptr_, igrad.dptr_, oshape[1], param_.scale[0], param_.scale[1]);
hipStreamSynchronize(Stream<gpu>::GetStream(s));
}
private:
DepthDotParam param_;
}; // class DepthDotOp
template<>
Operator *CreateOp<gpu>(DepthDotParam param) {
return new DepthDotOp<gpu>(param);
}
} // op
} // namespace mxnet
| ad38f3bd2cbadf0eee36d05b468980f3501aca5d.cu | /*!
* Copyright (c) 2015 by Contributors
* \file depth_dot.cu
* \brief
* \author Junyuan Xie
*/
#include "./depth_dot-inl.h"
#include "./mshadow_op.h"
namespace mxnet {
namespace op {
namespace depthdot {
__global__ void DepthDotForward(real_t *data, real_t *label, real_t *out, int s0, int s1) {
extern __shared__ real_t slabel[];
const int C = gridDim.y;
const int H = gridDim.x;
const int W = blockDim.x;
const int n = blockIdx.z;
const int c = blockIdx.y;
const int h = blockIdx.x;
const int w = threadIdx.x;
const int i = ((n*C+c)*H+h)*W+w;
const int stride = s1 - s0 + 1;
if (w < W) slabel[w] = label[i];
__syncthreads();
real_t o = 0.0f;
for (int j = s0; j < s1; ++j) {
real_t l;
if (w - j < 0) {
l = slabel[0];
} else if (w - j >= W) {
l = slabel[W-1];
} else {
l = slabel[w-j];
}
int i_data = ((n*stride + j - s0 + 1)*H + h)*W + w;
real_t d = data[i_data];
o += l*d;
}
out[i] = o;
}
__global__ void DepthDotBackward(real_t *label, real_t *out_grad, real_t *in_grad, int C, int s0, int s1) {
extern __shared__ real_t shared[];
const int H = gridDim.x;
const int W = blockDim.x;
real_t *slabel = shared;
real_t *sgrad = shared + C*W;
const int n = blockIdx.z;
const int h = blockIdx.x;
const int w = threadIdx.x;
const int stride = s1 - s0 + 1;
for (int c = 0; c < C; ++c) {
const int i = ((n*C+c)*H+h)*W+w;
if (w < W) {
slabel[c*W + w] = label[i];
sgrad[c*W + w] = out_grad[i];
}
}
__syncthreads();
for (int j = s0; j < s1; ++j) {
real_t o = 0.0f;
for (int c = 0; c < C; ++c) {
real_t l;
if (w - j < 0) {
l = slabel[c*W];
} else if (w - j >= W) {
l = slabel[c*W+W-1];
} else {
l = slabel[c*W+w-j];
}
real_t og = sgrad[c*W+w];
o += l*og;
}
in_grad[((n*stride+j-s0+1)*H+h)*W+w] = o;
}
in_grad[((n*stride)*H+h)*W+w] = 0;
}
template<int MAX_D>
__global__ void DepthDotForwardUpSample(real_t *data, real_t *label, real_t *out, int C, int H, int W, int s0, int s1, int upsample) {
extern __shared__ real_t shared[];
const int D = s1 - s0 + 1;
const int uH = H*upsample;
const int uW = W*upsample;
const int i = blockIdx.x;
const int h = i%H;
const int n = i/H;
const int w = threadIdx.x;
real_t *slabel = shared;
real_t *sout = shared + C*uW;
real_t sdata[MAX_D+1][2][2];
for (int d = 0; d < D; ++d) {
#pragma unroll
for (int hh = 0; hh < 2; ++hh) {
#pragma unroll
for (int ww = 0; ww < 2; ++ww) {
if (w+ww < W && h+hh < H && d < D) {
sdata[d][hh][ww] = data[((n*D+d)*H+h+hh)*W+w+ww];
} else {
sdata[d][hh][ww] = sdata[d][0][0];
}
}
}
}
sdata[D][0][0] = sdata[D-1][0][0];
sdata[D][0][1] = sdata[D-1][0][1];
sdata[D][1][0] = sdata[D-1][1][0];
sdata[D][1][1] = sdata[D-1][1][1];
for (int hh = 0; hh < upsample; ++hh) {
for (int c = 0; c < C; ++c) {
for (int u = 0; u < upsample; ++u) {
slabel[c*uW+u*W+w] = label[((n*C+c)*uH+h*upsample+hh)*uW+u*W+w];
sout[c*uW+u*W+w] = 0.f;
}
}
__syncthreads();
for (int ww = 0; ww < upsample; ++ww) {
int idx = w*upsample+ww;
real_t wd = static_cast<real_t>(ww)/upsample;
real_t hd = static_cast<real_t>(hh)/upsample;
real_t norm = (sdata[0][0][0]*(1.f-wd) + sdata[0][0][1]*wd) * (1.f-hd) + (sdata[0][1][0]*(1.f-wd) + sdata[0][1][1]*wd) * hd;
for (int d = 1; d < D; ++d) {
for (int dd = 0; dd < upsample; ++dd) {
int shift = (d-1+s0)*upsample + dd;
real_t zd = static_cast<real_t>(dd)/upsample;
real_t tri00 = sdata[d][0][0]*(1.f - wd) + sdata[d][0][1]*wd;
real_t tri01 = sdata[d][1][0]*(1.f - wd) + sdata[d][1][1]*wd;
real_t tri10 = sdata[d+1][0][0]*(1.f - wd) + sdata[d+1][0][1]*wd;
real_t tri11 = sdata[d+1][1][0]*(1.f - wd) + sdata[d+1][1][1]*wd;
real_t tri0 = tri00*(1.f - hd) + tri01*hd;
real_t tri1 = tri10*(1.f - hd) + tri11*hd;
real_t tri = tri0*(1.f - zd) + tri1*zd;
tri *= dd==0;
norm += tri;
if (idx - shift < 0) {
for (int c = 0; c < C; ++c) {
sout[c*uW+idx] += tri * slabel[c*uW];
}
} else if (idx - shift >= uW) {
for (int c = 0; c < C; ++c) {
sout[c*uW+idx] += tri * slabel[c*uW + uW - 1];
}
} else {
for (int c = 0; c < C; ++c) {
sout[c*uW+idx] += tri * slabel[c*uW + idx - shift];
}
}
}
}
for (int c = 0; c < C; ++c) sout[c*uW+idx] /= norm;
}
__syncthreads();
for (int c = 0; c < C; ++c) {
for (int u = 0; u < upsample; ++u) {
out[((n*C+c)*uH+h*upsample+hh)*uW+u*W+w] = sout[c*uW+u*W+w];
}
}
__syncthreads();
}
}
} // depthdot
template<typename xpu>
class DepthDotOp : public Operator {
public:
explicit DepthDotOp(DepthDotParam param) {
param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(req[depthdot::kOut], kWriteTo);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 2> data = in_data[depthdot::kData].FlatTo2D<xpu, real_t>(s);
Tensor<xpu, 2> label = in_data[depthdot::kLabel].FlatTo2D<xpu, real_t>(s);
Tensor<xpu, 2> out = out_data[depthdot::kOut].FlatTo2D<xpu, real_t>(s);
if (param_.upsample > 1) {
TShape dshape = in_data[depthdot::kData].shape_;
TShape lshape = in_data[depthdot::kLabel].shape_;
dim3 dimBlock(dshape[3]);
dim3 dimGrid(dshape[2]*dshape[0]);
CHECK_LE(param_.scale[1]-param_.scale[0]+1, 33);
mxnet::op::depthdot::DepthDotForwardUpSample<33><<<dimGrid, dimBlock, 2*param_.upsample*lshape[1]*dshape[3]*sizeof(real_t), Stream<gpu>::GetStream(s)>>>(
data.dptr_, label.dptr_, out.dptr_, lshape[1], dshape[2], dshape[3], param_.scale[0], param_.scale[1], param_.upsample);
} else {
TShape oshape = out_data[depthdot::kOut].shape_;
dim3 dimBlock(oshape[3]);
dim3 dimGrid(oshape[2], oshape[1], oshape[0]);
mxnet::op::depthdot::DepthDotForward<<<dimGrid, dimBlock, oshape[3]*sizeof(real_t), Stream<gpu>::GetStream(s)>>>(
data.dptr_, label.dptr_, out.dptr_, param_.scale[0], param_.scale[1]);
}
cudaStreamSynchronize(Stream<gpu>::GetStream(s));
}
virtual void Backward(const OpContext & ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_args) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(req[depthdot::kData], kWriteTo);
CHECK_EQ(req[depthdot::kLabel], kNullOp);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 2> label = in_data[depthdot::kLabel].FlatTo2D<xpu, real_t>(s);
Tensor<xpu, 2> igrad = in_grad[depthdot::kData].FlatTo2D<xpu, real_t>(s);
Tensor<xpu, 2> ograd = out_grad[depthdot::kOut].FlatTo2D<xpu, real_t>(s);
TShape oshape = out_grad[depthdot::kOut].shape_;
dim3 dimBlock(oshape[3]);
dim3 dimGrid(oshape[2], 1, oshape[0]);
mxnet::op::depthdot::DepthDotBackward<<<dimGrid, dimBlock, 2*oshape[3]*oshape[1]*sizeof(real_t), Stream<gpu>::GetStream(s)>>>(
label.dptr_, ograd.dptr_, igrad.dptr_, oshape[1], param_.scale[0], param_.scale[1]);
cudaStreamSynchronize(Stream<gpu>::GetStream(s));
}
private:
DepthDotParam param_;
}; // class DepthDotOp
template<>
Operator *CreateOp<gpu>(DepthDotParam param) {
return new DepthDotOp<gpu>(param);
}
} // op
} // namespace mxnet
|
020ad86ca2cb22b22c62e711e8ef09a002417f1c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixColour.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((matrixColour), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((matrixColour), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((matrixColour), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 020ad86ca2cb22b22c62e711e8ef09a002417f1c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixColour.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixColour<<<gridBlock,threadBlock>>>(a,b,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixColour<<<gridBlock,threadBlock>>>(a,b,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixColour<<<gridBlock,threadBlock>>>(a,b,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1c108242cee03d57befde648f5f31dd2bfc3885a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/**************************************************************
The code in timer.h is part of a course on CUDA taught by its author:
Lokman A. Abbas-Turki
**************************************************************/
#include "timer.h"
/**************************************************************
Common functions
**************************************************************/
// Compare function for qsort
int compare_function(const void *a,const void *b) {
float *x = (float *) a;
float *y = (float *) b;
if (*x < *y) return - 1;
else if (*x > *y) return 1;
return 0;
}
// Generate a Gaussian vector using the Box-Muller transform
void gaussian_vector(float *v, float mu, float sigma, int n) {
for (int i = 0; i<n; i++){
float u1 = (float)rand()/(float)(RAND_MAX);
float u2 = (float)rand()/(float)(RAND_MAX);
v[i] = sigma * (sqrtf( -2 * logf(u1)) * cosf(2 * M_PI * u2)) + mu;
}
}
//Function to print a small vector of floats on host
void print_vector(float *c, int m, int n) {
for (int i=0; i<m; i++){
printf("%f ", c[i]);
printf("\n");
}
}
/**************************************************************
CPU functions
**************************************************************/
// Function for computing the square of a vector (in place)
// We actually only need z ** 2 in the computations and not z
// The square norm is also computed
void square_vector(float *z, float *znorm, int n){
for (int i = 0; i < n; i++) {
float zi = z[i];
float zsqri = zi * zi;
z[i] = zsqri;
znorm[0] += zsqri;
}
}
// Function for computing f (the secular function of interest) at a given point x
float secfunc(float *d, float *zsqr, float rho, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
sum += zsqr[i] / (d[i] - x);
}
return rho + sum;
}
// Function for computing f' (the prime derivative of the secular function of interest) at a given point x
float secfunc_prime(float *d, float *zsqr, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
float di = d[i];
sum += zsqr[i] / ((di - x) * (di - x));
}
return sum;
}
// Function for computing f'' (the second derivative of the secular function of interest) at a given point x
float secfunc_second(float *d, float *zsqr, float x, int n){
float sum = 0;
for (int i = 0; i < n; i++) {
float di = d[i];
sum += zsqr[i] / ((di - x) * (di - x) * (di - x));
}
return 2 * sum;
}
// Useful intermediary function, see equations (30) and (31) from Li's paper on page 13 and equation (42) on page 20
float discrimant_int(float a, float b, float c){
if (a <= 0) return (a - sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a + sqrtf(a * a - 4 * b *c));
}
// Useful intermediary function, see equation (46) from Li's paper on page 21
float discrimant_ext(float a, float b, float c){
if (a >= 0) return (a + sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a - sqrtf(a * a - 4 * b *c));
}
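// Note on the two discriminant helpers above: both return a root of the quadratic
// c*x^2 - a*x + b = 0. The two branches are algebraically equivalent expressions
// of the same root ((a -/+ sqrt(a^2 - 4*b*c)) / (2*c) versus 2*b / (a +/- sqrt(...)));
// the sign test on a picks the form that avoids cancellation between nearly equal terms.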
// h partition of the secular function, used for Initialization
float h_secfunc(float d_k, float d_kplus1, float zsqr_k, float zsqr_kplus1, float x){
return zsqr_k / (d_k - x) + zsqr_kplus1 / (d_kplus1 - x);
}
// Initialization for interior roots (see section 4 of Li's paper - initial guesses from page 18)
float initialization_int(float *d, float *zsqr, float rho, int k, int n){
float d_k = d[k];
float d_kplus1 = d[k + 1];
float zsqr_k = zsqr[k];
float zsqr_kplus1 = zsqr[k + 1];
float middle = (d_k + d_kplus1) / 2;
float delta = d_kplus1 - d_k;
float f = secfunc(d, zsqr, rho, middle, n);
float c = f - h_secfunc(d_k, d_kplus1, zsqr_k, zsqr_kplus1, middle);
if (f >= 0){
float a = c * delta + zsqr_k + zsqr_kplus1;
float b = zsqr_k * delta;
return discrimant_int(a, b, c) + d_k;
}
else {
float a = - c * delta + zsqr_k + zsqr_kplus1;
float b = - zsqr_kplus1 * delta;
return discrimant_int(a, b, c) + d_kplus1;
}
}
// Initialization for the exterior root (see section 4 of Li's paper - initial guesses from page 18)
float initialization_ext(float *d, float *zsqr, float *znorm, float rho, int n){
float d_nminus1 = d[n - 1];
float d_nminus2 = d[n - 2];
float d_n = d_nminus1 + znorm[0] / rho;
float zsqr_nminus1 = zsqr[n - 1];
float zsqr_nminus2 = zsqr[n - 2];
float middle = (d_nminus1 + d_n) / 2;
float f = secfunc(d, zsqr, rho, middle, n);
if (f <= 0){
float hd = h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, d_n);
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
if (c <= - hd) {
return d_n;
}
else {
float delta = d_nminus1 - d_nminus2;
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
else {
float delta = d_nminus1 - d_nminus2;
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
// Computation of a from the paper (page 13)
float a_gragg(float f, float fprime, float delta_k, float delta_kplus1){
return (delta_k + delta_kplus1) * f - delta_k * delta_kplus1 * fprime;
}
// Computation of b from the paper (page 13)
float b_gragg(float f, float delta_k, float delta_kplus1){
return delta_k * delta_kplus1 * f;
}
// Computation of c from the section Gragg of the paper (page 15)
float c_gragg(float f, float fprime, float fsecond, float delta_k, float delta_kplus1){
return f - (delta_k + delta_kplus1) * fprime + delta_k * delta_kplus1 * fsecond / 2.0;
}
// Computation of the update for x (eta) for the interior roots (see section 3.1 - Iteration formulas, pages 12 and 13)
float eta_int(float d_k, float d_kplus1, float f, float fprime, float fsecond, float x, int k, int n){
float delta_k = d_k - x;
float delta_kplus1 = d_kplus1 - x;
float a = a_gragg(f, fprime, delta_k, delta_kplus1);
float b = b_gragg(f, delta_k, delta_kplus1);
float c = c_gragg(f, fprime, fsecond, delta_k, delta_kplus1);
float eta = discrimant_int(a, b, c);
return eta;
}
// Computation of the update of x (+eta) for the exterior root
float eta_ext(float d_nminus2, float d_nminus1, float f, float fprime, float fsecond, float x, int n){
float delta_nminus2 = d_nminus2 - x;
float delta_nminus1 = d_nminus1 - x;
float a = a_gragg(f, fprime, delta_nminus2, delta_nminus1);
float b = b_gragg(f, delta_nminus2, delta_nminus1);
float c = c_gragg(f, fprime, fsecond, delta_nminus2, delta_nminus1);
float eta = discrimant_ext(a, b, c);
return eta;
}
// Iterate to find the k-th interior root
float find_root_int(float *d, float *zsqr, float rho, float x, int k, int n, int maxit, float epsilon, float *loss_CPU){
int i = 0;
float f = secfunc(d, zsqr, rho, x, n);
float d_k = d[k];
float d_kplus1 = d[k + 1];
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(d, zsqr, rho, x, n);
float fprime = secfunc_prime(d, zsqr, x, n);
float fsecond = secfunc_second(d, zsqr, x, n);
float eta = eta_int(d_k, d_kplus1, f, fprime, fsecond, x, k, n);
x += eta;
i ++;
}
if (k%(int)(n/10) == 0){
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", k, x, f, i);
}
*loss_CPU += fabsf(f) / n;
return x;
}
// Iterate to find the last root (the exterior one)
float find_root_ext(float *d, float *zsqr, float rho, float x, int n, int maxit, float epsilon, float *loss_CPU){
int i = 0;
float d_nminus2 = d[n - 2];
float d_nminus1 = d[n - 1];
float f = secfunc(d, zsqr, rho, x, n);
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(d, zsqr, rho, x, n);
float fprime = secfunc_prime(d, zsqr, x, n);
float fsecond = secfunc_second(d, zsqr, x, n);
float eta = eta_ext(d_nminus2, d_nminus1, f, fprime, fsecond, x, n);
x += eta;
i ++;
}
// Print the last eigen value
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", n - 1, x, f, i);
*loss_CPU += fabsf(f) / n;
return x;
}
void find_roots(float *xstar, float *x0, float *d, float *zsqr, float *znorm, float rho, int n, int maxit, float epsilon, float *loss_CPU){
// We make sure that the loss is set to 0
*loss_CPU =0;
for (int i=0; i<n-1; i++){
xstar[i] = find_root_int(d, zsqr, rho, x0[i], i, n, maxit, epsilon, loss_CPU);
}
xstar[n - 1] = find_root_ext(d, zsqr, rho, x0[n - 1], n, maxit, epsilon, loss_CPU);
}
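// find_roots computes the n - 1 interior roots and then the exterior one; on exit
// loss_CPU holds the mean absolute value of the secular function at the accepted
// roots, i.e. an average residual over the n eigenvalues.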
void initialize_x0(float *x0, float *d, float *zsqr, float *znorm, float rho, int n){
for (int i=0; i<n-1; i++){
x0[i] = initialization_int(d, zsqr, rho, i, n);
}
x0[n - 1] = initialization_ext(d, zsqr, znorm, rho, n);
}
/**************************************************************
GPU functions
**************************************************************/
// Kernel for computing the square of a vector (INPLACE)
// We actually only need z ** 2 in the computations and not z
// The square norm is also computed
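// A grid-stride loop lets any launch configuration cover all n entries; the
// squared norm is accumulated into znormGPU[0] with atomicAdd.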
__global__ void square_kernel_g(float *zsqrGPU, float *znormGPU, int n){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while(idx < n){
float zi = zsqrGPU[idx];
float zsqr_i = zi * zi;
zsqrGPU[idx] = zsqr_i;
atomicAdd(znormGPU, zsqr_i);
idx += gridDim.x * blockDim.x;
}
}
// Device function for computing f (the secular function of interest) at a given point x
__device__ float secfunc_g(float *dGPU, float *zsqrGPU, float rho, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
sum += zsqrGPU[i] / (dGPU[i] - x);
}
return rho + sum;
}
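// Note (added commentary, not part of the original source): secfunc_g evaluates the
// secular function f(x) = rho + sum_i zsqr_i / (d_i - x). For rho > 0 (rho = 10 in main)
// and d sorted in ascending order, f increases from -inf to +inf on each interior interval
// (d_k, d_{k+1}) and from -inf to rho on (d_{n-1}, +inf), so there is exactly one root per
// interval: n-1 interior roots plus one exterior root, which is exactly the work split
// used by the root-finding kernels below.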
// Device function for computing f' (the prime derivative of the secular function of interest) at a given point x
__device__ float secfunc_prime_g(float *dGPU, float *zsqrGPU, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
float di = dGPU[i]; // use float: truncating d to int would change the denominator
sum += zsqrGPU[i] / ((di - x) * (di - x));
}
return sum;
}
// Device function for computing f'' (the second derivative of the secular function of interest)
__device__ float secfunc_second_g(float *dGPU, float *zsqrGPU, float x, int n){
float sum = 0;
for (int i = 0; i < n; i++) {
float di = dGPU[i];
sum += zsqrGPU[i] / ((di - x) * (di - x) * (di - x));
}
return 2 * sum;
}
// Useful intermediary function, see equations (30) and (31) from Li's paper on page 13 and equation (42) on page 20
__device__ float discrimant_int_g(float a, float b, float c){
if (a <= 0) return (a - sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a + sqrtf(a * a - 4 * b *c));
}
// Useful intermediary function, see equation (46) from Li's paper on page 21
__device__ float discrimant_ext_g(float a, float b, float c){
if (a >= 0) return (a + sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a - sqrtf(a * a - 4 * b *c));
}
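// Note (added commentary): both discriminant helpers return a root of the quadratic
// c * eta^2 - a * eta + b = 0. The paired expressions (a +/- sqrt(a^2 - 4bc)) / (2c) and
// 2b / (a -/+ sqrt(a^2 - 4bc)) are algebraically identical; branching on the sign of a
// selects the form that avoids subtracting nearly equal numbers (cancellation).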
// h partition of the secular function, used for Initialization
__device__ float h_secfunc_g(float d_k, float d_kplus1, float zsqr_k, float zsqr_kplus1, float x){
return zsqr_k / (d_k - x) + zsqr_kplus1 / (d_kplus1 - x);
}
// Initialization for interior roots (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_int_g(float *dGPU, float *zsqrGPU, float rho, int k, int n){
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
float zsqr_k = zsqrGPU[k];
float zsqr_kplus1 = zsqrGPU[k + 1];
float middle = (d_k + d_kplus1) / 2;
float delta = d_kplus1 - d_k;
float f = secfunc_g(dGPU, zsqrGPU, rho, middle, n);
float c = f - h_secfunc_g(d_k, d_kplus1, zsqr_k, zsqr_kplus1, middle);
if (f >= 0){
float a = c * delta + zsqr_k + zsqr_kplus1;
float b = zsqr_k * delta;
return discrimant_int_g(a, b, c) + d_k;
}
else {
float a = - c * delta + zsqr_k + zsqr_kplus1;
float b = - zsqr_kplus1 * delta;
return discrimant_int_g(a, b, c) + d_kplus1;
}
}
// Initialization for the exterior root (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_ext_g(float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n){
float d_nminus1 = dGPU[n - 1];
float d_nminus2 = dGPU[n - 2];
float d_n = d_nminus1 + znormGPU[0] / rho;
float zsqr_nminus1 = zsqrGPU[n - 1];
float zsqr_nminus2 = zsqrGPU[n - 2];
float middle = (d_nminus1 + d_n) / 2;
float f = secfunc_g(dGPU, zsqrGPU, rho, middle, n);
if (f <= 0){
float hd = h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, d_n);
float c = f - h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
if (c <= - hd) {
return d_n;
}
else {
float delta = d_nminus1 - d_nminus2;
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext_g(a, b, c) + d_n;
}
}
else {
float delta = d_nminus1 - d_nminus2;
float c = f - h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext_g(a, b, c) + d_n;
}
}
// Computation of a from the paper (page 13)
__device__ float a_gragg_g(float f, float fprime, float delta_k, float delta_kplus1){
return (delta_k + delta_kplus1) * f - delta_k * delta_kplus1 * fprime;
}
// Computation of b from the paper (page 13)
__device__ float b_gragg_g(float f, float delta_k, float delta_kplus1){
return delta_k * delta_kplus1 * f;
}
// Computation of c from the section Gragg of the paper (page 15)
__device__ float c_gragg_g(float f, float fprime, float fsecond, float delta_k, float delta_kplus1){
return f - (delta_k + delta_kplus1) * fprime + delta_k * delta_kplus1 * fsecond / 2.0;
}
// Computation of the update for x (eta) for the interior roots (see section 3.1 - Iteration formulas, pages 12 and 13)
__device__ float eta_int_g(float d_k, float d_kplus1, float f, float fprime, float fsecond, float x, int k, int n){
float delta_k = d_k - x;
float delta_kplus1 = d_kplus1 - x;
float a = a_gragg_g(f, fprime, delta_k, delta_kplus1);
float b = b_gragg_g(f, delta_k, delta_kplus1);
float c = c_gragg_g(f, fprime, fsecond, delta_k, delta_kplus1);
float eta = discrimant_int_g(a, b, c);
return eta;
}
// Computation of the update of x (+eta) for the exterior root
__device__ float eta_ext_g(float d_nminus2, float d_nminus1, float f, float fprime, float fsecond, float x, int n){
float delta_nminus2 = d_nminus2 - x;
float delta_nminus1 = d_nminus1 - x;
float a = a_gragg_g(f, fprime, delta_nminus2, delta_nminus1);
float b = b_gragg_g(f, delta_nminus2, delta_nminus1);
float c = c_gragg_g(f, fprime, fsecond, delta_nminus2, delta_nminus1);
float eta = discrimant_ext_g(a, b, c);
return eta;
}
// Iterate to find the k-th interior root
__device__ float find_root_int_g(float *dGPU, float *zsqrGPU, float rho, float x, int k, int n, int maxit, float epsilon, float * avloss_GPU){
int i = 0;
float f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime_g(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second_g(dGPU, zsqrGPU, x, n);
float eta = eta_int_g(d_k, d_kplus1, f, fprime, fsecond, x, k, n);
x += eta;
i ++;
}
if (k%(int)(n/10) == 0){
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", k, x, f, i);
}
// Save the loss
atomicAdd(avloss_GPU, (float)(abs(f)/n));
return x;
}
// Iterate to find the last root (the exterior one)
__device__ float find_root_ext_g(float *dGPU, float *zsqrGPU, float rho, float x, int n, int maxit, float epsilon, float* avloss_GPU){
int i = 0;
float d_nminus2 = dGPU[n - 2];
float d_nminus1 = dGPU[n - 1];
float f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime_g(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second_g(dGPU, zsqrGPU, x, n);
float eta = eta_ext_g(d_nminus2, d_nminus1, f, fprime, fsecond, x, n);
x += eta;
i ++;
}
// Print the last eigen value
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", n - 1, x, f, i);
// Save the loss
atomicAdd(avloss_GPU, (float)(abs(f)/n));
return x;
}
// Kernel to launch and distribute the searching of roots among GPU cores
__global__ void find_roots_kernel_g(float *xstarGPU, float *x0GPU, float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n, int maxit, float epsilon, float *avloss_GPU){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// We make sure that the average loss is set to 0
*avloss_GPU =0;
// The first core searches for the last root (the exterior one)
if (idx == 0){
float x = x0GPU[n - 1];
xstarGPU[n - 1] = find_root_ext_g(dGPU, zsqrGPU, rho, x, n, maxit, epsilon, avloss_GPU);
}
// Each next core searches one interval (interior interval)
else {
while (idx < n) {
float x = x0GPU[idx - 1];
xstarGPU[idx - 1] = find_root_int_g(dGPU, zsqrGPU, rho, x, idx - 1, n, maxit, epsilon, avloss_GPU);
// in case we have not launched enough cores to cover all intervals
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to compute the initial guesses from the paper on GPU
__global__ void initialize_x0_kernel_g(float *x0GPU, float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// The first core computes the initial guess for the last root (the exterior one)
if (idx == 0){
x0GPU[n - 1] = initialization_ext_g(dGPU, zsqrGPU, znormGPU, rho, n);
}
// Each subsequent core computes the initial guess for one interior interval
else {
while (idx < n) {
x0GPU[idx - 1] = initialization_int_g(dGPU, zsqrGPU, rho, idx - 1, n);
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to "wake up" the GPU
__global__ void wake_up(int *test)
{
__shared__ int c;
c = 3;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < 1024)
{
test[idx] += c;
}
}
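// Note (added commentary): wake_up only exists to absorb the one-time context/driver
// initialization cost so that it is not charged to the timed sections in main. A no-op
// API call such as hipFree(0) is a commonly used alternative warm-up; the kernel form
// used here also touches global memory once.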
int main (void) {
/****************** Declaration ******************/
// Declare vectors or floats
float *d, *z, *zsqr, *znorm, *x0, *xstar, *loss_GPU, *loss_CPU;
// rho parameter
float rho = 10;
// Size of arrow matrix chosen by the user
//int n= 10;
int n;
printf("\nWhich n (number of roots for the function) do you want? \n");
scanf("%d", &n);
printf("\n \n******************* CHOICE OF N ******************** \n");
printf("n = %d\n", n);
//Maximum number of iterations
int maxit = 1e4;
//Stopping criterion
float epsilon = 1e-6;
// Memory allocation
d = (float*)malloc(n*sizeof(float));
z = (float*)malloc(n*sizeof(float));
zsqr = (float*)malloc(n*sizeof(float));
znorm = (float*)malloc(sizeof(float));
x0 = (float*)malloc(n*sizeof(float));
xstar = (float*)malloc(n*sizeof(float));
loss_GPU = (float*)malloc(sizeof(float));
loss_CPU = (float*)malloc(sizeof(float));
// Create instance of class Timer
Timer TimG, TimC;
//Fill the vector d with linear function of n
for (int i=0; i < n; i++){
d[i] = 2 * n - i;
}
// sort the vector in ascending order
qsort(d, n, sizeof(float), compare_function);
// Gaussian rank 1 perturbation
float mu_z = 5;
float sigma_z = 1;
gaussian_vector(z, mu_z, sigma_z, n);
gaussian_vector(zsqr, mu_z, sigma_z, n);
printf("\n\n**************************************************** \n");
printf("*********************** GPU ************************ \n");
printf("**************************************************** \n\n\n");
printf("********************* CONTROLS ********************* \n");
printf("We print the first, the last and 10 %% of the interior eigenvalues as a check \n");
// We first wake up the GPU if first iteration
int *testGPU;
hipMalloc(&testGPU, 1024*sizeof(int));
hipLaunchKernelGGL(( wake_up) , dim3(1024), dim3(512), 0, 0, testGPU);
hipFree(testGPU);
// Start timer GPU
TimG.start();
/***************** GPU memory alloc *****************/
// Declare vectors on GPU
float *dGPU, *zsqrGPU, *znormGPU, *x0GPU, *xstarGPU, *avloss_GPU;
// Create memory space for vectors on GPU
hipMalloc(&dGPU, n*sizeof(float));
hipMalloc(&zsqrGPU, n*sizeof(float));
hipMalloc(&znormGPU, sizeof(float));
hipMalloc(&x0GPU, n*sizeof(float));
hipMalloc(&avloss_GPU, sizeof(float));
// Container for the results
hipMalloc(&xstarGPU, n*sizeof(float));
/***************** Transfer on GPU *****************/
// Transfers on GPU
hipMemcpy(dGPU, d, n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(zsqrGPU, z, n*sizeof(float), hipMemcpyHostToDevice);
// We first compute the square and squared norm
hipLaunchKernelGGL(( square_kernel_g) , dim3(1024), dim3(512), 0, 0, zsqrGPU, znormGPU, n);
// Initialization of x0 on GPU
hipLaunchKernelGGL(( initialize_x0_kernel_g) , dim3(1024), dim3(512), 0, 0, x0GPU, dGPU, zsqrGPU, znormGPU, rho, n);
/***************** Root computation ****************/
// Find roots on GPU
hipLaunchKernelGGL(( find_roots_kernel_g) , dim3(1024), dim3(512), 0, 0, xstarGPU, x0GPU, dGPU, zsqrGPU, znormGPU, rho, n, maxit, epsilon, avloss_GPU);
// Transfer results on CPU to print it
hipMemcpy(xstar, xstarGPU, n*sizeof(float), hipMemcpyDeviceToHost);
// End timer
TimG.add();
// Collect the average spectral loss
hipMemcpy(loss_GPU, avloss_GPU, sizeof(float), hipMemcpyDeviceToHost);
// Print the first zeros
// Number of roots to display
int m = 10;
printf("\n********************* RESULTS ********************** \n");
printf("The first %i resulting roots (eigen values) are : \n", m);
print_vector(xstar, m, n);
// Free memory on GPU
hipFree(dGPU);
hipFree(zsqrGPU);
hipFree(znormGPU);
hipFree(x0GPU);
hipFree(xstarGPU);
hipFree(avloss_GPU);
printf("\n\n**************************************************** \n");
printf("*********************** CPU ************************ \n");
printf("**************************************************** \n\n\n");
printf("********************* CONTROLS ********************* \n");
printf("We print the first, the last and 10 %% of the interior eigenvalues as a check \n");
// Start timer CPU
TimC.start();
// We first compute the square and squared norm
square_vector(zsqr, znorm, n);
// Initialization of x0
initialize_x0(x0, d, zsqr, znorm, rho, n);
/***************** Root computation ****************/
// Find roots
find_roots(xstar, x0, d, zsqr, znorm, rho, n, maxit, epsilon, loss_CPU);
// End timer
TimC.add();
// Print the first zeros
// Number of roots to display
printf("\n********************* RESULTS ********************** \n");
printf("The first %i greater resulting roots (eigen values) are : \n", m);
print_vector(xstar, m, n);
printf("\n\n**************************************************** \n");
printf("******************** COMPARISON ******************** \n");
printf("**************************************************** \n\n\n");
// Print how long it took
printf("GPU timer for root finding (CPU-GPU and GPU-CPU transfers included) : %f s\n", (float)TimG.getsum());
printf("CPU timer for root finding : %f s\n\n", (float)TimC.getsum());
// Print the different errors
printf("GPU average absolute value of spectral function : %f\n", *loss_GPU);
printf("CPU average absolute value of spectral function : %f\n\n\n", *loss_CPU);
// Free memory on CPU
free(d);
free(z);
free(znorm);
free(zsqr);
free(xstar);
free(loss_CPU);
free(loss_GPU);
}
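// Build sketch (added commentary; the file name is hypothetical): with a ROCm toolchain
// this HIP translation can typically be compiled as
//   hipcc -O2 secular_hip.cpp -o secular
// provided the course's timer.h header is on the include path.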
| 1c108242cee03d57befde648f5f31dd2bfc3885a.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
/**************************************************************
The code in time.h is a part of a course on cuda taught by its authors:
Lokman A. Abbas-Turki
**************************************************************/
#include "timer.h"
/**************************************************************
Common functions
**************************************************************/
// Compare function for qsort
int compare_function(const void *a,const void *b) {
float *x = (float *) a;
float *y = (float *) b;
if (*x < *y) return - 1;
else if (*x > *y) return 1;
return 0;
}
// Generate gaussian vector using Box Muller
void gaussian_vector(float *v, float mu, float sigma, int n) {
for (int i = 0; i<n; i++){
float u1 = (float)rand()/(float)(RAND_MAX);
float u2 = (float)rand()/(float)(RAND_MAX);
v[i] = sigma * (sqrtf( -2 * logf(u1)) * cosf(2 * M_PI * u2)) + mu;
}
}
//Function to print a small vector of floats on host
void print_vector(float *c, int m, int n) {
for (int i=0; i<m; i++){
printf("%f ", c[i]);
printf("\n");
}
}
/**************************************************************
CPU functions
**************************************************************/
// Kernel for computing the square of a vector (INPLACE)
// We actually only need z ** 2 in the computations and not z
// The square norm is also computed
void square_vector(float *z, float *znorm, int n){
for (int i = 0; i < n; i++) {
float zi = z[i];
float zsqri = zi * zi;
z[i] = zsqri;
znorm[0] += zsqri;
}
}
// Function for computing f (the secular function of interest) at a given point x
float secfunc(float *d, float *zsqr, float rho, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
sum += zsqr[i] / (d[i] - x);
}
return rho + sum;
}
// Function for computing f' (the prime derivative of the secular function of interest) at a given point x
float secfunc_prime(float *d, float *zsqr, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
float di = d[i]; // use float: truncating d to int would change the denominator
sum += zsqr[i] / ((di - x) * (di - x));
}
return sum;
}
// Device function for computing f'' (the second derivative of the secular function of interest)
float secfunc_second(float *d, float *zsqr, float x, int n){
float sum = 0;
for (int i = 0; i < n; i++) {
float di = d[i];
sum += zsqr[i] / ((di - x) * (di - x) * (di - x));
}
return 2 * sum;
}
// Useful intermediary function, see equations (30) and (31) from Li's paper on page 13 and equation (42) on page 20
float discrimant_int(float a, float b, float c){
if (a <= 0) return (a - sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a + sqrtf(a * a - 4 * b *c));
}
// Useful intermediary function, see equation (46) from Li's paper on page 21
float discrimant_ext(float a, float b, float c){
if (a >= 0) return (a + sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a - sqrtf(a * a - 4 * b *c));
}
// h partition of the secular function, used for Initialization
float h_secfunc(float d_k, float d_kplus1, float zsqr_k, float zsqr_kplus1, float x){
return zsqr_k / (d_k - x) + zsqr_kplus1 / (d_kplus1 - x);
}
// Initialization for interior roots (see section 4 of Li's paper - initial guesses from page 18)
float initialization_int(float *d, float *zsqr, float rho, int k, int n){
float d_k = d[k];
float d_kplus1 = d[k + 1];
float zsqr_k = zsqr[k];
float zsqr_kplus1 = zsqr[k + 1];
float middle = (d_k + d_kplus1) / 2;
float delta = d_kplus1 - d_k;
float f = secfunc(d, zsqr, rho, middle, n);
float c = f - h_secfunc(d_k, d_kplus1, zsqr_k, zsqr_kplus1, middle);
if (f >= 0){
float a = c * delta + zsqr_k + zsqr_kplus1;
float b = zsqr_k * delta;
return discrimant_int(a, b, c) + d_k;
}
else {
float a = - c * delta + zsqr_k + zsqr_kplus1;
float b = - zsqr_kplus1 * delta;
return discrimant_int(a, b, c) + d_kplus1;
}
}
// Initialization for the exterior root (see section 4 of Li's paper - initial guesses from page 18)
float initialization_ext(float *d, float *zsqr, float *znorm, float rho, int n){
float d_nminus1 = d[n - 1];
float d_nminus2 = d[n - 2];
float d_n = d_nminus1 + znorm[0] / rho;
float zsqr_nminus1 = zsqr[n - 1];
float zsqr_nminus2 = zsqr[n - 2];
float middle = (d_nminus1 + d_n) / 2;
float f = secfunc(d, zsqr, rho, middle, n);
if (f <= 0){
float hd = h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, d_n);
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
if (c <= - hd) {
return d_n;
}
else {
float delta = d_nminus1 - d_nminus2;
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
else {
float delta = d_nminus1 - d_nminus2;
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
// Computation of a from the paper (page 13)
float a_gragg(float f, float fprime, float delta_k, float delta_kplus1){
return (delta_k + delta_kplus1) * f - delta_k * delta_kplus1 * fprime;
}
// Computation of b from the paper (page 13)
float b_gragg(float f, float delta_k, float delta_kplus1){
return delta_k * delta_kplus1 * f;
}
// Computation of c from the section Gragg of the paper (page 15)
float c_gragg(float f, float fprime, float fsecond, float delta_k, float delta_kplus1){
return f - (delta_k + delta_kplus1) * fprime + delta_k * delta_kplus1 * fsecond / 2.0;
}
// Computation of the update for x (eta) for the interior roots (see section 3.1 - Iteration formulas, pages 12 and 13)
float eta_int(float d_k, float d_kplus1, float f, float fprime, float fsecond, float x, int k, int n){
float delta_k = d_k - x;
float delta_kplus1 = d_kplus1 - x;
float a = a_gragg(f, fprime, delta_k, delta_kplus1);
float b = b_gragg(f, delta_k, delta_kplus1);
float c = c_gragg(f, fprime, fsecond, delta_k, delta_kplus1);
float eta = discrimant_int(a, b, c);
return eta;
}
// Computation of the update of x (+eta) for the exterior root
float eta_ext(float d_nminus2, float d_nminus1, float f, float fprime, float fsecond, float x, int n){
float delta_nminus2 = d_nminus2 - x;
float delta_nminus1 = d_nminus1 - x;
float a = a_gragg(f, fprime, delta_nminus2, delta_nminus1);
float b = b_gragg(f, delta_nminus2, delta_nminus1);
float c = c_gragg(f, fprime, fsecond, delta_nminus2, delta_nminus1);
float eta = discrimant_ext(a, b, c);
return eta;
}
// Iterate to find the k-th interior root
float find_root_int(float *d, float *zsqr, float rho, float x, int k, int n, int maxit, float epsilon, float *loss_CPU){
int i = 0;
float f = secfunc(d, zsqr, rho, x, n);
float d_k = d[k];
float d_kplus1 = d[k + 1];
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(d, zsqr, rho, x, n);
float fprime = secfunc_prime(d, zsqr, x, n);
float fsecond = secfunc_second(d, zsqr, x, n);
float eta = eta_int(d_k, d_kplus1, f, fprime, fsecond, x, k, n);
x += eta;
i ++;
}
if (k%(int)(n/10) == 0){
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", k, x, f, i);
}
*loss_CPU += (float)(abs(f)/n);
return x;
}
// Iterate to find the last root (the exterior one)
float find_root_ext(float *d, float *zsqr, float rho, float x, int n, int maxit, float epsilon, float *loss_CPU){
int i = 0;
float d_nminus2 = d[n - 2];
float d_nminus1 = d[n - 1];
float f = secfunc(d, zsqr, rho, x, n);
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(d, zsqr, rho, x, n);
float fprime = secfunc_prime(d, zsqr, x, n);
float fsecond = secfunc_second(d, zsqr, x, n);
float eta = eta_ext(d_nminus2, d_nminus1, f, fprime, fsecond, x, n);
x += eta;
i ++;
}
// Print the last eigen value
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", n - 1, x, f, i);
*loss_CPU += (float)(abs(f)/n);
return x;
}
void find_roots(float *xstar, float *x0, float *d, float *zsqr, float *znorm, float rho, int n, int maxit, float epsilon, float *loss_CPU){
// We make sure that the loss is set to 0
*loss_CPU =0;
for (int i=0; i<n-1; i++){
xstar[i] = find_root_int(d, zsqr, rho, x0[i], i, n, maxit, epsilon, loss_CPU);
}
xstar[n - 1] = find_root_ext(d, zsqr, rho, x0[n - 1], n, maxit, epsilon, loss_CPU);
}
void initialize_x0(float *x0, float *d, float *zsqr, float *znorm, float rho, int n){
for (int i=0; i<n-1; i++){
x0[i] = initialization_int(d, zsqr, rho, i, n);
}
x0[n - 1] = initialization_ext(d, zsqr, znorm, rho, n);
}
/**************************************************************
GPU functions
**************************************************************/
// Kernel for computing the square of a vector (INPLACE)
// We actually only need z ** 2 in the computations and not z
// The square norm is also computed
__global__ void square_kernel_g(float *zsqrGPU, float *znormGPU, int n){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while(idx < n){
float zi = zsqrGPU[idx];
float zsqr_i = zi * zi;
zsqrGPU[idx] = zsqr_i;
atomicAdd(znormGPU, zsqr_i);
idx += gridDim.x * blockDim.x;
}
}
// Device function for computing f (the secular function of interest) at a given point x
__device__ float secfunc_g(float *dGPU, float *zsqrGPU, float rho, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
sum += zsqrGPU[i] / (dGPU[i] - x);
}
return rho + sum;
}
// Device function for computing f' (the prime derivative of the secular function of interest) at a given point x
__device__ float secfunc_prime_g(float *dGPU, float *zsqrGPU, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
float di = dGPU[i]; // use float: truncating d to int would change the denominator
sum += zsqrGPU[i] / ((di - x) * (di - x));
}
return sum;
}
// Device function for computing f'' (the second derivative of the secular function of interest)
__device__ float secfunc_second_g(float *dGPU, float *zsqrGPU, float x, int n){
float sum = 0;
for (int i = 0; i < n; i++) {
float di = dGPU[i];
sum += zsqrGPU[i] / ((di - x) * (di - x) * (di - x));
}
return 2 * sum;
}
// Useful intermediary function, see equations (30) and (31) from Li's paper on page 13 and equation (42) on page 20
__device__ float discrimant_int_g(float a, float b, float c){
if (a <= 0) return (a - sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a + sqrtf(a * a - 4 * b *c));
}
// Useful intermediary function, see equation (46) from Li's paper on page 21
__device__ float discrimant_ext_g(float a, float b, float c){
if (a >= 0) return (a + sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a - sqrtf(a * a - 4 * b *c));
}
// h partition of the secular function, used for Initialization
__device__ float h_secfunc_g(float d_k, float d_kplus1, float zsqr_k, float zsqr_kplus1, float x){
return zsqr_k / (d_k - x) + zsqr_kplus1 / (d_kplus1 - x);
}
// Initialization for interior roots (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_int_g(float *dGPU, float *zsqrGPU, float rho, int k, int n){
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
float zsqr_k = zsqrGPU[k];
float zsqr_kplus1 = zsqrGPU[k + 1];
float middle = (d_k + d_kplus1) / 2;
float delta = d_kplus1 - d_k;
float f = secfunc_g(dGPU, zsqrGPU, rho, middle, n);
float c = f - h_secfunc_g(d_k, d_kplus1, zsqr_k, zsqr_kplus1, middle);
if (f >= 0){
float a = c * delta + zsqr_k + zsqr_kplus1;
float b = zsqr_k * delta;
return discrimant_int_g(a, b, c) + d_k;
}
else {
float a = - c * delta + zsqr_k + zsqr_kplus1;
float b = - zsqr_kplus1 * delta;
return discrimant_int_g(a, b, c) + d_kplus1;
}
}
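// Note (added commentary): the initial guess evaluates f at the midpoint of (d_k, d_{k+1}),
// keeps the two nearest poles exactly through h_secfunc_g, and lumps the remaining terms
// into the constant c = f(middle) - h(middle). Solving the resulting two-pole model gives
// the quadratic handled by discrimant_int_g, with the root expressed relative to d_k or
// d_{k+1} depending on the sign of f(middle).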
// Initialization for the exterior root (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_ext_g(float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n){
float d_nminus1 = dGPU[n - 1];
float d_nminus2 = dGPU[n - 2];
float d_n = d_nminus1 + znormGPU[0] / rho;
float zsqr_nminus1 = zsqrGPU[n - 1];
float zsqr_nminus2 = zsqrGPU[n - 2];
float middle = (d_nminus1 + d_n) / 2;
float f = secfunc_g(dGPU, zsqrGPU, rho, middle, n);
if (f <= 0){
float hd = h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, d_n);
float c = f - h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
if (c <= - hd) {
return d_n;
}
else {
float delta = d_nminus1 - d_nminus2;
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext_g(a, b, c) + d_n;
}
}
else {
float delta = d_nminus1 - d_nminus2;
float c = f - h_secfunc_g(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext_g(a, b, c) + d_n;
}
}
// Computation of a from the paper (page 13)
__device__ float a_gragg_g(float f, float fprime, float delta_k, float delta_kplus1){
return (delta_k + delta_kplus1) * f - delta_k * delta_kplus1 * fprime;
}
// Computation of b from the paper (page 13)
__device__ float b_gragg_g(float f, float delta_k, float delta_kplus1){
return delta_k * delta_kplus1 * f;
}
// Computation of c from the section Gragg of the paper (page 15)
__device__ float c_gragg_g(float f, float fprime, float fsecond, float delta_k, float delta_kplus1){
return f - (delta_k + delta_kplus1) * fprime + delta_k * delta_kplus1 * fsecond / 2.0;
}
// Computation of the update for x (eta) for the interior roots (see section 3.1 - Iteration formulas, pages 12 and 13)
__device__ float eta_int_g(float d_k, float d_kplus1, float f, float fprime, float fsecond, float x, int k, int n){
float delta_k = d_k - x;
float delta_kplus1 = d_kplus1 - x;
float a = a_gragg_g(f, fprime, delta_k, delta_kplus1);
float b = b_gragg_g(f, delta_k, delta_kplus1);
float c = c_gragg_g(f, fprime, fsecond, delta_k, delta_kplus1);
float eta = discrimant_int_g(a, b, c);
return eta;
}
// Computation of the update of x (+eta) for the exterior root
__device__ float eta_ext_g(float d_nminus2, float d_nminus1, float f, float fprime, float fsecond, float x, int n){
float delta_nminus2 = d_nminus2 - x;
float delta_nminus1 = d_nminus1 - x;
float a = a_gragg_g(f, fprime, delta_nminus2, delta_nminus1);
float b = b_gragg_g(f, delta_nminus2, delta_nminus1);
float c = c_gragg_g(f, fprime, fsecond, delta_nminus2, delta_nminus1);
float eta = discrimant_ext_g(a, b, c);
return eta;
}
// Iterate to find the k-th interior root
__device__ float find_root_int_g(float *dGPU, float *zsqrGPU, float rho, float x, int k, int n, int maxit, float epsilon, float * avloss_GPU){
int i = 0;
float f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime_g(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second_g(dGPU, zsqrGPU, x, n);
float eta = eta_int_g(d_k, d_kplus1, f, fprime, fsecond, x, k, n);
x += eta;
i ++;
}
if (k%(int)(n/10) == 0){
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", k, x, f, i);
}
// Save the loss
atomicAdd(avloss_GPU, (float)(abs(f)/n));
return x;
}
// Iterate to find the last root (the exterior one)
__device__ float find_root_ext_g(float *dGPU, float *zsqrGPU, float rho, float x, int n, int maxit, float epsilon, float* avloss_GPU){
int i = 0;
float d_nminus2 = dGPU[n - 2];
float d_nminus1 = dGPU[n - 1];
float f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc_g(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime_g(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second_g(dGPU, zsqrGPU, x, n);
float eta = eta_ext_g(d_nminus2, d_nminus1, f, fprime, fsecond, x, n);
x += eta;
i ++;
}
// Print the last eigen value
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", n - 1, x, f, i);
// Save the loss
atomicAdd(avloss_GPU, (float)(abs(f)/n));
return x;
}
// Kernel to launch and distribute the searching of roots among GPU cores
__global__ void find_roots_kernel_g(float *xstarGPU, float *x0GPU, float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n, int maxit, float epsilon, float *avloss_GPU){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// We make sure that the average loss is set to 0
*avloss_GPU =0;
// The first core searches for the last root (the exterior one)
if (idx == 0){
float x = x0GPU[n - 1];
xstarGPU[n - 1] = find_root_ext_g(dGPU, zsqrGPU, rho, x, n, maxit, epsilon, avloss_GPU);
}
// Each next core searches one interval (interior interval)
else {
while (idx < n) {
float x = x0GPU[idx - 1];
xstarGPU[idx - 1] = find_root_int_g(dGPU, zsqrGPU, rho, x, idx - 1, n, maxit, epsilon, avloss_GPU);
// in case we have not launched enough cores to cover all intervals
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to compute the initial guesses from the paper on GPU
__global__ void initialize_x0_kernel_g(float *x0GPU, float *dGPU, float *zsqrGPU, float *znormGPU, float rho, int n){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// The first core computes the initial guess for the last root (the exterior one)
if (idx == 0){
x0GPU[n - 1] = initialization_ext_g(dGPU, zsqrGPU, znormGPU, rho, n);
}
// Each subsequent core computes the initial guess for one interior interval
else {
while (idx < n) {
x0GPU[idx - 1] = initialization_int_g(dGPU, zsqrGPU, rho, idx - 1, n);
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to "wake up" the GPU
__global__ void wake_up(int *test)
{
__shared__ int c;
c = 3;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < 1024)
{
test[idx] += c;
}
}
int main (void) {
/****************** Declaration ******************/
// Declare vectors or floats
float *d, *z, *zsqr, *znorm, *x0, *xstar, *loss_GPU, *loss_CPU;
// rho parameter
float rho = 10;
// Size of arrow matrix chosen by the user
//int n= 10;
int n;
printf("\nWhich n (number of roots for the function) do you want? \n");
scanf("%d", &n);
printf("\n \n******************* CHOICE OF N ******************** \n");
printf("n = %d\n", n);
//Maximum number of iterations
int maxit = 1e4;
//Stopping criterion
float epsilon = 1e-6;
// Memory allocation
d = (float*)malloc(n*sizeof(float));
z = (float*)malloc(n*sizeof(float));
zsqr = (float*)malloc(n*sizeof(float));
znorm = (float*)malloc(sizeof(float));
x0 = (float*)malloc(n*sizeof(float));
xstar = (float*)malloc(n*sizeof(float));
loss_GPU = (float*)malloc(sizeof(float));
loss_CPU = (float*)malloc(sizeof(float));
// Create instance of class Timer
Timer TimG, TimC;
//Fill the vector d with linear function of n
for (int i=0; i < n; i++){
d[i] = 2 * n - i;
}
// sort the vector in ascending order
qsort(d, n, sizeof(float), compare_function);
// Gaussian rank 1 perturbation
float mu_z = 5;
float sigma_z = 1;
gaussian_vector(z, mu_z, sigma_z, n);
gaussian_vector(zsqr, mu_z, sigma_z, n);
printf("\n\n**************************************************** \n");
printf("*********************** GPU ************************ \n");
printf("**************************************************** \n\n\n");
printf("********************* CONTROLS ********************* \n");
printf("We print the first, the last and 10 %% of the interior eigenvalues as a check \n");
// We first wake up the GPU if first iteration
int *testGPU;
cudaMalloc(&testGPU, 1024*sizeof(int));
wake_up <<<1024, 512>>> (testGPU);
cudaFree(testGPU);
// Start timer GPU
TimG.start();
/***************** GPU memory alloc *****************/
// Declare vectors on GPU
float *dGPU, *zsqrGPU, *znormGPU, *x0GPU, *xstarGPU, *avloss_GPU;
// Create memory space for vectors on GPU
cudaMalloc(&dGPU, n*sizeof(float));
cudaMalloc(&zsqrGPU, n*sizeof(float));
cudaMalloc(&znormGPU, sizeof(float));
cudaMalloc(&x0GPU, n*sizeof(float));
cudaMalloc(&avloss_GPU, sizeof(float));
// Container for the results
cudaMalloc(&xstarGPU, n*sizeof(float));
/***************** Transfer on GPU *****************/
// Transfers on GPU
cudaMemcpy(dGPU, d, n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(zsqrGPU, z, n*sizeof(float), cudaMemcpyHostToDevice);
// We first compute the square and squared norm
square_kernel_g <<<1024, 512>>> (zsqrGPU, znormGPU, n);
// Initialization of x0 on GPU
initialize_x0_kernel_g <<<1024, 512>>> (x0GPU, dGPU, zsqrGPU, znormGPU, rho, n);
/***************** Root computation ****************/
// Find roots on GPU
find_roots_kernel_g <<<1024, 512>>> (xstarGPU, x0GPU, dGPU, zsqrGPU, znormGPU, rho, n, maxit, epsilon, avloss_GPU);
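// Note (added commentary): the fixed <<<1024, 512>>> launch provides 524288 threads; the
// grid-stride loops inside the kernels keep the code correct for any n, with excess
// threads simply exiting their while loop immediately.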
// Transfer results on CPU to print it
cudaMemcpy(xstar, xstarGPU, n*sizeof(float), cudaMemcpyDeviceToHost);
// End timer
TimG.add();
// Collect the average spectral loss
cudaMemcpy(loss_GPU, avloss_GPU, sizeof(float), cudaMemcpyDeviceToHost);
// Print the first zeros
// Number of roots to display
int m = 10;
printf("\n********************* RESULTS ********************** \n");
printf("The first %i resulting roots (eigen values) are : \n", m);
print_vector(xstar, m, n);
// Free memory on GPU
cudaFree(dGPU);
cudaFree(zsqrGPU);
cudaFree(znormGPU);
cudaFree(x0GPU);
cudaFree(xstarGPU);
cudaFree(avloss_GPU);
printf("\n\n**************************************************** \n");
printf("*********************** CPU ************************ \n");
printf("**************************************************** \n\n\n");
printf("********************* CONTROLS ********************* \n");
printf("We print the first, the last and 10 %% of the interior eigenvalues as a check \n");
// Start timer CPU
TimC.start();
// We first compute the square and squared norm
square_vector(zsqr, znorm, n);
// Initialization of x0
initialize_x0(x0, d, zsqr, znorm, rho, n);
/***************** Root computation ****************/
// Find roots
find_roots(xstar, x0, d, zsqr, znorm, rho, n, maxit, epsilon, loss_CPU);
// End timer
TimC.add();
// Print the first zeros
// Number of roots to display
printf("\n********************* RESULTS ********************** \n");
printf("The first %i greater resulting roots (eigen values) are : \n", m);
print_vector(xstar, m, n);
printf("\n\n**************************************************** \n");
printf("******************** COMPARISON ******************** \n");
printf("**************************************************** \n\n\n");
// Print how long it took
printf("GPU timer for root finding (CPU-GPU and GPU-CPU transfers included) : %f s\n", (float)TimG.getsum());
printf("CPU timer for root finding : %f s\n\n", (float)TimC.getsum());
// Print the different errors
printf("GPU average absolute value of spectral function : %f\n", *loss_GPU);
printf("CPU average absolute value of spectral function : %f\n\n\n", *loss_CPU);
// Free memory on CPU
free(d);
free(z);
free(znorm);
free(zsqr);
free(xstar);
free(loss_CPU);
free(loss_GPU);
}
|
20088a38c7bebdc882b1d9c0a88a41e7c806c25b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialReflectionPadding.cu"
#else
void THNN_(SpatialReflectionPadding_updateOutput)(THCState *state,
THCTensor *input,
THCTensor *output,
int padL, int padR,
int padT, int padB) {
THArgCheck(TensorUtils<THCTensor>::canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int numInputDims = THCTensor_(nDimension)(state, input);
THCUNN_argCheck(state, numInputDims == 3 || numInputDims == 4, 2, input,
"3D or 4D (batch mode) tensor expected for input, but got: %s")
if (numInputDims == 4) {
numBatch = THCTensor_(size)(state, input, 0);
planeDim++;
dimh++;
dimw++;
}
int numPlanes = THCTensor_(size)(state, input, planeDim);
int inputH = THCTensor_(size)(state, input, dimh);
int inputW = THCTensor_(size)(state, input, dimw);
THArgCheck(padL < inputW && padR < inputW, 4,
"Padding size should be less than the corresponding input dimension, "
"but got: padding (%d, %d) at dimension %d of input %s",
padL, padR, dimw, THCTensor_(sizeDesc)(state, input).str);
THArgCheck(padT < inputH && padB < inputH, 6,
"Padding size should be less than the corresponding input dimension, "
"but got: padding (%d, %d) at dimension %d of input %s",
padT, padB, dimh, THCTensor_(sizeDesc)(state, input).str);
int outputH = inputH + padT + padB;
int outputW = inputW + padL + padR;
THArgCheck(outputW >= 1 || outputH >= 1, 2,
"input (H: %d, W: %d)is too small."
" Calculated output H: %d W: %d",
inputH, inputW, outputH, outputW);
THCDeviceTensor<real, 4> devInput;
THCDeviceTensor<real, 4> devOutput;
if (numInputDims == 3) {
THCTensor_(resize3d)(state, output, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 3>(state, input).upcastOuter<4>();
devOutput = toDeviceTensor<real, 3>(state, output).upcastOuter<4>();
} else {
THCTensor_(resize4d)(state, output, numBatch, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 4>(state, input);
devOutput = toDeviceTensor<real, 4>(state, output);
}
int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.getSize(1),
devOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
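// Note (added commentary): the launch maps one thread to each element of an output plane
// (at most 256 threads per block, with gridDim.x covering the remainder of the plane),
// while gridDim.y indexes the feature planes and gridDim.z the batch dimension.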
hipLaunchKernelGGL(( SpatialReflectionPadding_updateOutput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
devInput, devOutput, padT, padB, padL, padR);
THCudaCheck(hipGetLastError());
}
void THNN_(SpatialReflectionPadding_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int padL, int padR,
int padT, int padB) {
THArgCheck(TensorUtils<THCTensor>::canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
THArgCheck(TensorUtils<THCTensor>::canUse32BitIndexMath(state, gradOutput), 3,
"output gradient tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numInputDims = THCTensor_(nDimension)(state, input);
if (numInputDims == 4) {
planeDim++;
dimh++;
dimw++;
}
int iheight = input->size[dimh];
int iwidth = input->size[dimw];
int oheight = iheight + padT + padB;
int owidth = iwidth + padL + padR;
THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3,
"gradOutput width unexpected. Expected: %d, Got: %d",
owidth, THCTensor_(size)(state, gradOutput, dimw));
THArgCheck(oheight == THCTensor_(size)(state, gradOutput, dimh), 3,
"gradOutput height unexpected. Expected: %d, Got: %d",
oheight, THCTensor_(size)(state, gradOutput, dimh));
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<real, 4> devGradInput;
THCDeviceTensor<real, 4> devGradOutput;
if (numInputDims == 3) {
devGradInput = toDeviceTensor<real, 3>(state, gradInput).upcastOuter<4>();
devGradOutput = toDeviceTensor<real, 3>(state, gradOutput).upcastOuter<4>();
} else {
devGradInput = toDeviceTensor<real, 4>(state, gradInput);
devGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
}
int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.getSize(1),
devGradOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( SpatialReflectionPadding_updateGradInput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
devGradInput, devGradOutput, padT, padB, padL, padR);
THCudaCheck(hipGetLastError());
}
#endif
| 20088a38c7bebdc882b1d9c0a88a41e7c806c25b.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialReflectionPadding.cu"
#else
void THNN_(SpatialReflectionPadding_updateOutput)(THCState *state,
THCTensor *input,
THCTensor *output,
int padL, int padR,
int padT, int padB) {
THArgCheck(TensorUtils<THCTensor>::canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int numInputDims = THCTensor_(nDimension)(state, input);
THCUNN_argCheck(state, numInputDims == 3 || numInputDims == 4, 2, input,
"3D or 4D (batch mode) tensor expected for input, but got: %s")
if (numInputDims == 4) {
numBatch = THCTensor_(size)(state, input, 0);
planeDim++;
dimh++;
dimw++;
}
int numPlanes = THCTensor_(size)(state, input, planeDim);
int inputH = THCTensor_(size)(state, input, dimh);
int inputW = THCTensor_(size)(state, input, dimw);
THArgCheck(padL < inputW && padR < inputW, 4,
"Padding size should be less than the corresponding input dimension, "
"but got: padding (%d, %d) at dimension %d of input %s",
padL, padR, dimw, THCTensor_(sizeDesc)(state, input).str);
THArgCheck(padT < inputH && padB < inputH, 6,
"Padding size should be less than the corresponding input dimension, "
"but got: padding (%d, %d) at dimension %d of input %s",
padT, padB, dimh, THCTensor_(sizeDesc)(state, input).str);
int outputH = inputH + padT + padB;
int outputW = inputW + padL + padR;
THArgCheck(outputW >= 1 || outputH >= 1, 2,
"input (H: %d, W: %d)is too small."
" Calculated output H: %d W: %d",
inputH, inputW, outputH, outputW);
THCDeviceTensor<real, 4> devInput;
THCDeviceTensor<real, 4> devOutput;
if (numInputDims == 3) {
THCTensor_(resize3d)(state, output, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 3>(state, input).upcastOuter<4>();
devOutput = toDeviceTensor<real, 3>(state, output).upcastOuter<4>();
} else {
THCTensor_(resize4d)(state, output, numBatch, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 4>(state, input);
devOutput = toDeviceTensor<real, 4>(state, output);
}
int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.getSize(1),
devOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
SpatialReflectionPadding_updateOutput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
devInput, devOutput, padT, padB, padL, padR);
THCudaCheck(cudaGetLastError());
}
void THNN_(SpatialReflectionPadding_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int padL, int padR,
int padT, int padB) {
THArgCheck(TensorUtils<THCTensor>::canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
THArgCheck(TensorUtils<THCTensor>::canUse32BitIndexMath(state, gradOutput), 3,
"output gradient tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numInputDims = THCTensor_(nDimension)(state, input);
if (numInputDims == 4) {
planeDim++;
dimh++;
dimw++;
}
int iheight = input->size[dimh];
int iwidth = input->size[dimw];
int oheight = iheight + padT + padB;
int owidth = iwidth + padL + padR;
THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3,
"gradOutput width unexpected. Expected: %d, Got: %d",
owidth, THCTensor_(size)(state, gradOutput, dimw));
THArgCheck(oheight == THCTensor_(size)(state, gradOutput, dimh), 3,
"gradOutput height unexpected. Expected: %d, Got: %d",
oheight, THCTensor_(size)(state, gradOutput, dimh));
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<real, 4> devGradInput;
THCDeviceTensor<real, 4> devGradOutput;
if (numInputDims == 3) {
devGradInput = toDeviceTensor<real, 3>(state, gradInput).upcastOuter<4>();
devGradOutput = toDeviceTensor<real, 3>(state, gradOutput).upcastOuter<4>();
} else {
devGradInput = toDeviceTensor<real, 4>(state, gradInput);
devGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
}
int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.getSize(1),
devGradOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
SpatialReflectionPadding_updateGradInput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
devGradInput, devGradOutput, padT, padB, padL, padR);
THCudaCheck(cudaGetLastError());
}
#endif
|
bf5f6037bf7023917605a33100c0a5bc134741e6.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/SpatialDilatedConvolution.hip"
#else
#include <ATen/div_rtn.h>
static inline void THNN_(SpatialDilatedConvolution_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput,
THCTensor *weight, THCTensor *bias,
int kH, int kW, int dH, int dW, int padH, int padW,
int dilationH, int dilationW, int weight_nullable) {
THArgCheck(kW > 0 && kH > 0, 9,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 11,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
THArgCheck(dilationW > 0 && dilationH > 0, 14,
"dilation should be greater than 0, but got dilationH: %d dilationW: %d",
dilationH, dilationW);
if (weight != NULL) {
THCUNN_argCheck(state, !weight->is_empty() && weight->dim() == 4, 4, weight,
"non-empty 4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, "
"but got: %s");
if (bias != NULL) {
THCUNN_check_dim_size(state, bias, 1, 0, weight->size(0));
}
} else if (!weight_nullable) {
THError("weight tensor is expected to be non-nullable");
}
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
int64_t inputHeight = input->size(dimh);
int64_t inputWidth = input->size(dimw);
int64_t outputHeight = div_rtn<int64_t>(inputHeight + 2*padH - (dilationH * (kH - 1) + 1), dH) + 1;
int64_t outputWidth = div_rtn<int64_t>(inputWidth + 2*padW - (dilationW * (kW - 1) + 1), dW) + 1;
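// Note (added commentary): this is the standard dilated-convolution output size,
//   out = floor((in + 2*pad - dilation*(kernel - 1) - 1) / stride) + 1,
// written with div_rtn (integer division rounding toward negative infinity) so that
// invalid configurations surface as out < 1 and trip the error check below.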
if (outputWidth < 1 || outputHeight < 1) {
THError("Given input size per channel: (%ld x %ld). "
"Calculated output size per channel: (%ld x %ld). Output size is too small",
inputHeight, inputWidth, outputHeight, outputWidth);
}
if (weight != NULL) {
int64_t nInputPlane = weight->size(1);
THCUNN_check_dim_size(state, input, ndim, dimf, nInputPlane);
}
if (gradOutput != NULL) {
if (weight != NULL) {
int64_t nOutputPlane = weight->size(0);
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
} else if (bias != NULL) {
int64_t nOutputPlane = THTensor_sizeLegacyNoScalars(bias, 0);
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
}
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth);
}
}
void THNN_(SpatialDilatedConvolution_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias,
THCTensor *columns,
THCTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH) {
THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones);
if (bias) {
THCUNN_assertSameGPU(state, 2, weight, bias);
THArgCheck(THCTensor_(isContiguous)(state, bias), 5, "bias tensor has to be contiguous");
}
THNN_(SpatialDilatedConvolution_shapeCheck)
(state, input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
dilationH, dilationW, 0);
// Params:
int nInputPlane = weight->size(1);
int nOutputPlane = weight->size(0);
input = THCTensor_(newContiguous)(state, input);
weight = THCTensor_(newContiguous)(state, weight);
bias = bias ? THCTensor_(newContiguous)(state, bias) : bias;
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
}
int64_t inputWidth = input->size(3);
int64_t inputHeight = input->size(2);
int64_t outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1;
int64_t outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1;
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, output, batchSize, nOutputPlane, outputHeight, outputWidth);
// Resize temporary columns
THCTensor_(resize2d)(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth);
THCTensor_(fill)(state, ones, ScalarConvert<int, scalar_t>::to(1));
}
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *output_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
THCTensor_(select)(state, output_n, output, 0, elt);
// Do Bias first:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = nOutputPlane;
int64_t n_ = outputHeight * outputWidth;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
if (bias) {
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
't', 'n',
n_, m_, k_,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, ones), k_,
THCTensor_(data)(state, bias), k_,
ScalarConvert<int, scalar_t>::to(0),
THCTensor_(data)(state, output_n), n_
);
} else {
THCTensor_(zero)(state, output_n);
}
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
dilationH, dilationW,
THCTensor_(data)(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nOutputPlane;
int64_t n = columns->size(1);
int64_t k = nInputPlane*kH*kW;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
'n', 'n',
n, m, k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, columns), n,
THCTensor_(data)(state, weight), k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, output_n), n
);
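// Note (added commentary): with the column buffer laid out as
// columns[nInputPlane*kH*kW][outputHeight*outputWidth], this GEMM computes
// output_n = weight * columns (added to the bias contribution written above), i.e. the
// classic im2col + matrix-multiply formulation of convolution, with m = nOutputPlane,
// n = outputHeight*outputWidth and k = nInputPlane*kH*kW.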
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, output_n);
// Resize output
if (is_batch == 0) {
THCTensor_(resize3d)(state, output, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, weight);
if (bias) THCTensor_(free)(state, bias);
}
void THNN_(SpatialDilatedConvolution_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *gradColumns,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH) {
THCUNN_assertSameGPU(state, 5, input, gradOutput, weight,
gradColumns, gradInput);
THNN_(SpatialDilatedConvolution_shapeCheck)
(state, input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW,
dilationH, dilationW, 0);
// Params
int nInputPlane = weight->size(1);
int nOutputPlane = weight->size(0);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
weight = THCTensor_(newContiguous)(state, weight);
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
THCTensor_(resize4d)(state, gradOutput, 1, gradOutput->size(0), gradOutput->size(1), gradOutput->size(2));
}
int64_t inputWidth = input->size(3);
int64_t inputHeight = input->size(2);
int64_t outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1;
int64_t outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1;
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCTensor_(resize2d)(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *gradInput_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per sample:
THCTensor_(select)(state, gradInput_n, gradInput, 0, elt);
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nInputPlane*kW*kH;
int64_t n = gradColumns->size(1);
int64_t k = nOutputPlane;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
'n', 't',
n, m, k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradOutput_n), n,
THCTensor_(data)(state, weight), m,
ScalarConvert<int, scalar_t>::to(0),
THCTensor_(data)(state, gradColumns), n
);
// Unpack columns back into input:
col2im<scalar_t, accreal>(
THCState_getCurrentStream(state),
THCTensor_(data)(state, gradColumns),
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
dilationH, dilationW,
THCTensor_(data)(state, gradInput_n)
);
}
// Free
THCTensor_(free)(state, gradInput_n);
THCTensor_(free)(state, gradOutput_n);
// Resize output
if (is_batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
THCTensor_(resize3d)(state, gradInput, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
THCTensor_(free)(state, weight);
}
void THNN_(SpatialDilatedConvolution_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *columns,
THCTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
accreal scale_) {
scalar_t scale = ScalarConvert<accreal, scalar_t>::to(scale_);
THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, columns, ones);
if (gradBias) {
THCUNN_assertSameGPU(state, 2, gradWeight, gradBias);
}
THNN_(SpatialDilatedConvolution_shapeCheck)
(state, input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
dilationH, dilationW, 1);
if (gradWeight) {
THArgCheck(THCTensor_(isContiguous)(state, gradWeight), 4, "gradWeight needs to be contiguous");
}
if (gradBias) {
THArgCheck(THCTensor_(isContiguous)(state, gradBias), 5, "gradBias needs to be contiguous");
THArgCheck(THCTensor_(isContiguous)(state, ones), 7, "ones needs to be contiguous");
}
// Params
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
THCTensor_(resize4d)(state, gradOutput, 1, gradOutput->size(0), gradOutput->size(1), gradOutput->size(2));
}
int64_t nInputPlane = input->size(1);
int64_t nOutputPlane = gradOutput->size(1);
int64_t inputWidth = input->size(3);
int64_t inputHeight = input->size(2);
int64_t outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1;
int64_t outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1;
// Batch size + input planes
int64_t batchSize = input->size(0);
// Define a buffer of ones, for bias accumulation
if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth);
THCTensor_(fill)(state, ones, ScalarConvert<int, scalar_t>::to(1));
}
// Resize temporary columns
THCTensor_(resize2d)(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
// Do Weight:
if (gradWeight) {
      // Matrix multiply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
dilationH, dilationW,
THCTensor_(data)(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nOutputPlane;
int64_t n = nInputPlane*kW*kH;
int64_t k = columns->size(1);
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
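      // In row-major terms this accumulates gradWeight += scale * gradOutput_n * columns^T.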
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
't', 'n',
n, m, k,
scale,
THCTensor_(data)(state, columns), k,
THCTensor_(data)(state, gradOutput_n), k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradWeight), n
);
}
// Do Bias:
if (gradBias) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = nOutputPlane;
int64_t k_ = outputHeight * outputWidth;
// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
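      // Sums gradOutput_n over the spatial locations of each output plane (scaled by scale) into gradBias.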
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(
#endif
state,
't',
k_, m_,
scale,
THCTensor_(data)(state, gradOutput_n), k_,
THCTensor_(data)(state, ones), 1,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradBias), 1
);
#endif
#ifdef THC_REAL_IS_HALF
THCudaBlas_Hgemm(
state,
't', 'n',
m_, 1, k_,
scale,
THCTensor_(data)(state, gradOutput_n), k_,
THCTensor_(data)(state, ones), k_,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradBias), m_
);
#endif
}
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, gradOutput_n);
// Resize
if (is_batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
#endif
| bf5f6037bf7023917605a33100c0a5bc134741e6.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/SpatialDilatedConvolution.cu"
#else
#include <ATen/div_rtn.h>
static inline void THNN_(SpatialDilatedConvolution_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput,
THCTensor *weight, THCTensor *bias,
int kH, int kW, int dH, int dW, int padH, int padW,
int dilationH, int dilationW, int weight_nullable) {
THArgCheck(kW > 0 && kH > 0, 9,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 11,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
THArgCheck(dilationW > 0 && dilationH > 0, 14,
"dilation should be greater than 0, but got dilationH: %d dilationW: %d",
dilationH, dilationW);
if (weight != NULL) {
THCUNN_argCheck(state, !weight->is_empty() && weight->dim() == 4, 4, weight,
"non-empty 4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, "
"but got: %s");
if (bias != NULL) {
THCUNN_check_dim_size(state, bias, 1, 0, weight->size(0));
}
} else if (!weight_nullable) {
THError("weight tensor is expected to be non-nullable");
}
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
int64_t inputHeight = input->size(dimh);
int64_t inputWidth = input->size(dimw);
int64_t outputHeight = div_rtn<int64_t>(inputHeight + 2*padH - (dilationH * (kH - 1) + 1), dH) + 1;
int64_t outputWidth = div_rtn<int64_t>(inputWidth + 2*padW - (dilationW * (kW - 1) + 1), dW) + 1;
if (outputWidth < 1 || outputHeight < 1) {
THError("Given input size per channel: (%ld x %ld). "
"Calculated output size per channel: (%ld x %ld). Output size is too small",
inputHeight, inputWidth, outputHeight, outputWidth);
}
if (weight != NULL) {
int64_t nInputPlane = weight->size(1);
THCUNN_check_dim_size(state, input, ndim, dimf, nInputPlane);
}
if (gradOutput != NULL) {
if (weight != NULL) {
int64_t nOutputPlane = weight->size(0);
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
} else if (bias != NULL) {
int64_t nOutputPlane = THTensor_sizeLegacyNoScalars(bias, 0);
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
}
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth);
}
}
void THNN_(SpatialDilatedConvolution_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias,
THCTensor *columns,
THCTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH) {
THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones);
if (bias) {
THCUNN_assertSameGPU(state, 2, weight, bias);
THArgCheck(THCTensor_(isContiguous)(state, bias), 5, "bias tensor has to be contiguous");
}
THNN_(SpatialDilatedConvolution_shapeCheck)
(state, input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
dilationH, dilationW, 0);
// Params:
int nInputPlane = weight->size(1);
int nOutputPlane = weight->size(0);
input = THCTensor_(newContiguous)(state, input);
weight = THCTensor_(newContiguous)(state, weight);
bias = bias ? THCTensor_(newContiguous)(state, bias) : bias;
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
}
int64_t inputWidth = input->size(3);
int64_t inputHeight = input->size(2);
int64_t outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1;
int64_t outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1;
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, output, batchSize, nOutputPlane, outputHeight, outputWidth);
// Resize temporary columns
THCTensor_(resize2d)(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth);
THCTensor_(fill)(state, ones, ScalarConvert<int, scalar_t>::to(1));
}
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *output_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
THCTensor_(select)(state, output_n, output, 0, elt);
// Do Bias first:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = nOutputPlane;
int64_t n_ = outputHeight * outputWidth;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
if (bias) {
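      // Outer product of ones and bias: every spatial location of an output plane is set to that plane's bias value.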
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
't', 'n',
n_, m_, k_,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, ones), k_,
THCTensor_(data)(state, bias), k_,
ScalarConvert<int, scalar_t>::to(0),
THCTensor_(data)(state, output_n), n_
);
} else {
THCTensor_(zero)(state, output_n);
}
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
dilationH, dilationW,
THCTensor_(data)(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nOutputPlane;
int64_t n = columns->size(1);
int64_t k = nInputPlane*kH*kW;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
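    // In row-major terms this accumulates output_n += weight * columns.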
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
'n', 'n',
n, m, k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, columns), n,
THCTensor_(data)(state, weight), k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, output_n), n
);
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, output_n);
// Resize output
if (is_batch == 0) {
THCTensor_(resize3d)(state, output, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, weight);
if (bias) THCTensor_(free)(state, bias);
}
void THNN_(SpatialDilatedConvolution_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *gradColumns,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH) {
THCUNN_assertSameGPU(state, 5, input, gradOutput, weight,
gradColumns, gradInput);
THNN_(SpatialDilatedConvolution_shapeCheck)
(state, input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW,
dilationH, dilationW, 0);
// Params
int nInputPlane = weight->size(1);
int nOutputPlane = weight->size(0);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
weight = THCTensor_(newContiguous)(state, weight);
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
THCTensor_(resize4d)(state, gradOutput, 1, gradOutput->size(0), gradOutput->size(1), gradOutput->size(2));
}
int64_t inputWidth = input->size(3);
int64_t inputHeight = input->size(2);
int64_t outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1;
int64_t outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1;
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCTensor_(resize2d)(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *gradInput_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per sample:
THCTensor_(select)(state, gradInput_n, gradInput, 0, elt);
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nInputPlane*kW*kH;
int64_t n = gradColumns->size(1);
int64_t k = nOutputPlane;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
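    // In row-major terms this computes gradColumns = weight^T * gradOutput_n.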
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
'n', 't',
n, m, k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradOutput_n), n,
THCTensor_(data)(state, weight), m,
ScalarConvert<int, scalar_t>::to(0),
THCTensor_(data)(state, gradColumns), n
);
// Unpack columns back into input:
col2im<scalar_t, accreal>(
THCState_getCurrentStream(state),
THCTensor_(data)(state, gradColumns),
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
dilationH, dilationW,
THCTensor_(data)(state, gradInput_n)
);
}
// Free
THCTensor_(free)(state, gradInput_n);
THCTensor_(free)(state, gradOutput_n);
// Resize output
if (is_batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
THCTensor_(resize3d)(state, gradInput, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
THCTensor_(free)(state, weight);
}
void THNN_(SpatialDilatedConvolution_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *columns,
THCTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
accreal scale_) {
scalar_t scale = ScalarConvert<accreal, scalar_t>::to(scale_);
THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, columns, ones);
if (gradBias) {
THCUNN_assertSameGPU(state, 2, gradWeight, gradBias);
}
THNN_(SpatialDilatedConvolution_shapeCheck)
(state, input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
dilationH, dilationW, 1);
if (gradWeight) {
THArgCheck(THCTensor_(isContiguous)(state, gradWeight), 4, "gradWeight needs to be contiguous");
}
if (gradBias) {
THArgCheck(THCTensor_(isContiguous)(state, gradBias), 5, "gradBias needs to be contiguous");
THArgCheck(THCTensor_(isContiguous)(state, ones), 7, "ones needs to be contiguous");
}
// Params
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int is_batch = 1;
if (input->dim() == 3) {
// Force batch
is_batch = 0;
THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2));
THCTensor_(resize4d)(state, gradOutput, 1, gradOutput->size(0), gradOutput->size(1), gradOutput->size(2));
}
int64_t nInputPlane = input->size(1);
int64_t nOutputPlane = gradOutput->size(1);
int64_t inputWidth = input->size(3);
int64_t inputHeight = input->size(2);
int64_t outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1;
int64_t outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1;
// Batch size + input planes
int64_t batchSize = input->size(0);
// Define a buffer of ones, for bias accumulation
if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCTensor_(resize2d)(state, ones, outputHeight, outputWidth);
THCTensor_(fill)(state, ones, ScalarConvert<int, scalar_t>::to(1));
}
// Resize temporary columns
THCTensor_(resize2d)(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
// Do Weight:
if (gradWeight) {
      // Matrix multiply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
dilationH, dilationW,
THCTensor_(data)(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = nOutputPlane;
int64_t n = nInputPlane*kW*kH;
int64_t k = columns->size(1);
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
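      // In row-major terms this accumulates gradWeight += scale * gradOutput_n * columns^T.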
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemm(
#elif defined(THC_REAL_IS_HALF)
THCudaBlas_Hgemm(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemm(
#endif
state,
't', 'n',
n, m, k,
scale,
THCTensor_(data)(state, columns), k,
THCTensor_(data)(state, gradOutput_n), k,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradWeight), n
);
}
// Do Bias:
if (gradBias) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = nOutputPlane;
int64_t k_ = outputHeight * outputWidth;
// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
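      // Sums gradOutput_n over the spatial locations of each output plane (scaled by scale) into gradBias.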
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef THC_REAL_IS_FLOAT
THCudaBlas_Sgemv(
#elif defined(THC_REAL_IS_DOUBLE)
THCudaBlas_Dgemv(
#endif
state,
't',
k_, m_,
scale,
THCTensor_(data)(state, gradOutput_n), k_,
THCTensor_(data)(state, ones), 1,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradBias), 1
);
#endif
#ifdef THC_REAL_IS_HALF
THCudaBlas_Hgemm(
state,
't', 'n',
m_, 1, k_,
scale,
THCTensor_(data)(state, gradOutput_n), k_,
THCTensor_(data)(state, ones), k_,
ScalarConvert<int, scalar_t>::to(1),
THCTensor_(data)(state, gradBias), m_
);
#endif
}
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, gradOutput_n);
// Resize
if (is_batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
#endif
|
a7fabceec9d852bc357ca2b16b07f31b6f834044.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
const int CUDA_NUM_THREADS = 512;
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
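// Fills count floats at dest with uniformly distributed random values from an MTGP32 pseudo-random generator seeded with seed.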
void RandomizeFloat(void* dest, const int count, const int seed) {
float* ptr = static_cast<float*>(dest);
hiprandGenerator_t gen;
CURAND_CHECK(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MTGP32));
CURAND_CHECK(hiprandSetPseudoRandomGeneratorSeed(gen, seed));
CURAND_CHECK(hiprandGenerateUniform(gen, ptr, count));
CURAND_CHECK(hiprandDestroyGenerator(gen));
CUDA_CHECK(hipDeviceSynchronize());
}
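// Grid-stride loop that fills dest with values drawn cyclically from the size-element src buffer.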
__global__ void KeFeedInputFloat(float * dest, const int count, float * src, const int size) {
int offset = (threadIdx.x + blockDim.x * blockIdx.x) % size;
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < count; i += gridDim.x * blockDim.x) {
dest[i] = src[offset];
offset = (offset+1) % size;
}
}
void FeedInputFloat(float * dest, const int count, const float * src, const int size) {
float* g_src;
CUDA_CHECK(hipMalloc(reinterpret_cast<void**>(&g_src), size*sizeof(float)));
CUDA_CHECK(hipMemcpy(g_src, src, size*sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( KeFeedInputFloat), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0,
dest, count, g_src, size);
CUDA_CHECK(hipFree(g_src));
}
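// Computes the maximum absolute element-wise difference between A and B with a shared-memory max reduction, storing it in *dmax; mismatches above 0.001 are printed on rank 0.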
template<typename T, int BSIZE>
__global__ void deltaKe(const T* A, const T* B, size_t count, double* dmax, int rank) {
__shared__ double temp[BSIZE];
int tid = threadIdx.x;
double locmax = 0.0;
for(int i=tid; i<count; i+=blockDim.x) {
double delta = fabs((double)(A[i] - B[i]));
if( delta > locmax ) {
locmax = delta;
if (delta > 0.001 && rank == 0) printf("Error at %d/%d : %f != %f, del=%lf\n", i, (int)count, (float)A[i], (float)B[i], delta);
}
}
temp[tid] = locmax;
for(int stride = BSIZE/2; stride > 1; stride>>=1) {
__syncthreads();
if( tid < stride )
temp[tid] = temp[tid] > temp[tid+stride] ? temp[tid] : temp[tid+stride];
}
__syncthreads();
if( threadIdx.x == 0)
*dmax = temp[0] > temp[1] ? temp[0] : temp[1];
}
__global__ void setDelta(double* dmax, double value) {
*dmax = value;
}
void SetDelta(double* dmax, double value) {
hipLaunchKernelGGL(( setDelta), dim3(1), dim3(1), 0, 0, dmax, value);
}
void CheckDelta(float* dst, float* dst_test, size_t count, double* dmax, int rank, hipStream_t stream) {
hipLaunchKernelGGL(( deltaKe<float, 512>), dim3(1), dim3(512), 0, stream, dst, dst_test, count, dmax, rank);
}
| a7fabceec9d852bc357ca2b16b07f31b6f834044.cu | #include "common.h"
const int CUDA_NUM_THREADS = 512;
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
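// Fills count floats at dest with uniformly distributed random values from an MTGP32 pseudo-random generator seeded with seed.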
void RandomizeFloat(void* dest, const int count, const int seed) {
float* ptr = static_cast<float*>(dest);
curandGenerator_t gen;
CURAND_CHECK(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32));
CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(gen, seed));
CURAND_CHECK(curandGenerateUniform(gen, ptr, count));
CURAND_CHECK(curandDestroyGenerator(gen));
CUDA_CHECK(cudaDeviceSynchronize());
}
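// Grid-stride loop that fills dest with values drawn cyclically from the size-element src buffer.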
__global__ void KeFeedInputFloat(float * dest, const int count, float * src, const int size) {
int offset = (threadIdx.x + blockDim.x * blockIdx.x) % size;
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < count; i += gridDim.x * blockDim.x) {
dest[i] = src[offset];
offset = (offset+1) % size;
}
}
void FeedInputFloat(float * dest, const int count, const float * src, const int size) {
float* g_src;
CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&g_src), size*sizeof(float)));
CUDA_CHECK(cudaMemcpy(g_src, src, size*sizeof(float), cudaMemcpyHostToDevice));
KeFeedInputFloat<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(
dest, count, g_src, size);
CUDA_CHECK(cudaFree(g_src));
}
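// Computes the maximum absolute element-wise difference between A and B with a shared-memory max reduction, storing it in *dmax; mismatches above 0.001 are printed on rank 0.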
template<typename T, int BSIZE>
__global__ void deltaKe(const T* A, const T* B, size_t count, double* dmax, int rank) {
__shared__ double temp[BSIZE];
int tid = threadIdx.x;
double locmax = 0.0;
for(int i=tid; i<count; i+=blockDim.x) {
double delta = fabs((double)(A[i] - B[i]));
if( delta > locmax ) {
locmax = delta;
if (delta > 0.001 && rank == 0) printf("Error at %d/%d : %f != %f, del=%lf\n", i, (int)count, (float)A[i], (float)B[i], delta);
}
}
temp[tid] = locmax;
for(int stride = BSIZE/2; stride > 1; stride>>=1) {
__syncthreads();
if( tid < stride )
temp[tid] = temp[tid] > temp[tid+stride] ? temp[tid] : temp[tid+stride];
}
__syncthreads();
if( threadIdx.x == 0)
*dmax = temp[0] > temp[1] ? temp[0] : temp[1];
}
__global__ void setDelta(double* dmax, double value) {
*dmax = value;
}
void SetDelta(double* dmax, double value) {
setDelta<<<1, 1>>>(dmax, value);
}
void CheckDelta(float* dst, float* dst_test, size_t count, double* dmax, int rank, cudaStream_t stream) {
deltaKe<float, 512><<<1, 512, 0, stream>>>(dst, dst_test, count, dmax, rank);
}
|
3459e618a0a10e923b8931978eeab0e7b21a6b11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. */
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hipfft.h>
#include <cutil_inline.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex*, const Complex*, int, float);
// Filtering functions
void Convolve(const Complex*, int, const Complex*, int, Complex*);
// Padding functions
int PadData(const Complex*, Complex**, int,
const Complex*, Complex**, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
runTest(argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char** argv)
{
srand(2010);
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
// Allocate host memory for the signal
Complex* h_signal = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE);
    // Initialize the memory for the signal
for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) {
h_signal[i].x = rand() / (float)RAND_MAX;
h_signal[i].y = 0;
}
// Allocate host memory for the filter
Complex* h_filter_kernel = (Complex*)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
    // Initialize the memory for the filter
for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) {
h_filter_kernel[i].x = rand() / (float)RAND_MAX;
h_filter_kernel[i].y = 0;
}
// Pad signal and filter kernel
Complex* h_padded_signal;
Complex* h_padded_filter_kernel;
int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
// Allocate device memory for signal
Complex* d_signal;
cutilSafeCall(hipMalloc((void**)&d_signal, mem_size));
// Copy host memory to device
cutilSafeCall(hipMemcpy(d_signal, h_padded_signal, mem_size,
hipMemcpyHostToDevice));
// Allocate device memory for filter kernel
Complex* d_filter_kernel;
cutilSafeCall(hipMalloc((void**)&d_filter_kernel, mem_size));
// Copy host memory to device
cutilSafeCall(hipMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
hipMemcpyHostToDevice));
// CUFFT plan
hipfftHandle plan;
cufftSafeCall(hipfftPlan1d(&plan, new_size, HIPFFT_C2C, 1));
// Transform signal and kernel
cufftSafeCall(hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_FORWARD));
cufftSafeCall(hipfftExecC2C(plan, (hipfftComplex *)d_filter_kernel, (hipfftComplex *)d_filter_kernel, HIPFFT_FORWARD));
// Multiply the coefficients together and normalize the result
hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(32), dim3(256), 0, 0, d_signal, d_filter_kernel, new_size, 1.0f / new_size);
    // Check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
// Transform signal back
cufftSafeCall(hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_BACKWARD));
// Copy device memory to host
Complex* h_convolved_signal = h_padded_signal;
cutilSafeCall(hipMemcpy(h_convolved_signal, d_signal, mem_size,
hipMemcpyDeviceToHost));
// Allocate host memory for the convolution result
Complex* h_convolved_signal_ref = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Convolve on the host
Convolve(h_signal, SIGNAL_SIZE,
h_filter_kernel, FILTER_KERNEL_SIZE,
h_convolved_signal_ref);
// check result
CUTBoolean res = cutCompareL2fe((float*)h_convolved_signal_ref, (float*)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
//Destroy CUFFT context
cufftSafeCall(hipfftDestroy(plan));
// cleanup memory
free(h_signal);
free(h_filter_kernel);
free(h_padded_signal);
free(h_padded_filter_kernel);
free(h_convolved_signal_ref);
cutilSafeCall(hipFree(d_signal));
cutilSafeCall(hipFree(d_filter_kernel));
hipDeviceReset();
}
// Pad data
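// Zero-pads the signal to new_size and stores the filter kernel in wrap-around order (taps from the center onward at the front, the leading taps wrapped to the end), as expected for circular convolution via the FFT.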
int PadData(const Complex* signal, Complex** padded_signal, int signal_size,
const Complex* filter_kernel, Complex** padded_filter_kernel, int filter_kernel_size)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
int new_size = signal_size + maxRadius;
// Pad signal
Complex* new_data = (Complex*)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, signal, signal_size * sizeof(Complex));
memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
*padded_signal = new_data;
// Pad filter
new_data = (Complex*)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex));
memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
*padded_filter_kernel = new_data;
return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
void Convolve(const Complex* signal, int signal_size,
const Complex* filter_kernel, int filter_kernel_size,
Complex* filtered_signal)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
// Loop over output element indices
for (int i = 0; i < signal_size; ++i) {
filtered_signal[i].x = filtered_signal[i].y = 0;
// Loop over convolution indices
for (int j = - maxRadius + 1; j <= minRadius; ++j) {
int k = i + j;
if (k >= 0 && k < signal_size)
filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
// Complex pointwise multiplication
static __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads)
a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
| 3459e618a0a10e923b8931978eeab0e7b21a6b11.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. */
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cufft.h>
#include <cutil_inline.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex*, const Complex*, int, float);
// Filtering functions
void Convolve(const Complex*, int, const Complex*, int, Complex*);
// Padding functions
int PadData(const Complex*, Complex**, int,
const Complex*, Complex**, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
runTest(argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char** argv)
{
srand(2010);
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
// Allocate host memory for the signal
Complex* h_signal = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE);
    // Initialize the memory for the signal
for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) {
h_signal[i].x = rand() / (float)RAND_MAX;
h_signal[i].y = 0;
}
// Allocate host memory for the filter
Complex* h_filter_kernel = (Complex*)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
    // Initialize the memory for the filter
for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) {
h_filter_kernel[i].x = rand() / (float)RAND_MAX;
h_filter_kernel[i].y = 0;
}
// Pad signal and filter kernel
Complex* h_padded_signal;
Complex* h_padded_filter_kernel;
int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
// Allocate device memory for signal
Complex* d_signal;
cutilSafeCall(cudaMalloc((void**)&d_signal, mem_size));
// Copy host memory to device
cutilSafeCall(cudaMemcpy(d_signal, h_padded_signal, mem_size,
cudaMemcpyHostToDevice));
// Allocate device memory for filter kernel
Complex* d_filter_kernel;
cutilSafeCall(cudaMalloc((void**)&d_filter_kernel, mem_size));
// Copy host memory to device
cutilSafeCall(cudaMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
cudaMemcpyHostToDevice));
// CUFFT plan
cufftHandle plan;
cufftSafeCall(cufftPlan1d(&plan, new_size, CUFFT_C2C, 1));
// Transform signal and kernel
cufftSafeCall(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD));
cufftSafeCall(cufftExecC2C(plan, (cufftComplex *)d_filter_kernel, (cufftComplex *)d_filter_kernel, CUFFT_FORWARD));
// Multiply the coefficients together and normalize the result
ComplexPointwiseMulAndScale<<<32, 256>>>(d_signal, d_filter_kernel, new_size, 1.0f / new_size);
    // Check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
// Transform signal back
cufftSafeCall(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE));
// Copy device memory to host
Complex* h_convolved_signal = h_padded_signal;
cutilSafeCall(cudaMemcpy(h_convolved_signal, d_signal, mem_size,
cudaMemcpyDeviceToHost));
// Allocate host memory for the convolution result
Complex* h_convolved_signal_ref = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Convolve on the host
Convolve(h_signal, SIGNAL_SIZE,
h_filter_kernel, FILTER_KERNEL_SIZE,
h_convolved_signal_ref);
// check result
CUTBoolean res = cutCompareL2fe((float*)h_convolved_signal_ref, (float*)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
//Destroy CUFFT context
cufftSafeCall(cufftDestroy(plan));
// cleanup memory
free(h_signal);
free(h_filter_kernel);
free(h_padded_signal);
free(h_padded_filter_kernel);
free(h_convolved_signal_ref);
cutilSafeCall(cudaFree(d_signal));
cutilSafeCall(cudaFree(d_filter_kernel));
cudaThreadExit();
}
// Pad data
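// Zero-pads the signal to new_size and stores the filter kernel in wrap-around order (taps from the center onward at the front, the leading taps wrapped to the end), as expected for circular convolution via the FFT.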
int PadData(const Complex* signal, Complex** padded_signal, int signal_size,
const Complex* filter_kernel, Complex** padded_filter_kernel, int filter_kernel_size)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
int new_size = signal_size + maxRadius;
// Pad signal
Complex* new_data = (Complex*)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, signal, signal_size * sizeof(Complex));
memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
*padded_signal = new_data;
// Pad filter
new_data = (Complex*)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex));
memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
*padded_filter_kernel = new_data;
return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
void Convolve(const Complex* signal, int signal_size,
const Complex* filter_kernel, int filter_kernel_size,
Complex* filtered_signal)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
// Loop over output element indices
for (int i = 0; i < signal_size; ++i) {
filtered_signal[i].x = filtered_signal[i].y = 0;
// Loop over convolution indices
for (int j = - maxRadius + 1; j <= minRadius; ++j) {
int k = i + j;
if (k >= 0 && k < signal_size)
filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
// Complex pointwise multiplication
static __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads)
a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
|
819e12c19681632056732a4a694ff31fb368ec96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void MatrixOp(int *arr, int N) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int swapVar;
if(i<N && j<N) {
if(j%2==0 && (j+1)!=N) {
// swap elements
swapVar = arr[i*N + j];
arr[i*N + j] = arr[i*N+j+1];
arr[i*N+j+1] = swapVar;
}
__syncthreads();
if(i > j){
arr[j*N + i] = arr[i*N+j];
}
}
} | 819e12c19681632056732a4a694ff31fb368ec96.cu | #include "includes.h"
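// Swaps each adjacent column pair (j, j+1) for even j in the N x N matrix, then mirrors the lower triangle onto the upper triangle (arr[j][i] = arr[i][j] for i > j).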
__global__ void MatrixOp(int *arr, int N) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int swapVar;
if(i<N && j<N) {
if(j%2==0 && (j+1)!=N) {
// swap elements
swapVar = arr[i*N + j];
arr[i*N + j] = arr[i*N+j+1];
arr[i*N+j+1] = swapVar;
}
__syncthreads();
if(i > j){
arr[j*N + i] = arr[i*N+j];
}
}
} |
bbd3e0eca823dcf008dc37ffcb9b19beb3f84b24.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cmath>
__global__ void cuda_sort(int* arr_d,int* out, int* histogram_d, int size, int max_val);
__host__ void counting_sort(int arr[], int size, int max_val)
{
int* histogram_d;
if(size>7000000 || max_val > 30000000) {
int* arr_d;
int* out;
hipMalloc((void**)&arr_d,size*sizeof(int));
hipMalloc((void**)&out ,size*sizeof(int));
hipMemcpy(arr_d,arr,size*sizeof(int),hipMemcpyHostToDevice);
hipMalloc((void**)&histogram_d,max_val*sizeof(int));
        hipMemset(histogram_d, 0, max_val*sizeof(int));
        hipLaunchKernelGGL(( cuda_sort), dim3((size + 1023) / 1024),dim3(1024),size*sizeof(int), 0, arr_d,out, histogram_d, size, max_val);
        // copy to host & finish
hipMemcpy(arr,out,size*sizeof(int),hipMemcpyDeviceToHost);
hipFree(arr_d); hipFree(histogram_d);
hipFree(out);
}else {
histogram_d = (int*)calloc(max_val,sizeof(int));
for(int i=0; i<size; i++){
histogram_d[arr[i]]++;
}
int index =0;
for(int i=0; i<max_val; i++){
for(int j=0; j<histogram_d[i]; j++){
arr[index++] = i;
}
}
free(histogram_d);
}
}
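// GPU counting sort kernel: accumulates a histogram of the input with atomicAdd, derives each value's starting offset (an exclusive prefix sum) in shared memory, then writes the values back to out in sorted order.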
__global__ void cuda_sort(int* arr_d,int* out, int* histogram_d, int size, int max_val){
extern __shared__ int position[];
int i = threadIdx.x + blockDim.x * blockIdx.x;
//__device__ int histogram_d[max_val];
if(i<size){
atomicAdd(&histogram_d[arr_d[i]],1);
}
__syncthreads();
if(i<max_val){
if(i==0) position[0] = 0;
else {
position[i] = histogram_d[i-1];
for(int j=0; j<i-1; j++){
position[i] += histogram_d[j];
}
}
}
__syncthreads();
if(i<max_val){
for(int j=0; j<histogram_d[i]; j++){
out[position[i]+j] = i;
}
}
// device code
}
| bbd3e0eca823dcf008dc37ffcb9b19beb3f84b24.cu | #include <cuda.h>
#include <cmath>
__global__ void cuda_sort(int* arr_d,int* out, int* histogram_d, int size, int max_val);
__host__ void counting_sort(int arr[], int size, int max_val)
{
int* histogram_d;
if(size>7000000 || max_val > 30000000) {
int* arr_d;
int* out;
cudaMalloc((void**)&arr_d,size*sizeof(int));
cudaMalloc((void**)&out ,size*sizeof(int));
cudaMemcpy(arr_d,arr,size*sizeof(int),cudaMemcpyHostToDevice);
cudaMalloc((void**)&histogram_d,max_val*sizeof(int));
        cudaMemset(histogram_d, 0, max_val*sizeof(int));
        cuda_sort<<<(size + 1023) / 1024,1024,size*sizeof(int)>>>(arr_d,out, histogram_d, size, max_val);
        // copy to host & finish
cudaMemcpy(arr,out,size*sizeof(int),cudaMemcpyDeviceToHost);
cudaFree(arr_d); cudaFree(histogram_d);
cudaFree(out);
}else {
histogram_d = (int*)calloc(max_val,sizeof(int));
for(int i=0; i<size; i++){
histogram_d[arr[i]]++;
}
int index =0;
for(int i=0; i<max_val; i++){
for(int j=0; j<histogram_d[i]; j++){
arr[index++] = i;
}
}
free(histogram_d);
}
}
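// GPU counting sort kernel: accumulates a histogram of the input with atomicAdd, derives each value's starting offset (an exclusive prefix sum) in shared memory, then writes the values back to out in sorted order.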
__global__ void cuda_sort(int* arr_d,int* out, int* histogram_d, int size, int max_val){
extern __shared__ int position[];
int i = threadIdx.x + blockDim.x * blockIdx.x;
//__device__ int histogram_d[max_val];
if(i<size){
atomicAdd(&histogram_d[arr_d[i]],1);
}
__syncthreads();
if(i<max_val){
if(i==0) position[0] = 0;
else {
position[i] = histogram_d[i-1];
for(int j=0; j<i-1; j++){
position[i] += histogram_d[j];
}
}
}
__syncthreads();
if(i<max_val){
for(int j=0; j<histogram_d[i]; j++){
out[position[i]+j] = i;
}
}
// device code
}
|
8bd41e418d4d6b8dd7308abb8edf0396648a9760.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/top_k_data.hpp>
#include <nbla/cuda/utils/top_k.cuh>
#include <nbla/variable.hpp>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
namespace nbla {
namespace top_k_data {
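// Copies the k selected elements of x into y (packed contiguously when REDUCE is true, scattered back to their original indices when REDUCE is false) and records the source indices in top_k_idx.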
template <bool REDUCE, typename T>
__global__ void copy_index_and_value(const int k,
const unsigned int *sorted_idx, const T *x,
T *y, unsigned int *top_k_idx) {
NBLA_CUDA_KERNEL_LOOP(i, k) {
const auto idx = sorted_idx[i];
y[REDUCE ? i : idx] = x[idx];
top_k_idx[i] = idx;
}
}
template <bool REDUCE, typename T>
__global__ void copy_index_and_value(const int k, const ValIdx<T> *sorted,
const T *x, T *y,
unsigned int *top_k_idx) {
NBLA_CUDA_KERNEL_LOOP(i, k) {
const auto idx = sorted[i].index();
y[REDUCE ? i : idx] = x[idx];
top_k_idx[i] = idx;
}
}
template <typename T> __global__ void set_to_zero(const int size, T *data) {
NBLA_CUDA_KERNEL_LOOP(i, size) { data[i] = 0; }
}
template <typename T> __global__ void set_to_absolute(const int size, T *data) {
NBLA_CUDA_KERNEL_LOOP(i, size) { data[i] = abs(data[i]); }
}
template <typename T>
__global__ void add_gradient(const int size, const T *g_y, T *g_x) {
NBLA_CUDA_KERNEL_LOOP(i, size) { g_x[i] += g_y[i]; }
}
template <typename T>
__global__ void add_gradient(const int size, const unsigned int *idx,
const T *g_y, T *g_x) {
NBLA_CUDA_KERNEL_LOOP(i, size) { g_x[idx[i]] += g_y[i]; }
}
template <typename T>
__global__ void set_gradient(const int size, const T *g_y, T *g_x) {
NBLA_CUDA_KERNEL_LOOP(i, size) { g_x[i] = g_y[i]; }
}
template <typename T>
__global__ void set_gradient(const int size, const unsigned int *idx,
const T *g_y, T *g_x) {
NBLA_CUDA_KERNEL_LOOP(i, size) { g_x[idx[i]] = g_y[i]; }
}
} // namespace top_k_data
template <typename T>
void TopKDataCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
TopKData<T>::setup_impl(inputs, outputs);
cuda_set_device(this->device_);
if (this->k_ > 1024) {
this->buffer_ = make_shared<CudaCachedArray>(
this->ss_, get_dtype<unsigned int>(), this->ctx_);
} else {
this->buffer_ = make_shared<CudaCachedArray>(sizeof(Buffer<Tcu>),
get_dtype<char>(), this->ctx_);
}
}
template <typename T>
void TopKDataCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
using namespace top_k_data;
cuda_set_device(this->device_);
const auto x = inputs[0];
const auto y = outputs[0];
auto x_data = x->get_data_pointer<Tcu>(this->ctx_);
auto y_data = y->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto tk_idx =
(reinterpret_cast<Variable &>(this->top_k_idx_)
.cast_data_and_get_pointer<unsigned int>(this->ctx_, true));
if (!this->reduce_)
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_to_zero, y->size(), y_data);
if (this->k_ > 1024) {
// For large K we use thrust sort_by_key to do a radix sort of
// data and index. This is not very efficient but large K is not
    // the expected use case. The code could be improved by splitting the input
// into a smaller partition of the k-th largest values before
// sorting.
auto buffer_raw = this->buffer_->template pointer<unsigned int>();
auto buffer_ptr = thrust::device_pointer_cast(buffer_raw);
for (int s = 0; s < this->ns_; s++) {
auto x_data_vec = thrust::device_vector<Tcu>(x_data, x_data + this->ss_);
auto sorted_val = thrust::raw_pointer_cast(x_data_vec.data());
auto sorted_idx = thrust::raw_pointer_cast(buffer_ptr);
if (this->abs_) {
auto raw_ptr = thrust::raw_pointer_cast(x_data_vec.data());
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_to_absolute, this->ss_, raw_ptr);
}
thrust::sequence(buffer_ptr, buffer_ptr + this->ss_);
thrust::sort_by_key(x_data_vec.begin(), x_data_vec.end(), buffer_ptr,
thrust::greater<Tcu>());
if (this->reduce_) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(copy_index_and_value<true>, this->k_,
sorted_idx, x_data, y_data, tk_idx);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(copy_index_and_value<false>, this->k_,
sorted_idx, x_data, y_data, tk_idx);
}
x_data += this->ss_; // increase by input sample size
y_data += this->fs_; // increase by output feature size
tk_idx += this->k_;
}
} else {
auto buffer = this->buffer_->template pointer<Buffer<Tcu>>();
for (int s = 0; s < this->ns_; s++) {
if (this->abs_) {
top_k<Tcu, true>(x_data, this->ss_, this->k_, buffer);
} else {
top_k<Tcu, false>(x_data, this->ss_, this->k_, buffer);
}
if (this->reduce_) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(copy_index_and_value<true>, this->k_,
&buffer->sorted[0], x_data, y_data,
tk_idx);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(copy_index_and_value<false>, this->k_,
&buffer->sorted[0], x_data, y_data,
tk_idx);
}
x_data += this->ss_; // increase by input sample size
y_data += this->fs_; // increase by output feature size
tk_idx += this->k_;
}
}
this->forward_done_ = true;
}
template <typename T>
void TopKDataCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum_gradient) {
if (!(propagate_down[0]))
return;
NBLA_CHECK(this->forward_done_, error_code::value,
"Forward must be called before calling backward.");
using namespace top_k_data;
cuda_set_device(this->device_);
const auto x = inputs[0];
const auto y = outputs[0];
auto g_y = y->get_grad_pointer<Tcu>(this->ctx_);
auto idx = (reinterpret_cast<Variable &>(this->top_k_idx_)
.get_data_pointer<unsigned int>(this->ctx_));
if (this->reduce_) {
if (accum_gradient[0]) {
auto g_x = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, false);
for (int s = 0; s < this->ns_; s++) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(add_gradient, this->k_, idx, g_y, g_x);
g_x += this->ss_;
g_y += this->fs_;
idx += this->k_;
}
} else {
auto g_x = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_to_zero, x->size(), g_x);
for (int s = 0; s < this->ns_; s++) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_gradient, this->k_, idx, g_y, g_x);
g_x += this->ss_;
g_y += this->fs_;
idx += this->k_;
}
}
} else {
if (accum_gradient[0]) {
auto g_x = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, false);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(add_gradient, x->size(), g_y, g_x);
} else {
auto g_x = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_gradient, x->size(), g_y, g_x);
}
}
}
} // namespace nbla
| 8bd41e418d4d6b8dd7308abb8edf0396648a9760.cu | // Copyright (c) 2018 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/top_k_data.hpp>
#include <nbla/cuda/utils/top_k.cuh>
#include <nbla/variable.hpp>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
namespace nbla {
namespace top_k_data {
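// Copies the k selected elements of x into y (packed contiguously when REDUCE is true, scattered back to their original indices when REDUCE is false) and records the source indices in top_k_idx.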
template <bool REDUCE, typename T>
__global__ void copy_index_and_value(const int k,
const unsigned int *sorted_idx, const T *x,
T *y, unsigned int *top_k_idx) {
NBLA_CUDA_KERNEL_LOOP(i, k) {
const auto idx = sorted_idx[i];
y[REDUCE ? i : idx] = x[idx];
top_k_idx[i] = idx;
}
}
template <bool REDUCE, typename T>
__global__ void copy_index_and_value(const int k, const ValIdx<T> *sorted,
const T *x, T *y,
unsigned int *top_k_idx) {
NBLA_CUDA_KERNEL_LOOP(i, k) {
const auto idx = sorted[i].index();
y[REDUCE ? i : idx] = x[idx];
top_k_idx[i] = idx;
}
}
template <typename T> __global__ void set_to_zero(const int size, T *data) {
NBLA_CUDA_KERNEL_LOOP(i, size) { data[i] = 0; }
}
template <typename T> __global__ void set_to_absolute(const int size, T *data) {
NBLA_CUDA_KERNEL_LOOP(i, size) { data[i] = abs(data[i]); }
}
template <typename T>
__global__ void add_gradient(const int size, const T *g_y, T *g_x) {
NBLA_CUDA_KERNEL_LOOP(i, size) { g_x[i] += g_y[i]; }
}
template <typename T>
__global__ void add_gradient(const int size, const unsigned int *idx,
const T *g_y, T *g_x) {
NBLA_CUDA_KERNEL_LOOP(i, size) { g_x[idx[i]] += g_y[i]; }
}
template <typename T>
__global__ void set_gradient(const int size, const T *g_y, T *g_x) {
NBLA_CUDA_KERNEL_LOOP(i, size) { g_x[i] = g_y[i]; }
}
template <typename T>
__global__ void set_gradient(const int size, const unsigned int *idx,
const T *g_y, T *g_x) {
NBLA_CUDA_KERNEL_LOOP(i, size) { g_x[idx[i]] = g_y[i]; }
}
} // namespace top_k_data
template <typename T>
void TopKDataCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
TopKData<T>::setup_impl(inputs, outputs);
cuda_set_device(this->device_);
if (this->k_ > 1024) {
this->buffer_ = make_shared<CudaCachedArray>(
this->ss_, get_dtype<unsigned int>(), this->ctx_);
} else {
this->buffer_ = make_shared<CudaCachedArray>(sizeof(Buffer<Tcu>),
get_dtype<char>(), this->ctx_);
}
}
template <typename T>
void TopKDataCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
using namespace top_k_data;
cuda_set_device(this->device_);
const auto x = inputs[0];
const auto y = outputs[0];
auto x_data = x->get_data_pointer<Tcu>(this->ctx_);
auto y_data = y->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto tk_idx =
(reinterpret_cast<Variable &>(this->top_k_idx_)
.cast_data_and_get_pointer<unsigned int>(this->ctx_, true));
if (!this->reduce_)
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_to_zero, y->size(), y_data);
if (this->k_ > 1024) {
// For large K we use thrust sort_by_key to do a radix sort of
// data and index. This is not very efficient but large K is not
    // the expected use case. The code could be improved by splitting the input
// into a smaller partition of the k-th largest values before
// sorting.
auto buffer_raw = this->buffer_->template pointer<unsigned int>();
auto buffer_ptr = thrust::device_pointer_cast(buffer_raw);
for (int s = 0; s < this->ns_; s++) {
auto x_data_vec = thrust::device_vector<Tcu>(x_data, x_data + this->ss_);
auto sorted_val = thrust::raw_pointer_cast(x_data_vec.data());
auto sorted_idx = thrust::raw_pointer_cast(buffer_ptr);
if (this->abs_) {
auto raw_ptr = thrust::raw_pointer_cast(x_data_vec.data());
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_to_absolute, this->ss_, raw_ptr);
}
thrust::sequence(buffer_ptr, buffer_ptr + this->ss_);
thrust::sort_by_key(x_data_vec.begin(), x_data_vec.end(), buffer_ptr,
thrust::greater<Tcu>());
if (this->reduce_) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(copy_index_and_value<true>, this->k_,
sorted_idx, x_data, y_data, tk_idx);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(copy_index_and_value<false>, this->k_,
sorted_idx, x_data, y_data, tk_idx);
}
x_data += this->ss_; // increase by input sample size
y_data += this->fs_; // increase by output feature size
tk_idx += this->k_;
}
} else {
auto buffer = this->buffer_->template pointer<Buffer<Tcu>>();
for (int s = 0; s < this->ns_; s++) {
if (this->abs_) {
top_k<Tcu, true>(x_data, this->ss_, this->k_, buffer);
} else {
top_k<Tcu, false>(x_data, this->ss_, this->k_, buffer);
}
if (this->reduce_) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(copy_index_and_value<true>, this->k_,
&buffer->sorted[0], x_data, y_data,
tk_idx);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(copy_index_and_value<false>, this->k_,
&buffer->sorted[0], x_data, y_data,
tk_idx);
}
x_data += this->ss_; // increase by input sample size
y_data += this->fs_; // increase by output feature size
tk_idx += this->k_;
}
}
this->forward_done_ = true;
}
template <typename T>
void TopKDataCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum_gradient) {
if (!(propagate_down[0]))
return;
NBLA_CHECK(this->forward_done_, error_code::value,
"Forward must be called before calling backward.");
using namespace top_k_data;
cuda_set_device(this->device_);
const auto x = inputs[0];
const auto y = outputs[0];
auto g_y = y->get_grad_pointer<Tcu>(this->ctx_);
auto idx = (reinterpret_cast<Variable &>(this->top_k_idx_)
.get_data_pointer<unsigned int>(this->ctx_));
if (this->reduce_) {
if (accum_gradient[0]) {
auto g_x = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, false);
for (int s = 0; s < this->ns_; s++) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(add_gradient, this->k_, idx, g_y, g_x);
g_x += this->ss_;
g_y += this->fs_;
idx += this->k_;
}
} else {
auto g_x = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_to_zero, x->size(), g_x);
for (int s = 0; s < this->ns_; s++) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_gradient, this->k_, idx, g_y, g_x);
g_x += this->ss_;
g_y += this->fs_;
idx += this->k_;
}
}
} else {
if (accum_gradient[0]) {
auto g_x = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, false);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(add_gradient, x->size(), g_y, g_x);
} else {
auto g_x = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(set_gradient, x->size(), g_y, g_x);
}
}
}
} // namespace nnabla
|
6a21a9dddaea757c08bd14162c7b890edb2e80a6.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeSimplePolygon.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSimplePolygon
template hipError_t gpu_hpmc_free_volume<ShapeSimplePolygon>(const hpmc_free_volume_args_t &args,
const typename ShapeSimplePolygon::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeSimplePolygon>(const hpmc_args_t& args,
const typename ShapeSimplePolygon::param_type *d_params);
template hipError_t gpu_hpmc_implicit_count_overlaps<ShapeSimplePolygon>(const hpmc_implicit_args_t& args,
const typename ShapeSimplePolygon::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeSimplePolygon>(const hpmc_implicit_args_t& args,
const typename ShapeSimplePolygon::param_type *d_params);
template hipError_t gpu_hpmc_insert_depletants_queue<ShapeSimplePolygon>(const hpmc_implicit_args_new_t& args,
const typename ShapeSimplePolygon::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapeSimplePolygon>(const hpmc_implicit_args_new_t& args,
const typename ShapeSimplePolygon::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 6a21a9dddaea757c08bd14162c7b890edb2e80a6.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeSimplePolygon.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSimplePolygon
template cudaError_t gpu_hpmc_free_volume<ShapeSimplePolygon>(const hpmc_free_volume_args_t &args,
const typename ShapeSimplePolygon::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeSimplePolygon>(const hpmc_args_t& args,
const typename ShapeSimplePolygon::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeSimplePolygon>(const hpmc_implicit_args_t& args,
const typename ShapeSimplePolygon::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeSimplePolygon>(const hpmc_implicit_args_t& args,
const typename ShapeSimplePolygon::param_type *d_params);
template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeSimplePolygon>(const hpmc_implicit_args_new_t& args,
const typename ShapeSimplePolygon::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeSimplePolygon>(const hpmc_implicit_args_new_t& args,
const typename ShapeSimplePolygon::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
9b958478a685d9501bdb2b2c2b50a16c9901b3fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Zhe Chen
#include <iostream>
#include <cmath>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "utils.h"
using namespace std;
#define BLOCK_DIM 32
#define BLOCK_DIM_IN 30
double Residual(int N, double *U, double *F){
double h=1.0/(N+1.0);
double res=0.0, res_1=0.0;
#pragma omp parallel for shared(U,F) private(res_1)\
reduction(+:res)
for (int j=1;j<=N;j++){
for (int i=1;i<=N;i++){
res_1=(-U[(N+2)*j+i-1]-U[(N+2)*(j-1)+i]-U[(N+2)*j+i+1]-U[(N+2)*(j+1)+i]+4.0*U[(N+2)*j+i])/h/h-F[(N+2)*j+i];
res+=res_1*res_1;
}
}
res=sqrt(res);
return res;
}
void gs2D_cpu(int N, double *U, double *F, int maxit, int num_threads){
//red-black ordering version of the Gauss-Seidel algorithm.
#if defined(_OPENMP)
int threads_all = omp_get_num_procs();
cout << "Number of cpus in this machine: " << threads_all << endl;
omp_set_num_threads(num_threads);
cout << "Use " << num_threads << " threads" << endl;
#endif
double h = 1.0/(N+1.0);
double res=0.0;
double tol=1e-8;
double rel_res=0.0;
int iter=0;
double res0=Residual(N,U,F);
cout << "Initial residual is " << res0 << endl;
rel_res=tol+1.0;
while (rel_res>tol){
#pragma omp parallel shared(U)
{
//red points
#pragma omp for
for (int j = 1; j <= N; j++) {
int pt=-1;
if (j%2 ==0){
//even column
pt=2;
}else{
//odd column
pt=1;
}
for (int i = pt; i <= N; i+=2) {
//rows first, in the inner loop since it's stored in row order.
U[(N+2)*j+i] = 0.25 *
(h * h * F[(N+2)*j+i] + U[(N+2)*j+i-1] + U[(N+2)*(j-1)+i]
+ U[(N+2)*j+i+1]+ U[(N+2)*(j+1)+i]);
}
}
//guarantee all red points are updated.
#pragma omp barrier
//black points
#pragma omp for
for (int j = 1; j <= N; j++) {
int pt=-1;
if (j%2 ==0){
//even column
pt=1;
}else{
//odd column
pt=2;
}
for (int i = pt; i <= N; i+=2) {
//rows first, in the inner loop since it's stored in row order.
U[(N+2)*j+i] = 0.25 *
(h * h * F[(N+2)*j+i] + U[(N+2)*j+i-1] + U[(N+2)*(j-1)+i]
+ U[(N+2)*j+i+1]+ U[(N+2)*(j+1)+i]);
}
}
}
res=Residual(N,U,F);
rel_res=res/res0;
// if (iter%(maxit/10)==0){
// std::cout << "Relative residual is " << rel_res << std::endl;
// }
iter++;
if (iter>maxit){
cout << "Max iteration reached: " << maxit <<endl;
break;
}
}
}
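// Red-black GPU kernels: each block stages a BLOCK_DIM x BLOCK_DIM (32x32) tile
// of U in shared memory and only the interior BLOCK_DIM_IN x BLOCK_DIM_IN
// (30x30) threads write updates, so the one-cell halo supplies the stencil
// neighbours. "black" updates points with even (x+y) parity, "red" the odd ones.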
__global__ void gs2D_gpu_kernel_black(int N, double h, double *U_new, double *U, double *F) {
__shared__ double smem[BLOCK_DIM][BLOCK_DIM];
smem[threadIdx.x][threadIdx.y]=0.0;
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+2 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+2){
smem[threadIdx.x][threadIdx.y]=U[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x];
}
__syncthreads();
if ((blockIdx.x*BLOCK_DIM_IN+threadIdx.x+blockIdx.y*BLOCK_DIM_IN+threadIdx.y)%2==0){
if (threadIdx.x<=BLOCK_DIM_IN && threadIdx.x>=1 &&
threadIdx.y<=BLOCK_DIM_IN && threadIdx.y>=1){
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+1 &&
blockIdx.x*BLOCK_DIM_IN+threadIdx.x>0 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+1 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y>0){
U_new[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x]=
0.25 *
(h * h * F[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x] + smem[threadIdx.x-1][threadIdx.y] + smem[threadIdx.x+1][threadIdx.y]
+ smem[threadIdx.x][threadIdx.y-1]+ smem[threadIdx.x][threadIdx.y+1]);
}
}
}
}
__global__ void gs2D_gpu_kernel_red(int N, double h, double *U_new, double *U, double *F) {
__shared__ double smem[BLOCK_DIM][BLOCK_DIM];
smem[threadIdx.x][threadIdx.y]=0.0;
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+2 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+2){
smem[threadIdx.x][threadIdx.y]=U[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x];
}
__syncthreads();
if ((blockIdx.x*BLOCK_DIM_IN+threadIdx.x+blockIdx.y*BLOCK_DIM_IN+threadIdx.y)%2==1){
if (threadIdx.x<=BLOCK_DIM_IN && threadIdx.x>=1 &&
threadIdx.y<=BLOCK_DIM_IN && threadIdx.y>=1){
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+1 &&
blockIdx.x*BLOCK_DIM_IN+threadIdx.x>0 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+1 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y>0){
U_new[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x]=
0.25 *
(h * h * F[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x] + smem[threadIdx.x-1][threadIdx.y] + smem[threadIdx.x+1][threadIdx.y]
+ smem[threadIdx.x][threadIdx.y-1]+ smem[threadIdx.x][threadIdx.y+1]);
}
}
}
}
void gs2D_gpu(int N, double *U, double *F, int maxit){
double h = 1.0/(N+1.0);
double res=0.0;
double tol=1e-8;
double rel_res=0.0;
int iter=0;
double *U_d, *F_d;
hipMalloc(&U_d, (N+2)*(N+2)*sizeof(double));
hipMalloc(&F_d, (N+2)*(N+2)*sizeof(double));
hipMemcpy(U_d, U, (N+2)*(N+2)*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(F_d, F, (N+2)*(N+2)*sizeof(double),hipMemcpyHostToDevice);
// hipMalloc(&U_new, (N+2)*(N+2)*sizeof(double));
// hipMemcpy(U_new, U_d, (N+2)*(N+2)*sizeof(double),hipMemcpyDeviceToDevice);
// memset(U_new, 0, sizeof(double) * (N+2)*(N+2));
// __shared__ double smem[BLOCK_DIM][BLOCK_DIM];
double res0=Residual(N,U,F);
cout << "Initial residual is " << res0 << endl;
rel_res=tol+1.0;
dim3 blockDim(BLOCK_DIM, BLOCK_DIM);
dim3 gridDim((N-1)/(BLOCK_DIM_IN)+1, (N-1)/(BLOCK_DIM_IN)+1);
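// Sweep until the relative residual drops below tol; U is copied back to the
// host every iteration so Residual() (an OpenMP host routine) can evaluate it.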
while (rel_res>tol){
hipLaunchKernelGGL(( gs2D_gpu_kernel_black), dim3(gridDim),dim3(blockDim), 0, 0, N, h, U_d, U_d, F_d);
hipDeviceSynchronize();
// hipMemcpy(U_d, U_new, (N+2)*(N+2)*sizeof(double),hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( gs2D_gpu_kernel_red), dim3(gridDim),dim3(blockDim), 0, 0, N, h, U_d, U_d, F_d);
hipMemcpy(U,U_d,(N+2)*(N+2)*sizeof(double),hipMemcpyDeviceToHost);
res=Residual(N,U,F);
rel_res=res/res0;
// if (iter%(maxit/10)==0){
// std::cout << "Relative residual is " << rel_res << std::endl;
// }
iter++;
if (iter>maxit){
cout << "Max iteration reached: " << maxit <<endl;
cout << "Remaining res: " << rel_res <<endl;
break;
}
}
cout << "Remaining res: " << rel_res <<endl;
// hipFree(U_new);
}
int main(int argc, char **argv) {
cout << "Please input N(default=10): " << endl;
int N = 10;
cin >> N;
cout << "Please input num of threads(default=1): " << endl;
int num_threads = 1;
cin >> num_threads;
int maxit=10000;
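// Model problem: -Laplace(u) = 1 on the unit square, grid spacing h = 1/(N+1),
// with homogeneous Dirichlet boundary values kept at zero.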
//allocate
double *U = (double*) malloc ((N+2)*(N+2)*sizeof(double));
double *F = (double*) malloc ((N+2)*(N+2)*sizeof(double));
//initialize
memset(U,0,(N+2)*(N+2)*sizeof(double));
memset(F,0,(N+2)*(N+2)*sizeof(double));
for (int i=0;i<(N+2)*(N+2);i++){
F[i]=1.0;
}
Timer t;
t.tic();
gs2D_cpu(N, U, F, maxit,num_threads);
printf("CPU Bandwidth = %f GB/s\n", maxit*10*(N+2)*(N+2)*sizeof(double) / (t.toc())/1e9);
cout << "CPU Elapsed time=" << t.toc() << "s" <<endl;
memset(U,0,(N+2)*(N+2)*sizeof(double));
t.tic();
gs2D_gpu(N, U, F, maxit);
printf("GPU Bandwidth = %f GB/s\n", maxit*10*(N+2)*(N+2)*sizeof(double) / (t.toc())/1e9);
cout << "GPU Elapsed time=" << t.toc() << "s" <<endl;
free(U);
free(F);
return 0;
}
| 9b958478a685d9501bdb2b2c2b50a16c9901b3fe.cu | //Zhe Chen
#include <iostream>
#include <cmath>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "utils.h"
using namespace std;
#define BLOCK_DIM 32
#define BLOCK_DIM_IN 30
double Residual(int N, double *U, double *F){
double h=1.0/(N+1.0);
double res=0.0, res_1=0.0;
#pragma omp parallel for shared(U,F) private(res_1)\
reduction(+:res)
for (int j=1;j<=N;j++){
for (int i=1;i<=N;i++){
res_1=(-U[(N+2)*j+i-1]-U[(N+2)*(j-1)+i]-U[(N+2)*j+i+1]-U[(N+2)*(j+1)+i]+4.0*U[(N+2)*j+i])/h/h-F[(N+2)*j+i];
res+=res_1*res_1;
}
}
res=sqrt(res);
return res;
}
void gs2D_cpu(int N, double *U, double *F, int maxit, int num_threads){
//red-black ordering version of the Gauss-Seidel algorithm.
#if defined(_OPENMP)
int threads_all = omp_get_num_procs();
cout << "Number of cpus in this machine: " << threads_all << endl;
omp_set_num_threads(num_threads);
cout << "Use " << num_threads << " threads" << endl;
#endif
double h = 1.0/(N+1.0);
double res=0.0;
double tol=1e-8;
double rel_res=0.0;
int iter=0;
double res0=Residual(N,U,F);
cout << "Initial residual is " << res0 << endl;
rel_res=tol+1.0;
while (rel_res>tol){
#pragma omp parallel shared(U)
{
//red points
#pragma omp for
for (int j = 1; j <= N; j++) {
int pt=-1;
if (j%2 ==0){
//even column
pt=2;
}else{
//odd column
pt=1;
}
for (int i = pt; i <= N; i+=2) {
//rows first, in the inner loop since it's stored in row order.
U[(N+2)*j+i] = 0.25 *
(h * h * F[(N+2)*j+i] + U[(N+2)*j+i-1] + U[(N+2)*(j-1)+i]
+ U[(N+2)*j+i+1]+ U[(N+2)*(j+1)+i]);
}
}
//guarantee all red points are updated.
#pragma omp barrier
//black points
#pragma omp for
for (int j = 1; j <= N; j++) {
int pt=-1;
if (j%2 ==0){
//even column
pt=1;
}else{
//odd column
pt=2;
}
for (int i = pt; i <= N; i+=2) {
//rows first, in the inner loop since it's stored in row order.
U[(N+2)*j+i] = 0.25 *
(h * h * F[(N+2)*j+i] + U[(N+2)*j+i-1] + U[(N+2)*(j-1)+i]
+ U[(N+2)*j+i+1]+ U[(N+2)*(j+1)+i]);
}
}
}
res=Residual(N,U,F);
rel_res=res/res0;
// if (iter%(maxit/10)==0){
// std::cout << "Relative residual is " << rel_res << std::endl;
// }
iter++;
if (iter>maxit){
cout << "Max iteration reached: " << maxit <<endl;
break;
}
}
}
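// Red-black GPU kernels: each block stages a BLOCK_DIM x BLOCK_DIM (32x32) tile
// of U in shared memory and only the interior BLOCK_DIM_IN x BLOCK_DIM_IN
// (30x30) threads write updates, so the one-cell halo supplies the stencil
// neighbours. "black" updates points with even (x+y) parity, "red" the odd ones.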
__global__ void gs2D_gpu_kernel_black(int N, double h, double *U_new, double *U, double *F) {
__shared__ double smem[BLOCK_DIM][BLOCK_DIM];
smem[threadIdx.x][threadIdx.y]=0.0;
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+2 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+2){
smem[threadIdx.x][threadIdx.y]=U[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x];
}
__syncthreads();
if ((blockIdx.x*BLOCK_DIM_IN+threadIdx.x+blockIdx.y*BLOCK_DIM_IN+threadIdx.y)%2==0){
if (threadIdx.x<=BLOCK_DIM_IN && threadIdx.x>=1 &&
threadIdx.y<=BLOCK_DIM_IN && threadIdx.y>=1){
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+1 &&
blockIdx.x*BLOCK_DIM_IN+threadIdx.x>0 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+1 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y>0){
U_new[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x]=
0.25 *
(h * h * F[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x] + smem[threadIdx.x-1][threadIdx.y] + smem[threadIdx.x+1][threadIdx.y]
+ smem[threadIdx.x][threadIdx.y-1]+ smem[threadIdx.x][threadIdx.y+1]);
}
}
}
}
__global__ void gs2D_gpu_kernel_red(int N, double h, double *U_new, double *U, double *F) {
__shared__ double smem[BLOCK_DIM][BLOCK_DIM];
smem[threadIdx.x][threadIdx.y]=0.0;
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+2 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+2){
smem[threadIdx.x][threadIdx.y]=U[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x];
}
__syncthreads();
if ((blockIdx.x*BLOCK_DIM_IN+threadIdx.x+blockIdx.y*BLOCK_DIM_IN+threadIdx.y)%2==1){
if (threadIdx.x<=BLOCK_DIM_IN && threadIdx.x>=1 &&
threadIdx.y<=BLOCK_DIM_IN && threadIdx.y>=1){
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+1 &&
blockIdx.x*BLOCK_DIM_IN+threadIdx.x>0 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+1 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y>0){
U_new[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x]=
0.25 *
(h * h * F[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x] + smem[threadIdx.x-1][threadIdx.y] + smem[threadIdx.x+1][threadIdx.y]
+ smem[threadIdx.x][threadIdx.y-1]+ smem[threadIdx.x][threadIdx.y+1]);
}
}
}
}
void gs2D_gpu(int N, double *U, double *F, int maxit){
double h = 1.0/(N+1.0);
double res=0.0;
double tol=1e-8;
double rel_res=0.0;
int iter=0;
double *U_d, *F_d;
cudaMalloc(&U_d, (N+2)*(N+2)*sizeof(double));
cudaMalloc(&F_d, (N+2)*(N+2)*sizeof(double));
cudaMemcpy(U_d, U, (N+2)*(N+2)*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(F_d, F, (N+2)*(N+2)*sizeof(double),cudaMemcpyHostToDevice);
// cudaMalloc(&U_new, (N+2)*(N+2)*sizeof(double));
// cudaMemcpy(U_new, U_d, (N+2)*(N+2)*sizeof(double),cudaMemcpyDeviceToDevice);
// memset(U_new, 0, sizeof(double) * (N+2)*(N+2));
// __shared__ double smem[BLOCK_DIM][BLOCK_DIM];
double res0=Residual(N,U,F);
cout << "Initial residual is " << res0 << endl;
rel_res=tol+1.0;
dim3 blockDim(BLOCK_DIM, BLOCK_DIM);
dim3 gridDim((N-1)/(BLOCK_DIM_IN)+1, (N-1)/(BLOCK_DIM_IN)+1);
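// Sweep until the relative residual drops below tol; U is copied back to the
// host every iteration so Residual() (an OpenMP host routine) can evaluate it.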
while (rel_res>tol){
gs2D_gpu_kernel_black<<<gridDim,blockDim>>>(N, h, U_d, U_d, F_d);
cudaDeviceSynchronize();
// cudaMemcpy(U_d, U_new, (N+2)*(N+2)*sizeof(double),cudaMemcpyDeviceToDevice);
gs2D_gpu_kernel_red<<<gridDim,blockDim>>>(N, h, U_d, U_d, F_d);
cudaMemcpy(U,U_d,(N+2)*(N+2)*sizeof(double),cudaMemcpyDeviceToHost);
res=Residual(N,U,F);
rel_res=res/res0;
// if (iter%(maxit/10)==0){
// std::cout << "Relative residual is " << rel_res << std::endl;
// }
iter++;
if (iter>maxit){
cout << "Max iteration reached: " << maxit <<endl;
cout << "Remaining res: " << rel_res <<endl;
break;
}
}
cout << "Remaining res: " << rel_res <<endl;
// cudaFree(U_new);
}
int main(int argc, char **argv) {
cout << "Please input N(default=10): " << endl;
int N = 10;
cin >> N;
cout << "Please input num of threads(default=1): " << endl;
int num_threads = 1;
cin >> num_threads;
int maxit=10000;
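// Model problem: -Laplace(u) = 1 on the unit square, grid spacing h = 1/(N+1),
// with homogeneous Dirichlet boundary values kept at zero.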
//allocate
double *U = (double*) malloc ((N+2)*(N+2)*sizeof(double));
double *F = (double*) malloc ((N+2)*(N+2)*sizeof(double));
//initialize
memset(U,0,(N+2)*(N+2)*sizeof(double));
memset(F,0,(N+2)*(N+2)*sizeof(double));
for (int i=0;i<(N+2)*(N+2);i++){
F[i]=1.0;
}
Timer t;
t.tic();
gs2D_cpu(N, U, F, maxit,num_threads);
printf("CPU Bandwidth = %f GB/s\n", maxit*10*(N+2)*(N+2)*sizeof(double) / (t.toc())/1e9);
cout << "CPU Elapsed time=" << t.toc() << "s" <<endl;
memset(U,0,(N+2)*(N+2)*sizeof(double));
t.tic();
gs2D_gpu(N, U, F, maxit);
printf("GPU Bandwidth = %f GB/s\n", maxit*10*(N+2)*(N+2)*sizeof(double) / (t.toc())/1e9);
cout << "GPU Elapsed time=" << t.toc() << "s" <<endl;
free(U);
free(F);
return 0;
}
|
0a50715887faf6103cbfa8b3143c566cd7924dda.hip | // !!! This is a file automatically generated by hipify!!!
/*
* renderer - A simple implementation of polygon-based 3D algorithms.
* Copyright (C) 2004 Thanassis Tsiodras ([email protected])
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef _WIN32
#include <windows.h>
#include <GL/glew.h>
#else
#define GL_GLEXT_PROTOTYPES
#include <GL/gl.h>
#endif
#include <GL/glut.h>
#include <SDL.h>
#include <cstdio>
#include <cfloat>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <hip/hip_vector_types.h>
#include "Types.h"
#include "Base3d.h"
#include "Camera.h"
#include "cudarenderer.h"
/////////////////////////////////
// Raytracing configuration
#define THREADS_PER_BLOCK 64
// What depth to stop reflections and refractions?
#define MAX_RAY_DEPTH 2
// Ray intersections of a distance <=NUDGE_FACTOR (from the origin) don't count
#define NUDGE_FACTOR 1e-5f
// How much the reflected color contributes to the overall result
#define REFLECTIONS_RATE 0.375f
//////////////////////////////
// Enable ambient occlusion?
//#define AMBIENT_OCCLUSION
// How many ambient rays to spawn per ray intersection?
#define AMBIENT_SAMPLES 32
// How close to check for ambient occlusion?
#define AMBIENT_RANGE 0.15f
__constant__ unsigned VERTICES;
__constant__ unsigned TRIANGLES;
// Textures for vertices, triangles and BVH data
// (see CudaRender() below, as well as main() to see the data setup process)
texture<uint1, 1, hipReadModeElementType> g_triIdxListTexture;
texture<float2, 1, hipReadModeElementType> g_pCFBVHlimitsTexture;
texture<uint4, 1, hipReadModeElementType> g_pCFBVHindexesOrTrilistsTexture;
texture<float4, 1, hipReadModeElementType> g_verticesTexture;
texture<float4, 1, hipReadModeElementType> g_trianglesTexture;
// Utility functions
// CUDA dot product
__device__ coord dotCUDA(const Vector3& l, const Vector3& r)
{
return l._x*r._x +l._y*r._y +l._z*r._z;
}
__device__ coord dotCUDA(const float4& l, const Vector3& r)
{
return l.x*r._x +l.y*r._y +l.z*r._z;
}
__device__ coord dotCUDA(const Vector3& l, const float4& r)
{
return l._x*r.x +l._y*r.y +l._z*r.z;
}
// CUDA cross
__device__ Vector3 crossCUDA(const Vector3& l, const Vector3& r)
{
coord x,y,z;
const coord &aax=l._x;
const coord &aay=l._y;
const coord &aaz=l._z;
const coord &bbx=r._x;
const coord &bby=r._y;
const coord &bbz=r._z;
x=aay*bbz-bby*aaz;
y=bbx*aaz-aax*bbz;
z=aax*bby-aay*bbx;
return Vector3(x,y,z);
}
// CUDA distance of two points
__device__ coord distanceCUDA(const Vector3& a, const Vector3& b)
{
coord dx=a._x - b._x;
coord dy=a._y - b._y;
coord dz=a._z - b._z;
return sqrt(dx*dx + dy*dy + dz*dz);
}
// Sometimes you just want to compare, so no sqrt is needed
__device__ coord distancesqCUDA(const Vector3& a, const Vector3& b)
{
coord dx=a._x - b._x;
coord dy=a._y - b._y;
coord dz=a._z - b._z;
return dx*dx + dy*dy + dz*dz;
}
// Matrix3x3 multipled by Vector3
__device__ Vector3 multiplyRightWith(const Matrix3& mv, const Vector3& r)
{
coord xnew = mv._row1._x*r._x + mv._row1._y*r._y + mv._row1._z*r._z;
coord ynew = mv._row2._x*r._x + mv._row2._y*r._y + mv._row2._z*r._z;
coord znew = mv._row3._x*r._x + mv._row3._y*r._y + mv._row3._z*r._z;
return Vector3(xnew, ynew, znew);
}
// Transform Vector3 to any space, given Matrix3 and origin
__device__ Vector3 inline TransformToSomeSpace(Vector3 point, Matrix3 *mv, Vector3 *origin)
{
point -= *origin;
return multiplyRightWith(*mv, point);
}
// After transformation in camera space, project and plot (used for point rendering)
#define CLIPPLANEDISTANCE 0.2f
__device__ void inline ProjectAndPlot(const Vector3& xformed, int *pixels, int defaultColor=0x00FFFFFF )
{
if (xformed._z>CLIPPLANEDISTANCE) {
int x = (int)(MAXX/2.f + FOV * xformed._y/xformed._z);
int y = (int)(MAXY/2.f - FOV * xformed._x/xformed._z);
if (y>=0.f && y<(int)MAXY && x>=0.f && x<(int)MAXX)
pixels[y*MAXX + x] = defaultColor;
}
}
////////////////////////////////////////
// Rendering kernel for MODE_POINTS
////////////////////////////////////////
__global__ void CoreLoopVertices(int *pixels, Matrix3 *cudaWorldToCameraSpace, Vector3 *eye)
{
unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= VERTICES)
return;
// Simple projection and plotting of a white point per vertex
// Plot projected coordinates (on screen)
Vector3 v(tex1Dfetch(g_verticesTexture, 2*idx));
ProjectAndPlot(
TransformToSomeSpace(v, cudaWorldToCameraSpace, eye),
pixels);
}
//////////////////////////////////////////////
// Rendering kernel for MODE_POINTSHIDDEN
//////////////////////////////////////////////
// Create OpenGL BGR value for assignment in PBO buffer
__device__ int getColor(Pixel& p)
{
return (((unsigned)p._b) << 16) | (((unsigned)p._g) << 8) | (((unsigned)p._r));
}
__global__ void CoreLoopTriangles(int *pixels, Matrix3 *cudaWorldToCameraSpace, Triangle *pTriangles, Vector3 *eye)
{
unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= TRIANGLES)
return;
// First check if the triangle is visible from where we stand
// (closed objects only)
float4 center = tex1Dfetch(g_trianglesTexture, 5*idx);
float4 normal = tex1Dfetch(g_trianglesTexture, 5*idx+1);
Vector3 triToEye = *eye;
triToEye -= center;
// Normally we would normalize, but since we just need the sign
// of the dot product (to determine if it is facing us or not)...
//triToEye.normalize();
//if (!pTriangles[idx]._twoSided && dotCUDA(triToEye, pTriangles[idx]._normal)<0.f)
if (center.w == 0.f && dotCUDA(triToEye, normal)<0.f)
return;
int color = getColor(pTriangles[idx]._colorf);
// For each of the 3 vertices of triangle j of object i,
// transform to camera space, project and plot them
Vector3 v1(tex1Dfetch(g_verticesTexture, 2*pTriangles[idx]._idx1));
Vector3 v2(tex1Dfetch(g_verticesTexture, 2*pTriangles[idx]._idx2));
Vector3 v3(tex1Dfetch(g_verticesTexture, 2*pTriangles[idx]._idx3));
ProjectAndPlot( TransformToSomeSpace(v1, cudaWorldToCameraSpace, eye), pixels, color);
ProjectAndPlot( TransformToSomeSpace(v2, cudaWorldToCameraSpace, eye), pixels, color);
ProjectAndPlot( TransformToSomeSpace(v3, cudaWorldToCameraSpace, eye), pixels, color);
}
///////////////////////////////////////////////
// Raytracing modes
///////////////////////////////////////////////
// Helper function, that checks whether a ray intersects a bbox
__device__ bool RayIntersectsBox(
const Vector3& originInWorldSpace, const Vector3& rayInWorldSpace, int boxIdx)
{
// set Tnear = - infinity, Tfar = infinity
//
// For each pair of planes P associated with X, Y, and Z do:
// (example using X planes)
// if direction Xd = 0 then the ray is parallel to the X planes, so
// if origin Xo is not between the slabs ( Xo < Xl or Xo > Xh) then
// return false
// else, if the ray is not parallel to the plane then
// begin
// compute the intersection distance of the planes
// T1 = (Xl - Xo) / Xd
// T2 = (Xh - Xo) / Xd
// If T1 > T2 swap (T1, T2) /* since T1 intersection with near plane */
// If T1 > Tnear set Tnear =T1 /* want largest Tnear */
// If T2 < Tfar set Tfar="T2" /* want smallest Tfar */
// If Tnear > Tfar box is missed so
// return false
// If Tfar < 0 box is behind ray
// return false
// end
// end of for loop
//
// If Box survived all above tests, return true with intersection point Tnear and exit point Tfar.
coord Tnear, Tfar;
Tnear = -FLT_MAX;
Tfar = FLT_MAX;
float2 limits;
#define CHECK_NEAR_AND_FAR_INTERSECTION(c) \
if (rayInWorldSpace._ ## c == 0.f) { \
if (originInWorldSpace._##c < limits.x) return false; \
if (originInWorldSpace._##c > limits.y) return false; \
} else { \
coord T1 = (limits.x - originInWorldSpace._##c)/rayInWorldSpace._##c; \
coord T2 = (limits.y - originInWorldSpace._##c)/rayInWorldSpace._##c; \
if (T1>T2) { coord tmp=T1; T1=T2; T2=tmp; } \
if (T1 > Tnear) Tnear = T1; \
if (T2 < Tfar) Tfar = T2; \
if (Tnear > Tfar) \
return false; \
if (Tfar < 0.f) \
return false; \
}
limits = tex1Dfetch(g_pCFBVHlimitsTexture, 3*boxIdx); // box.bottom._x/top._x placed in limits.x/limits.y
CHECK_NEAR_AND_FAR_INTERSECTION(x)
limits = tex1Dfetch(g_pCFBVHlimitsTexture, 3*boxIdx+1); // box.bottom._y/top._y placed in limits.x/limits.y
CHECK_NEAR_AND_FAR_INTERSECTION(y)
limits = tex1Dfetch(g_pCFBVHlimitsTexture, 3*boxIdx+2); // box.bottom._z/top._z placed in limits.x/limits.y
CHECK_NEAR_AND_FAR_INTERSECTION(z)
return true;
}
// Templated ray/triangle intersection function - offers two compile-time options:
//
// The first one is used to discriminate between shadow rays (that stop at the first hit)
// and normal rays, that have to find the closest hit.
//
// The second one enables or disables culling of backfacing triangles, and is...
// (a) enabled for the refraction call (which needs both front and back-faces), but
// (b) disabled for reflections and shadow rays.
//
// C++ compile-time power... all lesser languages bow down... :-)
//
template <bool stopAtfirstRayHit, bool doCulling>
__device__ bool BVH_IntersectTriangles(
// Inputs
//Triangle *pTriangles,
const Vector3& origin, const Vector3& ray, unsigned avoidSelf,
// outputs
int& pBestTriIdx,
//
// both inputs and outputs!
//
// for normal rays:
// pointHitInWorldSpace (output)
// kXX (outputs) perpendicular distances of intersection point from the 3 triangle edges
// (used for PhongNormal calculations)
//
// for shadow rays:
// pointHitInWorldSpace (input) provides the light position
Vector3& pointHitInWorldSpace,
coord& kAB, coord& kBC, coord& kCA)
{
// in the loop below, maintain the closest triangle and the point where we hit it:
pBestTriIdx = -1;
coord bestTriDist;
// light position is passed in via pointHitInWorldSpace (only in shadow mode - i.e. stopAtfirstRayHit=true)
Vector3& lightPos = pointHitInWorldSpace;
// Compile-time work (stopAtfirstRayHit is template param)
if (stopAtfirstRayHit)
// In shadow ray mode, start from light distance
bestTriDist = distancesqCUDA(origin, lightPos);
else
// In normal mode, start from infinity
bestTriDist = FLT_MAX;
int stack[BVH_STACK_SIZE];
int stackIdx = 0;
stack[stackIdx++] = 0;
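// Depth-first BVH traversal with an explicit stack (no device-side recursion):
// inner nodes push their two children, leaf nodes test the triangles listed in
// their index range.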
while(stackIdx) {
int boxIdx = stack[stackIdx-1];
//CacheFriendlyBVHNode *pCurrent = &cudaBVHNodes[boxIdx];
stackIdx--;
uint4 data = tex1Dfetch(g_pCFBVHindexesOrTrilistsTexture, boxIdx);
// original, "pure" BVH form...
//if (!pCurrent->IsLeaf()) {
// cache-friendly BVH form...
//if (!(pCurrent->u.leaf._count & 0x80000000)) {
// textured BVH form...
if (!(data.x & 0x80000000)) {
if (RayIntersectsBox(origin, ray, boxIdx)) {
//stack[stackIdx++] = pCurrent->u.inner._idxRight;
stack[stackIdx++] = data.y;
//stack[stackIdx++] = pCurrent->u.inner._idxLeft;
stack[stackIdx++] = data.z;
if(stackIdx>BVH_STACK_SIZE)
{
return false; // XXX
}
}
} else {
// original, "pure" BVH form...
//BVHLeaf *p = dynamic_cast<BVHLeaf*>(pCurrent);
//for(std::list<const Triangle*>::iterator it=p->_triangles.begin();
// it != p->_triangles.end();
// it++)
// cache-friendly BVH form...
//for(unsigned i=pCurrent->u.leaf._startIndexInTriIndexList;
// i<pCurrent->u.leaf._startIndexInTriIndexList + (pCurrent->u.leaf._count & 0x7fffffff);
// textured BVH form...
for(unsigned i=data.w; i<data.w + (data.x & 0x7fffffff); i++) {
// original, "pure" BVH form...
//const Triangle& triangle = *(*it);
// cache-friendly BVH form...
//const Triangle& triangle = pTriangles[cudaTriIdxList[i]];
// textured BVH form...
int idx = tex1Dfetch(g_triIdxListTexture, i).x;
if (avoidSelf == idx)
continue; // avoid self-reflections/refractions
float4 center = tex1Dfetch(g_trianglesTexture, 5*idx);
float4 normal = tex1Dfetch(g_trianglesTexture, 5*idx+1);
// doCulling is a compile-time param, this code will be "codegenerated"
// at compile time only for reflection-related calls to Raytrace (see below)
//if (doCulling && !triangle._twoSided) {
if (doCulling && (center.w == 0.f)) { // template-param, compile-time check
// Check visibility of triangle via dot product
Vector3 fromTriToOrigin = origin;
//fromTriToOrigin -= triangle._center;
fromTriToOrigin -= center;
// Normally we would normalize, but since we just need the sign
// of the dot product (to determine if it is facing us or not)...
//fromTriToOrigin.normalize();
if (dotCUDA(fromTriToOrigin, normal)<0)
continue;
}
// Use the pre-computed triangle intersection data: normal, d, e1/d1, e2/d2, e3/d3
coord k = dotCUDA(normal, ray);
if (k == 0.0f)
continue; // this triangle is parallel to the ray, ignore it.
coord s = (normal.w - dotCUDA(normal, origin))/k;
if (s <= 0.0f) // this triangle is "behind" the origin.
continue;
if (s <= NUDGE_FACTOR)
continue;
Vector3 hit = ray*s;
hit += origin;
// Is the intersection of the ray with the triangle's plane INSIDE the triangle?
float4 ee1 = tex1Dfetch(g_trianglesTexture, 5*idx+2);
coord kt1 = dotCUDA(ee1, hit) - ee1.w; if (kt1<0.0f) continue;
float4 ee2 = tex1Dfetch(g_trianglesTexture, 5*idx+3);
coord kt2 = dotCUDA(ee2, hit) - ee2.w; if (kt2<0.0f) continue;
float4 ee3 = tex1Dfetch(g_trianglesTexture, 5*idx+4);
coord kt3 = dotCUDA(ee3, hit) - ee3.w; if (kt3<0.0f) continue;
// It is, "hit" is the world space coordinate of the intersection.
// Was this a normal ray or a shadow ray? (template param)
if (stopAtfirstRayHit) {
// Shadow ray, check whether the triangle obstructs the light
coord dist = distancesqCUDA(lightPos, hit);
if (dist < bestTriDist) // distance to light (squared) passed in kAB
return true; // we found a triangle obstructing the light, return true
} else {
// Normal ray - is this intersection closer than all the others?
coord hitZ = distancesqCUDA(origin, hit);
if (hitZ < bestTriDist) {
// maintain the closest hit
bestTriDist = hitZ;
pBestTriIdx = idx;
pointHitInWorldSpace = hit;
kAB = kt1;
kBC = kt2;
kCA = kt3;
}
}
}
}
}
// Normal ray or shadow ray? (compile-time template param)
if (!stopAtfirstRayHit)
// for normal ray, return true if we pierced a triangle
return pBestTriIdx != -1;
else
// for shadow ray, return true if we found a triangle obstructing the light.
return false;
}
// CUDA 1.2 has no recursion - I therefore use the magic of C++ templates:
// Compile-time recursion using the "depth" param!
template <int depth, bool doSpecular, bool doPhongInterp, bool doReflections, bool doShadows, bool doCulling>
__device__ Pixel Raytrace(
Vector3 originInWorldSpace, Vector3 rayInWorldSpace, int avoidSelf,
Triangle *pTriangles,
Vector3 *cudaEyePosInWorldSpace, Vector3 *cudaLightPosInWorldSpace)
{
int pBestTriIdx = -1;
const Triangle *pBestTri = NULL;
Vector3 pointHitInWorldSpace;
coord kAB=0.f, kBC=0.f, kCA=0.f; // distances from the 3 edges of the triangle (from where we hit it)
// Use the surface-area heuristic based, bounding volume hierarchy of axis-aligned bounding boxes
// (keywords: SAH, BVH, AABB)
if (!BVH_IntersectTriangles<false,doCulling>(
//pTriangles,
originInWorldSpace, rayInWorldSpace, avoidSelf,
pBestTriIdx, pointHitInWorldSpace, kAB, kBC, kCA))
// We pierced no triangle, return with no contribution (ambient is black)
return Pixel(0.f,0.f,0.f);
// Set this to pass to recursive calls below, so that we don't get self-shadow or self-reflection
// from this triangle...
avoidSelf = pBestTriIdx;
pBestTri = &pTriangles[pBestTriIdx];
// We'll also calculate the color contributed from this intersection
// Start from the triangle's color
Pixel color = pBestTri->_colorf;
// Phong interpolation of normal vector: these values are only set if
// the doPhongInterp template param is set
Vector3 phongNormal;
coord ABx,BCx,CAx,area;
float4 V1;
float4 N1;
float4 V2;
float4 N2;
float4 V3;
float4 N3;
V1 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx1);
V2 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx2);
V3 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx3);
if (doPhongInterp) { // template-param, compile-time check
// These are the closest triangle's vertices...
N1 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx1+1);
N2 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx2+1);
N3 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx3+1);
const Vector3 bestTriA = Vector3(V1.x,V1.y,V1.z);
const Vector3 bestTriB = Vector3(V2.x,V2.y,V2.z);
const Vector3 bestTriC = Vector3(V3.x,V3.y,V3.z);
// ...and their normal vectors:
const Vector3 bestTriNrmA = Vector3(N1.x,N1.y,N1.z);
const Vector3 bestTriNrmB = Vector3(N2.x,N2.y,N2.z);
const Vector3 bestTriNrmC = Vector3(N3.x,N3.y,N3.z);
// We now want to interpolate the triangle's normal,
// so that as the "pointHitInWorldSpace" gets closer to
// a vertex X, the interpolated normal becomes closer to bestTriNrmX,
// and becomes EXACTLY bestTriNrmX, if the pointHitInWorldSpace is X.
//
// To do that, we use the 3 areas of the triangle, as it is divided
// by the pointHitInWorldSpace.
//
// This is the total triangle's area: cross product of two edges
// (in fact, we should divide by 2, but since we're only interested
// in ratios (see below), there is no need)
Vector3 AB = bestTriB; AB-= bestTriA; // edge AB
Vector3 BC = bestTriC; BC-= bestTriB; // edge BC
Vector3 crossAB_BC = crossCUDA(AB, BC);
area = crossAB_BC.length(); // 2*area(ABC)
// And these are the three sub-triangles - kAB,kBC,kCA were found above...
ABx = kAB*distanceCUDA(bestTriA, bestTriB);
BCx = kBC*distanceCUDA(bestTriB, bestTriC);
CAx = kCA*distanceCUDA(bestTriC, bestTriA);
// use the area of the sub-triangle ACROSS a point, to scale!
// (which means that if pointHitInWorldSpace is exactly on a vertex,
// the area of the sub-triangle becomes the area of the triangle!)
Vector3 phongNormalA = bestTriNrmA; phongNormalA *= BCx / area;
Vector3 phongNormalB = bestTriNrmB; phongNormalB *= CAx / area;
Vector3 phongNormalC = bestTriNrmC; phongNormalC *= ABx / area;
// and finally, accumulate the three contributions and normalize.
phongNormal = phongNormalA + phongNormalB + phongNormalC;
phongNormal.normalize();
} else
phongNormal = pBestTri->_normal;
#ifdef AMBIENT_OCCLUSION
// Calculate ambient occlusion - throw AMBIENT_SAMPLES number of random rays
// in the hemisphere formed from the pointHitInWorldSpace and the normal vector...
int i=0;
coord totalLight = 0.f, maxLight = 0.f;
while (i<AMBIENT_SAMPLES) {
Vector3 ambientRay = phongNormal;
ambientRay._x += float(rand()-RAND_MAX/2)/(RAND_MAX/2);
ambientRay._y += float(rand()-RAND_MAX/2)/(RAND_MAX/2);
ambientRay._z += float(rand()-RAND_MAX/2)/(RAND_MAX/2);
float cosangle = dotCUDA(ambientRay, phongNormal);
if (cosangle<0.f) continue;
i++;
maxLight += cosangle;
ambientRay.normalize();
Vector3 temp(pointHitInWorldSpace);
temp += ambientRay*AMBIENT_RANGE;
const Triangle *dummy;
// Some objects needs a "nudge", to avoid self-shadowing
//Vector3 nudgedPointHitInWorldSpace = pointHitInWorldSpace;
//nudgedPointHitInWorldSpace += ambientRay*.005f;
//if (!BVH_IntersectTriangles<true,true>(
// nudgedPointHitInWorldSpace, ambientRay, avoidSelf,
if (!BVH_IntersectTriangles<true,true>(
pTriangles,
pointHitInWorldSpace, ambientRay, avoidSelf,
dummy, temp, kAB, kAB, kAB)) {
// Accumulate contribution of this random ray
totalLight += cosangle;
}
}
// total ambient light, averaged over all random rays
color *= (AMBIENT/255.0f)*(totalLight/maxLight);
#else
// Don't calculate ambient occlusion, use the pre-calculated value from the model
// (assuming it exists!)
coord ambientOcclusionCoeff;
if (doPhongInterp) { // template-param, compile-time check
// we have a phong normal, so use the subtriangle areas
// to interpolate the 3 ambientOcclusionCoeff values
ambientOcclusionCoeff =
V1.w*BCx/area +
V2.w*CAx/area +
V3.w*ABx/area;
/*pVertices[pBestTri->_idx1]._ambientOcclusionCoeff*BCx/area +
pVertices[pBestTri->_idx2]._ambientOcclusionCoeff*CAx/area +
pVertices[pBestTri->_idx3]._ambientOcclusionCoeff*ABx/area;*/
} else {
// we don't have a Phong normal, just average the 3 values of the vertices
ambientOcclusionCoeff = (V1.w + V2.w + V3.w)/3.f;
/*pVertices[pBestTri->_idx1]._ambientOcclusionCoeff +
pVertices[pBestTri->_idx2]._ambientOcclusionCoeff +
pVertices[pBestTri->_idx3]._ambientOcclusionCoeff)/3.f;*/
}
coord ambientFactor = (coord) ((AMBIENT*ambientOcclusionCoeff/255.0f)/255.0f);
color *= ambientFactor;
#endif // AMBIENT_OCCLUSION
/*
// Now, for all the lights...
for(unsigned i=0; i<scene._lights.size(); i++) {
Light& light = *scene._lights[i];
*/
Vector3& light = *cudaLightPosInWorldSpace;
// This light's diffuse and specular contribution
Pixel dColor = Pixel(); // start with black
// We calculate the vector from point hit, to light (both in world space).
Vector3 pointToLight = light;
pointToLight -= pointHitInWorldSpace;
bool inShadow = false;
if (doShadows) { // template-param, compile-time check
// this is our distance from the light (squared, i.e. we didn't use a sqrt)
coord distanceFromLightSq = pointToLight.lengthsq();
Vector3 shadowrayInWorldSpace = pointToLight;
shadowrayInWorldSpace /= sqrt(distanceFromLightSq);
int pDummy; // just to fill-in the param, not used for shadowrays
if (BVH_IntersectTriangles<true,doCulling>(
//pTriangles,
pointHitInWorldSpace, shadowrayInWorldSpace, avoidSelf,
pDummy, // dummy
light,
kAB, kAB, kAB)) // dummies
{
//continue; // we were in shadow, go to next light
inShadow = true;
}
}
if (!inShadow) {
// Diffuse color
pointToLight.normalize(); // vector from point to light (in world space)
coord intensity = dotCUDA(phongNormal, pointToLight);
if (intensity<0.f) {
; // in shadow, let it be in ambient
} else {
Pixel diffuse = pBestTri->_colorf;
diffuse *= (coord) (DIFFUSE*intensity/255.f); // diffuse set to a maximum of 130/255
dColor += diffuse;
if (doSpecular) { // template-param, compile-time check
// Specular color
// We will use the half vector: pointToLight + point to camera
Vector3 pointToCamera = *cudaEyePosInWorldSpace;
pointToCamera -= pointHitInWorldSpace;
pointToCamera.normalize();
Vector3 half = pointToLight;
half += pointToCamera;
half.normalize();
// use the interpolated phong normal!
coord intensity2 = dotCUDA(half, phongNormal);
if (intensity2>0.f) {
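// Specular exponent of 32: five successive squarings of the cosine term.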
intensity2 *= intensity2;
intensity2 *= intensity2;
intensity2 *= intensity2;
intensity2 *= intensity2;
intensity2 *= intensity2;
dColor += Pixel(
(unsigned char)(SPECULAR*intensity2),
(unsigned char)(SPECULAR*intensity2),
(unsigned char)(SPECULAR*intensity2));
}
}
}
color += dColor;
}
// } for each light
if (!doReflections)
return color;
else {
originInWorldSpace = pointHitInWorldSpace;
const Vector3& nrm = phongNormal;
float c1 = -dotCUDA(rayInWorldSpace, nrm);
// Reflections:
//
// ray = ray - 2 (ray dot normal) normal
Vector3 reflectedRay = rayInWorldSpace;
reflectedRay += nrm*(2.0f*c1);
reflectedRay.normalize();
return
color
/* use backface culling for reflection rays: <true> */
+ Raytrace<depth+1, doSpecular, doPhongInterp, doReflections, doShadows, true>(
originInWorldSpace, reflectedRay, avoidSelf,
pTriangles,
cudaEyePosInWorldSpace, cudaLightPosInWorldSpace) * REFLECTIONS_RATE
/* ...but not for refraction rays: <false>
REMOVED, 2011/02/04
*/
;
}
}
// CUDA 1.2 has no recursion - I therefore use the magic of C++ templates:
// Compile-time recursion using the "depth" param!
//
// These are the template specializations that stop the compile-time recursion
// at MAX_RAY_DEPTH level.
#define STOP_RECURSION(a,b,c,d,e) \
template <> \
__device__ Pixel Raytrace<MAX_RAY_DEPTH,a,b,c,d,e>( \
Vector3 originInWorldSpace, Vector3 rayInWorldSpace, int avoidSelf, \
Triangle *pTriangles, \
Vector3 *cudaEyePosInWorldSpace, Vector3 *cudaLightPosInWorldSpace) \
{ \
return Pixel(0.f,0.f,0.f); \
}
#define f false
#define t true
STOP_RECURSION(f,f,f,f,f)
STOP_RECURSION(f,f,f,f,t)
STOP_RECURSION(f,f,f,t,f)
STOP_RECURSION(f,f,f,t,t)
STOP_RECURSION(f,f,t,f,f)
STOP_RECURSION(f,f,t,f,t)
STOP_RECURSION(f,f,t,t,f)
STOP_RECURSION(f,f,t,t,t)
STOP_RECURSION(f,t,f,f,f)
STOP_RECURSION(f,t,f,f,t)
STOP_RECURSION(f,t,f,t,f)
STOP_RECURSION(f,t,f,t,t)
STOP_RECURSION(f,t,t,f,f)
STOP_RECURSION(f,t,t,f,t)
STOP_RECURSION(f,t,t,t,f)
STOP_RECURSION(f,t,t,t,t)
STOP_RECURSION(t,f,f,f,f)
STOP_RECURSION(t,f,f,f,t)
STOP_RECURSION(t,f,f,t,f)
STOP_RECURSION(t,f,f,t,t)
STOP_RECURSION(t,f,t,f,f)
STOP_RECURSION(t,f,t,f,t)
STOP_RECURSION(t,f,t,t,f)
STOP_RECURSION(t,f,t,t,t)
STOP_RECURSION(t,t,f,f,f)
STOP_RECURSION(t,t,f,f,t)
STOP_RECURSION(t,t,f,t,f)
STOP_RECURSION(t,t,f,t,t)
STOP_RECURSION(t,t,t,f,f)
STOP_RECURSION(t,t,t,f,t)
STOP_RECURSION(t,t,t,t,f)
STOP_RECURSION(t,t,t,t,t)
#undef f
#undef t
// Main CUDA kernel, templated, to support each of the desired features:
//
// - using specular lights or not
// - doing Phong normal interpolation or not
// - doing reflections or not
// - doing shadows or not
// - doing anti-alias or not
//
template <bool doSpecular, bool doPhongInterp, bool doReflections, bool doShadows, bool antialias>
__global__ void CoreLoopTrianglesRaycaster(
int *pixels,
Matrix3 *cudaWorldToCameraSpace,
Triangle *pTriangles,
Vector3 *cudaEyePosInWorldSpace, Vector3 *cudaLightPosInWorldSpace,
unsigned *cudaMortonTable)
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx>=MAXX*MAXY)
return;
// without MORTON_ORDER
// int x = idx % MAXX;
// int y = idx / MAXX;
// This code does not map idx to pixels in the normal, left-to-right, top-to-bottom
// order. Instead, it uses the pre-calculated Morton order of pixels in screen
// space, to make threads end up reading the same (hopefully) BVH and triangle
// data (more coalesced, cache-friendly accesses).
int x = int(cudaMortonTable[idx] & 0xFFFF);
int y = int((cudaMortonTable[idx] & 0xFFFF0000)>>16);
Pixel finalColor(0,0,0);
int pixelsTraced = 1;
if (antialias)
pixelsTraced = 4;
while(pixelsTraced--) {
// We will shoot a ray in camera space (from Eye to the screen point, so in camera
// space, from (0,0,0) to this:
coord xx = (coord)x;
coord yy = (coord)y;
if (antialias) {
// nudge in a cross pattern around the pixel center
xx += 0.25f - .5f*(pixelsTraced&1);
yy += 0.25f - .5f*((pixelsTraced&2)>>1);
}
coord lx = coord((MAXY/2)-yy)/SCREEN_DIST;
coord ly = coord(xx-(MAXX/2))/SCREEN_DIST;
coord lz = 1.0f;
Vector3 rayInCameraSpace(lx,ly,lz);
rayInCameraSpace.normalize();
// We will need the origin in world space
Vector3 originInWorldSpace = *cudaEyePosInWorldSpace;
// We have a rayInCameraSpace, and we want to use the BVH, which was constructed
// in World space, so we convert the ray to World space
Vector3 rayInWorldSpace = cudaWorldToCameraSpace->_row1 * rayInCameraSpace._x;
rayInWorldSpace += cudaWorldToCameraSpace->_row2 * rayInCameraSpace._y;
rayInWorldSpace += cudaWorldToCameraSpace->_row3 * rayInCameraSpace._z;
// in theory, this should not be required
rayInWorldSpace.normalize();
// Primary ray, level 0, and we want backface culling: <true>
finalColor += Raytrace<0, doSpecular, doPhongInterp, doReflections, doShadows, true>(
originInWorldSpace, rayInWorldSpace, -1,
pTriangles,
cudaEyePosInWorldSpace, cudaLightPosInWorldSpace);
}
if (antialias)
finalColor /= 4.f;
if (finalColor._r>255.0f) finalColor._r=255.0f;
if (finalColor._g>255.0f) finalColor._g=255.0f;
if (finalColor._b>255.0f) finalColor._b=255.0f;
int color = getColor(finalColor);
// without MORTON_ORDER
//pixels[idx] = color;
pixels[y*MAXX+x] = color;
}
// The bridge to the normal C++ world: templated, to include only the mode-specific code in each incantation
bool g_bFirstTime = true;
void CudaRender(
Matrix3 *cudaWorldToCameraSpace,
Vertex *cudaPtrVertices, Triangle *cudaPtrTriangles, float *cudaTriangleIntersectionData,
int *cudaTriIdxList, float *cudaBVHlimits, int *cudaBVHindexesOrTrilists,
Vector3 *cudaEyePosInWorldSpace, Vector3 *cudaLightPosInWorldSpace,
unsigned *cudaMortonTable)
{
if (g_bFirstTime) {
// The first time any of the CudaRender templates is called,
// bind the texture data!
g_bFirstTime = false;
hipChannelFormatDesc channel1desc = hipCreateChannelDesc<uint1>();
hipBindTexture(NULL, &g_triIdxListTexture, cudaTriIdxList, &channel1desc, g_triIndexListNo*sizeof(uint1));
hipChannelFormatDesc channel2desc = hipCreateChannelDesc<float2>();
hipBindTexture(NULL, &g_pCFBVHlimitsTexture, cudaBVHlimits, &channel2desc, g_pCFBVH_No*6*sizeof(float));
hipChannelFormatDesc channel3desc = hipCreateChannelDesc<int4>();
hipBindTexture(NULL, &g_pCFBVHindexesOrTrilistsTexture, cudaBVHindexesOrTrilists, &channel3desc,
g_pCFBVH_No*sizeof(uint4));
hipChannelFormatDesc channel4desc = hipCreateChannelDesc<float4>();
hipBindTexture(NULL, &g_verticesTexture, cudaPtrVertices, &channel4desc, g_verticesNo*8*sizeof(float));
hipChannelFormatDesc channel5desc = hipCreateChannelDesc<float4>();
hipBindTexture(NULL, &g_trianglesTexture, cudaTriangleIntersectionData, &channel5desc, g_trianglesNo*20*sizeof(float));
}
int *pixels;
glBindTexture(GL_TEXTURE_2D, tex);
SAFE(hipGLMapBufferObject__((void**)&pixels, buffer));
if (g_bUsePoints) {
hipMemset(pixels, 0x40, MAXX*MAXY*sizeof(unsigned)); // Clear all pixels to ambient
int blocksVertices = (g_verticesNo + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( CoreLoopVertices), dim3(blocksVertices), dim3(THREADS_PER_BLOCK) , 0, 0,
pixels, cudaWorldToCameraSpace, cudaEyePosInWorldSpace);
/*
hipMemset(pixels, 0x40, MAXX*MAXY*sizeof(unsigned)); // Clear all pixels to ambient
int blocksTriangles = (g_trianglesNo + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
CoreLoopTriangles<<< blocksTriangles, THREADS_PER_BLOCK >>>(
pixels, cudaWorldToCameraSpace, cudaPtrTriangles, cudaEyePosInWorldSpace);*/
} else {
int blockPixels = (MAXY*MAXX + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
DBG_PUTS(1, "CoreLoopTrianglesRaycaster begins");
#define PAINT(bDoSpecular,bDoPhongInterp,bDoReflections,bDoShadows,bDoAntialias) \
hipLaunchKernelGGL(( CoreLoopTrianglesRaycaster<bDoSpecular,bDoPhongInterp,bDoReflections,bDoShadows,bDoAntialias>) \
, dim3(blockPixels), dim3(THREADS_PER_BLOCK) , 0, 0, \
pixels, \
cudaWorldToCameraSpace, \
cudaPtrTriangles, \
cudaEyePosInWorldSpace, cudaLightPosInWorldSpace, \
cudaMortonTable);
// Being templated, one can use the CoreLoopTrianglesRaycaster in any form one wants.
// In theory, C++ compilers don't need this expansion (which I wrote with a simple Python script)
// Unfortunately, we can't pass runtime vars in template params, not even when they are enumerants
// or booleans...
PAINT( true , true , true , true , true )
}
hipError_t error = hipGetLastError();
if(error != hipSuccess) {
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
SAFE(hipDeviceSynchronize());
SAFE(hipGLUnmapBufferObject(buffer));
// Use OpenGL texture to display the generated frame at lightning speed
// (the PBO buffer is already on the card, no useless PCI bus criss-cross)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, MAXX, MAXY, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, -1.0f);
glTexCoord2f(1.0f, 1.0f); glVertex2f(1.0f, -1.0f);
glTexCoord2f(1.0f, 0.0f); glVertex2f(1.0f, 1.0f);
glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, 1.0f);
glEnd();
// hack:
extern bool g_benchmark;
if (!g_benchmark) {
// Display the "Press H for help" message
glDisable(GL_LIGHTING);
glDisable(GL_TEXTURE_2D);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2f(-0.95, 0.9);
const char *help = "Press H for help";
for(unsigned o=0;o<strlen(help); o++)
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, help[o]);
}
SDL_GL_SwapBuffers();
}
| 0a50715887faf6103cbfa8b3143c566cd7924dda.cu | /*
* renderer - A simple implementation of polygon-based 3D algorithms.
* Copyright (C) 2004 Thanassis Tsiodras ([email protected])
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef _WIN32
#include <windows.h>
#include <GL/glew.h>
#else
#define GL_GLEXT_PROTOTYPES
#include <GL/gl.h>
#endif
#include <GL/glut.h>
#include <SDL.h>
#include <cstdio>
#include <cfloat>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <vector_types.h>
#include "Types.h"
#include "Base3d.h"
#include "Camera.h"
#include "cudarenderer.h"
/////////////////////////////////
// Raytracing configuration
#define THREADS_PER_BLOCK 64
// What depth to stop reflections and refractions?
#define MAX_RAY_DEPTH 2
// Ray intersections of a distance <=NUDGE_FACTOR (from the origin) don't count
#define NUDGE_FACTOR 1e-5f
// How much the reflected color contributes to the overall result
#define REFLECTIONS_RATE 0.375f
//////////////////////////////
// Enable ambient occlusion?
//#define AMBIENT_OCCLUSION
// How many ambient rays to spawn per ray intersection?
#define AMBIENT_SAMPLES 32
// How close to check for ambient occlusion?
#define AMBIENT_RANGE 0.15f
__constant__ unsigned VERTICES;
__constant__ unsigned TRIANGLES;
// Textures for vertices, triangles and BVH data
// (see CudaRender() below, as well as main() to see the data setup process)
texture<uint1, 1, cudaReadModeElementType> g_triIdxListTexture;
texture<float2, 1, cudaReadModeElementType> g_pCFBVHlimitsTexture;
texture<uint4, 1, cudaReadModeElementType> g_pCFBVHindexesOrTrilistsTexture;
texture<float4, 1, cudaReadModeElementType> g_verticesTexture;
texture<float4, 1, cudaReadModeElementType> g_trianglesTexture;
// Utility functions
// CUDA dot product
__device__ coord dotCUDA(const Vector3& l, const Vector3& r)
{
return l._x*r._x +l._y*r._y +l._z*r._z;
}
__device__ coord dotCUDA(const float4& l, const Vector3& r)
{
return l.x*r._x +l.y*r._y +l.z*r._z;
}
__device__ coord dotCUDA(const Vector3& l, const float4& r)
{
return l._x*r.x +l._y*r.y +l._z*r.z;
}
// CUDA cross
__device__ Vector3 crossCUDA(const Vector3& l, const Vector3& r)
{
coord x,y,z;
const coord &aax=l._x;
const coord &aay=l._y;
const coord &aaz=l._z;
const coord &bbx=r._x;
const coord &bby=r._y;
const coord &bbz=r._z;
x=aay*bbz-bby*aaz;
y=bbx*aaz-aax*bbz;
z=aax*bby-aay*bbx;
return Vector3(x,y,z);
}
// CUDA distance of two points
__device__ coord distanceCUDA(const Vector3& a, const Vector3& b)
{
coord dx=a._x - b._x;
coord dy=a._y - b._y;
coord dz=a._z - b._z;
return sqrt(dx*dx + dy*dy + dz*dz);
}
// Sometimes you just want to compare, so no sqrt is needed
__device__ coord distancesqCUDA(const Vector3& a, const Vector3& b)
{
coord dx=a._x - b._x;
coord dy=a._y - b._y;
coord dz=a._z - b._z;
return dx*dx + dy*dy + dz*dz;
}
// Matrix3x3 multipled by Vector3
__device__ Vector3 multiplyRightWith(const Matrix3& mv, const Vector3& r)
{
coord xnew = mv._row1._x*r._x + mv._row1._y*r._y + mv._row1._z*r._z;
coord ynew = mv._row2._x*r._x + mv._row2._y*r._y + mv._row2._z*r._z;
coord znew = mv._row3._x*r._x + mv._row3._y*r._y + mv._row3._z*r._z;
return Vector3(xnew, ynew, znew);
}
// Transform Vector3 to any space, given Matrix3 and origin
__device__ Vector3 inline TransformToSomeSpace(Vector3 point, Matrix3 *mv, Vector3 *origin)
{
point -= *origin;
return multiplyRightWith(*mv, point);
}
// After transformation in camera space, project and plot (used for point rendering)
#define CLIPPLANEDISTANCE 0.2f
__device__ void inline ProjectAndPlot(const Vector3& xformed, int *pixels, int defaultColor=0x00FFFFFF )
{
if (xformed._z>CLIPPLANEDISTANCE) {
int x = (int)(MAXX/2.f + FOV * xformed._y/xformed._z);
int y = (int)(MAXY/2.f - FOV * xformed._x/xformed._z);
if (y>=0.f && y<(int)MAXY && x>=0.f && x<(int)MAXX)
pixels[y*MAXX + x] = defaultColor;
}
}
////////////////////////////////////////
// Rendering kernel for MODE_POINTS
////////////////////////////////////////
__global__ void CoreLoopVertices(int *pixels, Matrix3 *cudaWorldToCameraSpace, Vector3 *eye)
{
unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= VERTICES)
return;
// Simple projection and plotting of a white point per vertex
// Plot projected coordinates (on screen)
Vector3 v(tex1Dfetch(g_verticesTexture, 2*idx));
ProjectAndPlot(
TransformToSomeSpace(v, cudaWorldToCameraSpace, eye),
pixels);
}
//////////////////////////////////////////////
// Rendering kernel for MODE_POINTSHIDDEN
//////////////////////////////////////////////
// Create OpenGL BGR value for assignment in PBO buffer
__device__ int getColor(Pixel& p)
{
return (((unsigned)p._b) << 16) | (((unsigned)p._g) << 8) | (((unsigned)p._r));
}
__global__ void CoreLoopTriangles(int *pixels, Matrix3 *cudaWorldToCameraSpace, Triangle *pTriangles, Vector3 *eye)
{
unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= TRIANGLES)
return;
// First check if the triangle is visible from where we stand
// (closed objects only)
float4 center = tex1Dfetch(g_trianglesTexture, 5*idx);
float4 normal = tex1Dfetch(g_trianglesTexture, 5*idx+1);
Vector3 triToEye = *eye;
triToEye -= center;
// Normally we would normalize, but since we just need the sign
// of the dot product (to determine if it is facing us or not)...
//triToEye.normalize();
//if (!pTriangles[idx]._twoSided && dotCUDA(triToEye, pTriangles[idx]._normal)<0.f)
if (center.w == 0.f && dotCUDA(triToEye, normal)<0.f)
return;
int color = getColor(pTriangles[idx]._colorf);
// For each of the 3 vertices of triangle j of object i,
// transform to camera space, project and plot them
Vector3 v1(tex1Dfetch(g_verticesTexture, 2*pTriangles[idx]._idx1));
Vector3 v2(tex1Dfetch(g_verticesTexture, 2*pTriangles[idx]._idx2));
Vector3 v3(tex1Dfetch(g_verticesTexture, 2*pTriangles[idx]._idx3));
ProjectAndPlot( TransformToSomeSpace(v1, cudaWorldToCameraSpace, eye), pixels, color);
ProjectAndPlot( TransformToSomeSpace(v2, cudaWorldToCameraSpace, eye), pixels, color);
ProjectAndPlot( TransformToSomeSpace(v3, cudaWorldToCameraSpace, eye), pixels, color);
}
///////////////////////////////////////////////
// Raytracing modes
///////////////////////////////////////////////
// Helper function, that checks whether a ray intersects a bbox
__device__ bool RayIntersectsBox(
const Vector3& originInWorldSpace, const Vector3& rayInWorldSpace, int boxIdx)
{
// set Tnear = - infinity, Tfar = infinity
//
// For each pair of planes P associated with X, Y, and Z do:
// (example using X planes)
// if direction Xd = 0 then the ray is parallel to the X planes, so
// if origin Xo is not between the slabs ( Xo < Xl or Xo > Xh) then
// return false
// else, if the ray is not parallel to the plane then
// begin
// compute the intersection distance of the planes
// T1 = (Xl - Xo) / Xd
// T2 = (Xh - Xo) / Xd
// If T1 > T2 swap (T1, T2) /* since T1 intersection with near plane */
// If T1 > Tnear set Tnear =T1 /* want largest Tnear */
// If T2 < Tfar set Tfar = T2 /* want smallest Tfar */
// If Tnear > Tfar box is missed so
// return false
// If Tfar < 0 box is behind ray
// return false
// end
// end of for loop
//
// If Box survived all above tests, return true with intersection point Tnear and exit point Tfar.
coord Tnear, Tfar;
Tnear = -FLT_MAX;
Tfar = FLT_MAX;
float2 limits;
#define CHECK_NEAR_AND_FAR_INTERSECTION(c) \
if (rayInWorldSpace._ ## c == 0.f) { \
if (originInWorldSpace._##c < limits.x) return false; \
if (originInWorldSpace._##c > limits.y) return false; \
} else { \
coord T1 = (limits.x - originInWorldSpace._##c)/rayInWorldSpace._##c; \
coord T2 = (limits.y - originInWorldSpace._##c)/rayInWorldSpace._##c; \
if (T1>T2) { coord tmp=T1; T1=T2; T2=tmp; } \
if (T1 > Tnear) Tnear = T1; \
if (T2 < Tfar) Tfar = T2; \
if (Tnear > Tfar) \
return false; \
if (Tfar < 0.f) \
return false; \
}
limits = tex1Dfetch(g_pCFBVHlimitsTexture, 3*boxIdx); // box.bottom._x/top._x placed in limits.x/limits.y
CHECK_NEAR_AND_FAR_INTERSECTION(x)
limits = tex1Dfetch(g_pCFBVHlimitsTexture, 3*boxIdx+1); // box.bottom._y/top._y placed in limits.x/limits.y
CHECK_NEAR_AND_FAR_INTERSECTION(y)
limits = tex1Dfetch(g_pCFBVHlimitsTexture, 3*boxIdx+2); // box.bottom._z/top._z placed in limits.x/limits.y
CHECK_NEAR_AND_FAR_INTERSECTION(z)
return true;
}
// Templated ray/triangle intersection function - offers two compile-time options:
//
// The first one is used to discriminate between shadow rays (that stop at the first hit)
// and normal rays, that have to find the closest hit.
//
// The second one enables or disables culling of backfacing triangles, and is...
// (a) enabled for the refraction call (which needs both front and back-faces), but
// (b) disabled for reflections and shadow rays.
//
// C++ compile-time power... all lesser languages bow down... :-)
//
template <bool stopAtfirstRayHit, bool doCulling>
__device__ bool BVH_IntersectTriangles(
// Inputs
//Triangle *pTriangles,
const Vector3& origin, const Vector3& ray, unsigned avoidSelf,
// outputs
int& pBestTriIdx,
//
// both inputs and outputs!
//
// for normal rays:
// pointHitInWorldSpace (output)
// kXX (outputs) perpendicular distances of intersection point from the 3 triangle edges
// (used for PhongNormal calculations)
//
// for shadow rays:
// pointHitInWorldSpace (input) provides the light position
Vector3& pointHitInWorldSpace,
coord& kAB, coord& kBC, coord& kCA)
{
// in the loop below, maintain the closest triangle and the point where we hit it:
pBestTriIdx = -1;
coord bestTriDist;
// light position passed-in pointHitInWorldSpace (only in shadow mode - i.e. stopAtfirstRayHit=true)
Vector3& lightPos = pointHitInWorldSpace;
// Compile-time work (stopAtfirstRayHit is template param)
if (stopAtfirstRayHit)
// In shadow ray mode, start from light distance
bestTriDist = distancesqCUDA(origin, lightPos);
else
// In normal mode, start from infinity
bestTriDist = FLT_MAX;
int stack[BVH_STACK_SIZE];
int stackIdx = 0;
stack[stackIdx++] = 0;
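// Iterative traversal with an explicit stack (no device-side recursion); index 0 pushed above is the root of the cache-friendly BVH array.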
while(stackIdx) {
int boxIdx = stack[stackIdx-1];
//CacheFriendlyBVHNode *pCurrent = &cudaBVHNodes[boxIdx];
stackIdx--;
uint4 data = tex1Dfetch(g_pCFBVHindexesOrTrilistsTexture, boxIdx);
// original, "pure" BVH form...
//if (!pCurrent->IsLeaf()) {
// cache-friendly BVH form...
//if (!(pCurrent->u.leaf._count & 0x80000000)) {
// textured BVH form...
if (!(data.x & 0x80000000)) {
if (RayIntersectsBox(origin, ray, boxIdx)) {
//stack[stackIdx++] = pCurrent->u.inner._idxRight;
stack[stackIdx++] = data.y;
//stack[stackIdx++] = pCurrent->u.inner._idxLeft;
stack[stackIdx++] = data.z;
if(stackIdx>BVH_STACK_SIZE)
{
return false; // XXX
}
}
} else {
// original, "pure" BVH form...
//BVHLeaf *p = dynamic_cast<BVHLeaf*>(pCurrent);
//for(std::list<const Triangle*>::iterator it=p->_triangles.begin();
// it != p->_triangles.end();
// it++)
// cache-friendly BVH form...
//for(unsigned i=pCurrent->u.leaf._startIndexInTriIndexList;
// i<pCurrent->u.leaf._startIndexInTriIndexList + (pCurrent->u.leaf._count & 0x7fffffff);
// textured BVH form...
for(unsigned i=data.w; i<data.w + (data.x & 0x7fffffff); i++) {
// original, "pure" BVH form...
//const Triangle& triangle = *(*it);
// cache-friendly BVH form...
//const Triangle& triangle = pTriangles[cudaTriIdxList[i]];
// textured BVH form...
int idx = tex1Dfetch(g_triIdxListTexture, i).x;
if (avoidSelf == idx)
continue; // avoid self-reflections/refractions
float4 center = tex1Dfetch(g_trianglesTexture, 5*idx);
float4 normal = tex1Dfetch(g_trianglesTexture, 5*idx+1);
// doCulling is a compile-time param, this code will be "codegenerated"
// at compile time only for reflection-related calls to Raytrace (see below)
//if (doCulling && !triangle._twoSided) {
if (doCulling && (center.w == 0.f)) { // template-param, compile-time check
// Check visibility of triangle via dot product
Vector3 fromTriToOrigin = origin;
//fromTriToOrigin -= triangle._center;
fromTriToOrigin -= center;
// Normally we would normalize, but since we just need the sign
// of the dot product (to determine if it is facing us or not)...
//fromTriToOrigin.normalize();
if (dotCUDA(fromTriToOrigin, normal)<0)
continue;
}
// Use the pre-computed triangle intersection data: normal, d, e1/d1, e2/d2, e3/d3
coord k = dotCUDA(normal, ray);
if (k == 0.0f)
continue; // this triangle is parallel to the ray, ignore it.
coord s = (normal.w - dotCUDA(normal, origin))/k;
if (s <= 0.0f) // this triangle is "behind" the origin.
continue;
if (s <= NUDGE_FACTOR)
continue;
Vector3 hit = ray*s;
hit += origin;
// Is the intersection of the ray with the triangle's plane INSIDE the triangle?
float4 ee1 = tex1Dfetch(g_trianglesTexture, 5*idx+2);
coord kt1 = dotCUDA(ee1, hit) - ee1.w; if (kt1<0.0f) continue;
float4 ee2 = tex1Dfetch(g_trianglesTexture, 5*idx+3);
coord kt2 = dotCUDA(ee2, hit) - ee2.w; if (kt2<0.0f) continue;
float4 ee3 = tex1Dfetch(g_trianglesTexture, 5*idx+4);
coord kt3 = dotCUDA(ee3, hit) - ee3.w; if (kt3<0.0f) continue;
// It is, "hit" is the world space coordinate of the intersection.
// Was this a normal ray or a shadow ray? (template param)
if (stopAtfirstRayHit) {
// Shadow ray, check whether the triangle obstructs the light
coord dist = distancesqCUDA(lightPos, hit);
if (dist < bestTriDist) // distance to light (squared) passed in kAB
return true; // we found a triangle obstructing the light, return true
} else {
// Normal ray - is this intersection closer than all the others?
coord hitZ = distancesqCUDA(origin, hit);
if (hitZ < bestTriDist) {
// maintain the closest hit
bestTriDist = hitZ;
pBestTriIdx = idx;
pointHitInWorldSpace = hit;
kAB = kt1;
kBC = kt2;
kCA = kt3;
}
}
}
}
}
// Normal ray or shadow ray? (compile-time template param)
if (!stopAtfirstRayHit)
// for normal ray, return true if we pierced a triangle
return pBestTriIdx != -1;
else
// for shadow ray, return true if we found a triangle obstructing the light.
return false;
}
// CUDA 1.2 has no recursion - I therefore use the magic of C++ templates:
// Compile-time recursion using the "depth" param!
template <int depth, bool doSpecular, bool doPhongInterp, bool doReflections, bool doShadows, bool doCulling>
__device__ Pixel Raytrace(
Vector3 originInWorldSpace, Vector3 rayInWorldSpace, int avoidSelf,
Triangle *pTriangles,
Vector3 *cudaEyePosInWorldSpace, Vector3 *cudaLightPosInWorldSpace)
{
int pBestTriIdx = -1;
const Triangle *pBestTri = NULL;
Vector3 pointHitInWorldSpace;
coord kAB=0.f, kBC=0.f, kCA=0.f; // distances from the 3 edges of the triangle (from where we hit it)
// Use the surface-area heuristic based, bounding volume hierarchy of axis-aligned bounding boxes
// (keywords: SAH, BVH, AABB)
if (!BVH_IntersectTriangles<false,doCulling>(
//pTriangles,
originInWorldSpace, rayInWorldSpace, avoidSelf,
pBestTriIdx, pointHitInWorldSpace, kAB, kBC, kCA))
// We pierced no triangle, return with no contribution (ambient is black)
return Pixel(0.f,0.f,0.f);
// Set this to pass to recursive calls below, so that we don't get self-shadow or self-reflection
// from this triangle...
avoidSelf = pBestTriIdx;
pBestTri = &pTriangles[pBestTriIdx];
// We'll also calculate the color contributed from this intersection
// Start from the triangle's color
Pixel color = pBestTri->_colorf;
// Phong interpolation of normal vector: these values are only set if
// the doPhongInterp template param is set
Vector3 phongNormal;
coord ABx,BCx,CAx,area;
float4 V1;
float4 N1;
float4 V2;
float4 N2;
float4 V3;
float4 N3;
V1 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx1);
V2 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx2);
V3 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx3);
if (doPhongInterp) { // template-param, compile-time check
// These are the closest triangle's vertices...
N1 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx1+1);
N2 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx2+1);
N3 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx3+1);
const Vector3 bestTriA = Vector3(V1.x,V1.y,V1.z);
const Vector3 bestTriB = Vector3(V2.x,V2.y,V2.z);
const Vector3 bestTriC = Vector3(V3.x,V3.y,V3.z);
// ...and their normal vectors:
const Vector3 bestTriNrmA = Vector3(N1.x,N1.y,N1.z);
const Vector3 bestTriNrmB = Vector3(N2.x,N2.y,N2.z);
const Vector3 bestTriNrmC = Vector3(N3.x,N3.y,N3.z);
// We now want to interpolate the triangle's normal,
// so that as the "pointHitInWorldSpace" gets closer to
// a vertex X, the interpolated normal becomes closer to bestTriNrmX,
// and becomes EXACTLY bestTriNrmX, if the pointHitInWorldSpace is X.
//
// To do that, we use the 3 areas of the triangle, as it is divided
// by the pointHitInWorldSpace.
//
// This is the total triangle's area: cross product of two edges
// (in fact, we should divide by 2, but since we're only interested
// in ratios (see below), there is no need)
Vector3 AB = bestTriB; AB-= bestTriA; // edge AB
Vector3 BC = bestTriC; BC-= bestTriB; // edge BC
Vector3 crossAB_BC = crossCUDA(AB, BC);
area = crossAB_BC.length(); // 2*area(ABC)
// And these are the three sub-triangles - kAB,kBC,kCA were found above...
ABx = kAB*distanceCUDA(bestTriA, bestTriB);
BCx = kBC*distanceCUDA(bestTriB, bestTriC);
CAx = kCA*distanceCUDA(bestTriC, bestTriA);
// use the area of the sub-triangle ACROSS a point, to scale!
// (which means that if pointHitInCameraSpace is exactly on a vertex,
// the area of the sub-triangle becomes the area of the triangle!)
Vector3 phongNormalA = bestTriNrmA; phongNormalA *= BCx / area;
Vector3 phongNormalB = bestTriNrmB; phongNormalB *= CAx / area;
Vector3 phongNormalC = bestTriNrmC; phongNormalC *= ABx / area;
// and finally, accumulate the three contributions and normalize.
phongNormal = phongNormalA + phongNormalB + phongNormalC;
phongNormal.normalize();
} else
phongNormal = pBestTri->_normal;
#ifdef AMBIENT_OCCLUSION
// Calculate ambient occlusion - throw AMBIENT_SAMPLES number of random rays
// in the hemisphere formed from the pointHitInWorldSpace and the normal vector...
int i=0;
coord totalLight = 0.f, maxLight = 0.f;
while (i<AMBIENT_SAMPLES) {
Vector3 ambientRay = phongNormal;
ambientRay._x += float(rand()-RAND_MAX/2)/(RAND_MAX/2);
ambientRay._y += float(rand()-RAND_MAX/2)/(RAND_MAX/2);
ambientRay._z += float(rand()-RAND_MAX/2)/(RAND_MAX/2);
float cosangle = dotCUDA(ambientRay, phongNormal);
if (cosangle<0.f) continue;
i++;
maxLight += cosangle;
ambientRay.normalize();
Vector3 temp(pointHitInWorldSpace);
temp += ambientRay*AMBIENT_RANGE;
const Triangle *dummy;
// Some objects need a "nudge", to avoid self-shadowing
//Vector3 nudgedPointHitInWorldSpace = pointHitInWorldSpace;
//nudgedPointHitInWorldSpace += ambientRay*.005f;
//if (!BVH_IntersectTriangles<true,true>(
// nudgedPointHitInWorldSpace, ambientRay, avoidSelf,
if (!BVH_IntersectTriangles<true,true>(
pTriangles,
pointHitInWorldSpace, ambientRay, avoidSelf,
dummy, temp, kAB, kAB, kAB)) {
// Accumulate contribution of this random ray
totalLight += cosangle;
}
}
// total ambient light, averaged over all random rays
color *= (AMBIENT/255.0f)*(totalLight/maxLight);
#else
// Don't calculate ambient occlusion, use the pre-calculated value from the model
// (assuming it exists!)
coord ambientOcclusionCoeff;
if (doPhongInterp) { // template-param, compile-time check
// we have a phong normal, so use the subtriangle areas
// to interpolate the 3 ambientOcclusionCoeff values
ambientOcclusionCoeff =
V1.w*BCx/area +
V2.w*CAx/area +
V3.w*ABx/area;
/*pVertices[pBestTri->_idx1]._ambientOcclusionCoeff*BCx/area +
pVertices[pBestTri->_idx2]._ambientOcclusionCoeff*CAx/area +
pVertices[pBestTri->_idx3]._ambientOcclusionCoeff*ABx/area;*/
} else {
// we don't have a phong normal, just average the 3 values of the vertices
ambientOcclusionCoeff = (V1.w + V2.w + V3.w)/3.f;
/*pVertices[pBestTri->_idx1]._ambientOcclusionCoeff +
pVertices[pBestTri->_idx2]._ambientOcclusionCoeff +
pVertices[pBestTri->_idx3]._ambientOcclusionCoeff)/3.f;*/
}
coord ambientFactor = (coord) ((AMBIENT*ambientOcclusionCoeff/255.0f)/255.0f);
color *= ambientFactor;
#endif // AMBIENT_OCCLUSION
/*
// Now, for all the lights...
for(unsigned i=0; i<scene._lights.size(); i++) {
Light& light = *scene._lights[i];
*/
Vector3& light = *cudaLightPosInWorldSpace;
// This light's diffuse and specular contribution
Pixel dColor = Pixel(); // start with black
// We calculate the vector from point hit, to light (both in world space).
Vector3 pointToLight = light;
pointToLight -= pointHitInWorldSpace;
bool inShadow = false;
if (doShadows) { // template-param, compile-time check
// this is our distance from the light (squared, i.e. we didn't take the sqrt)
coord distanceFromLightSq = pointToLight.lengthsq();
Vector3 shadowrayInWorldSpace = pointToLight;
shadowrayInWorldSpace /= sqrt(distanceFromLightSq);
int pDummy; // just to fill-in the param, not used for shadowrays
if (BVH_IntersectTriangles<true,doCulling>(
//pTriangles,
pointHitInWorldSpace, shadowrayInWorldSpace, avoidSelf,
pDummy, // dummy
light,
kAB, kAB, kAB)) // dummies
{
//continue; // we were in shadow, go to next light
inShadow = true;
}
}
if (!inShadow) {
// Diffuse color
pointToLight.normalize(); // vector from point to light (in world space)
coord intensity = dotCUDA(phongNormal, pointToLight);
if (intensity<0.f) {
; // in shadow, let it be in ambient
} else {
Pixel diffuse = pBestTri->_colorf;
diffuse *= (coord) (DIFFUSE*intensity/255.f); // diffuse set to a maximum of 130/255
dColor += diffuse;
if (doSpecular) { // template-param, compile-time check
// Specular color
// We will use the half vector: pointToLight + point to camera
Vector3 pointToCamera = *cudaEyePosInWorldSpace;
pointToCamera -= pointHitInWorldSpace;
pointToCamera.normalize();
Vector3 half = pointToLight;
half += pointToCamera;
half.normalize();
// use the interpolated phong normal!
coord intensity2 = dotCUDA(half, phongNormal);
if (intensity2>0.f) {
intensity2 *= intensity2;
intensity2 *= intensity2;
intensity2 *= intensity2;
intensity2 *= intensity2;
intensity2 *= intensity2;
dColor += Pixel(
(unsigned char)(SPECULAR*intensity2),
(unsigned char)(SPECULAR*intensity2),
(unsigned char)(SPECULAR*intensity2));
}
}
}
color += dColor;
}
// } for each light
if (!doReflections)
return color;
else {
originInWorldSpace = pointHitInWorldSpace;
const Vector3& nrm = phongNormal;
float c1 = -dotCUDA(rayInWorldSpace, nrm);
// Reflections:
//
// ray = ray - 2 (ray dot normal) normal
Vector3 reflectedRay = rayInWorldSpace;
reflectedRay += nrm*(2.0f*c1);
reflectedRay.normalize();
return
color
/* use backface culling for reflection rays: <true> */
+ Raytrace<depth+1, doSpecular, doPhongInterp, doReflections, doShadows, true>(
originInWorldSpace, reflectedRay, avoidSelf,
pTriangles,
cudaEyePosInWorldSpace, cudaLightPosInWorldSpace) * REFLECTIONS_RATE
/* ...but not for refraction rays: <false>
REMOVED, 2011/02/04
*/
;
}
}
// CUDA 1.2 has no recursion - I therefore use the magic of C++ templates:
// Compile-time recursion using the "depth" param!
//
// These are the template specializations that stop the compile-time recursion
// at MAX_RAY_DEPTH level.
#define STOP_RECURSION(a,b,c,d,e) \
template <> \
__device__ Pixel Raytrace<MAX_RAY_DEPTH,a,b,c,d,e>( \
Vector3 originInWorldSpace, Vector3 rayInWorldSpace, int avoidSelf, \
Triangle *pTriangles, \
Vector3 *cudaEyePosInWorldSpace, Vector3 *cudaLightPosInWorldSpace) \
{ \
return Pixel(0.f,0.f,0.f); \
}
#define f false
#define t true
STOP_RECURSION(f,f,f,f,f)
STOP_RECURSION(f,f,f,f,t)
STOP_RECURSION(f,f,f,t,f)
STOP_RECURSION(f,f,f,t,t)
STOP_RECURSION(f,f,t,f,f)
STOP_RECURSION(f,f,t,f,t)
STOP_RECURSION(f,f,t,t,f)
STOP_RECURSION(f,f,t,t,t)
STOP_RECURSION(f,t,f,f,f)
STOP_RECURSION(f,t,f,f,t)
STOP_RECURSION(f,t,f,t,f)
STOP_RECURSION(f,t,f,t,t)
STOP_RECURSION(f,t,t,f,f)
STOP_RECURSION(f,t,t,f,t)
STOP_RECURSION(f,t,t,t,f)
STOP_RECURSION(f,t,t,t,t)
STOP_RECURSION(t,f,f,f,f)
STOP_RECURSION(t,f,f,f,t)
STOP_RECURSION(t,f,f,t,f)
STOP_RECURSION(t,f,f,t,t)
STOP_RECURSION(t,f,t,f,f)
STOP_RECURSION(t,f,t,f,t)
STOP_RECURSION(t,f,t,t,f)
STOP_RECURSION(t,f,t,t,t)
STOP_RECURSION(t,t,f,f,f)
STOP_RECURSION(t,t,f,f,t)
STOP_RECURSION(t,t,f,t,f)
STOP_RECURSION(t,t,f,t,t)
STOP_RECURSION(t,t,t,f,f)
STOP_RECURSION(t,t,t,f,t)
STOP_RECURSION(t,t,t,t,f)
STOP_RECURSION(t,t,t,t,t)
#undef f
#undef t
// Main CUDA kernel, templated, to support each of the desired features:
//
// - using specular lights or not
// - doing Phong normal interpolation or not
// - doing reflections or not
// - doing shadows or not
// - doing anti-alias or not
//
template <bool doSpecular, bool doPhongInterp, bool doReflections, bool doShadows, bool antialias>
__global__ void CoreLoopTrianglesRaycaster(
int *pixels,
Matrix3 *cudaWorldToCameraSpace,
Triangle *pTriangles,
Vector3 *cudaEyePosInWorldSpace, Vector3 *cudaLightPosInWorldSpace,
unsigned *cudaMortonTable)
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx>=MAXX*MAXY)
return;
// without MORTON_ORDER
// int x = idx % MAXX;
// int y = idx / MAXX;
// This code will not allocate idx to a normal, left-to-right, top-to-bottom
// allocation of pixels. Instead, it will use the pre-calculated Morton order
// of pixels in screen space, to make threads end up reading the same (hopefully)
// BVH and triangle data (more coalesced, cache-friendly accesses).
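// Each table entry packs a pixel coordinate pair as (y << 16) | x, with entries ordered along the Z-order (Morton) curve.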
int x = int(cudaMortonTable[idx] & 0xFFFF);
int y = int((cudaMortonTable[idx] & 0xFFFF0000)>>16);
Pixel finalColor(0,0,0);
int pixelsTraced = 1;
if (antialias)
pixelsTraced = 4;
while(pixelsTraced--) {
// We will shoot a ray in camera space (from Eye to the screen point, so in camera
// space, from (0,0,0) to this:
coord xx = (coord)x;
coord yy = (coord)y;
if (antialias) {
// nudge in a cross pattern around the pixel center
xx += 0.25f - .5f*(pixelsTraced&1);
yy += 0.25f - .5f*((pixelsTraced&2)>>1);
}
coord lx = coord((MAXY/2)-yy)/SCREEN_DIST;
coord ly = coord(xx-(MAXX/2))/SCREEN_DIST;
coord lz = 1.0f;
Vector3 rayInCameraSpace(lx,ly,lz);
rayInCameraSpace.normalize();
// We will need the origin in world space
Vector3 originInWorldSpace = *cudaEyePosInWorldSpace;
// We have a rayInCameraSpace, and we want to use the BVH, which was constructed
// in World space, so we convert the ray in World space
Vector3 rayInWorldSpace = cudaWorldToCameraSpace->_row1 * rayInCameraSpace._x;
rayInWorldSpace += cudaWorldToCameraSpace->_row2 * rayInCameraSpace._y;
rayInWorldSpace += cudaWorldToCameraSpace->_row3 * rayInCameraSpace._z;
// in theory, this should not be required
rayInWorldSpace.normalize();
// Primary ray, level 0, and we want backface culling: <true>
finalColor += Raytrace<0, doSpecular, doPhongInterp, doReflections, doShadows, true>(
originInWorldSpace, rayInWorldSpace, -1,
pTriangles,
cudaEyePosInWorldSpace, cudaLightPosInWorldSpace);
}
if (antialias)
finalColor /= 4.f;
if (finalColor._r>255.0f) finalColor._r=255.0f;
if (finalColor._g>255.0f) finalColor._g=255.0f;
if (finalColor._b>255.0f) finalColor._b=255.0f;
int color = getColor(finalColor);
// without MORTON_ORDER
//pixels[idx] = color;
pixels[y*MAXX+x] = color;
}
// The bridge to the normal C++ world: templated, to include only the mode-specific code in each incantation
bool g_bFirstTime = true;
void CudaRender(
Matrix3 *cudaWorldToCameraSpace,
Vertex *cudaPtrVertices, Triangle *cudaPtrTriangles, float *cudaTriangleIntersectionData,
int *cudaTriIdxList, float *cudaBVHlimits, int *cudaBVHindexesOrTrilists,
Vector3 *cudaEyePosInWorldSpace, Vector3 *cudaLightPosInWorldSpace,
unsigned *cudaMortonTable)
{
if (g_bFirstTime) {
// The first time any of the CudaRender templates is called,
// bind the texture data!
g_bFirstTime = false;
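// Texture layout assumed by the kernels above: two float4 texels per vertex (position + normal, 8 floats)
// and five float4 texels per triangle (center, normal/plane, three precomputed edge equations, 20 floats),
// matching the sizes passed to cudaBindTexture below.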
cudaChannelFormatDesc channel1desc = cudaCreateChannelDesc<uint1>();
cudaBindTexture(NULL, &g_triIdxListTexture, cudaTriIdxList, &channel1desc, g_triIndexListNo*sizeof(uint1));
cudaChannelFormatDesc channel2desc = cudaCreateChannelDesc<float2>();
cudaBindTexture(NULL, &g_pCFBVHlimitsTexture, cudaBVHlimits, &channel2desc, g_pCFBVH_No*6*sizeof(float));
cudaChannelFormatDesc channel3desc = cudaCreateChannelDesc<int4>();
cudaBindTexture(NULL, &g_pCFBVHindexesOrTrilistsTexture, cudaBVHindexesOrTrilists, &channel3desc,
g_pCFBVH_No*sizeof(uint4));
cudaChannelFormatDesc channel4desc = cudaCreateChannelDesc<float4>();
cudaBindTexture(NULL, &g_verticesTexture, cudaPtrVertices, &channel4desc, g_verticesNo*8*sizeof(float));
cudaChannelFormatDesc channel5desc = cudaCreateChannelDesc<float4>();
cudaBindTexture(NULL, &g_trianglesTexture, cudaTriangleIntersectionData, &channel5desc, g_trianglesNo*20*sizeof(float));
}
int *pixels;
glBindTexture(GL_TEXTURE_2D, tex);
SAFE(cudaGLMapBufferObject((void**)&pixels, buffer));
if (g_bUsePoints) {
cudaMemset(pixels, 0x40, MAXX*MAXY*sizeof(unsigned)); // Clear all pixels to ambient
int blocksVertices = (g_verticesNo + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
CoreLoopVertices<<< blocksVertices, THREADS_PER_BLOCK >>>(
pixels, cudaWorldToCameraSpace, cudaEyePosInWorldSpace);
/*
cudaMemset(pixels, 0x40, MAXX*MAXY*sizeof(unsigned)); // Clear all pixels to ambient
int blocksTriangles = (g_trianglesNo + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
CoreLoopTriangles<<< blocksTriangles, THREADS_PER_BLOCK >>>(
pixels, cudaWorldToCameraSpace, cudaPtrTriangles, cudaEyePosInWorldSpace);*/
} else {
int blockPixels = (MAXY*MAXX + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
DBG_PUTS(1, "CoreLoopTrianglesRaycaster begins");
#define PAINT(bDoSpecular,bDoPhongInterp,bDoReflections,bDoShadows,bDoAntialias) \
CoreLoopTrianglesRaycaster<bDoSpecular,bDoPhongInterp,bDoReflections,bDoShadows,bDoAntialias> \
<<< blockPixels, THREADS_PER_BLOCK >>>( \
pixels, \
cudaWorldToCameraSpace, \
cudaPtrTriangles, \
cudaEyePosInWorldSpace, cudaLightPosInWorldSpace, \
cudaMortonTable);
// Being templated, one can use the CoreLoopTrianglesRaycaster in any form one wants.
// In theory, C++ compilers don't need this expansion (which I wrote with a simple Python script)
// Unfortunately, we can't pass runtime vars in template params, not even when they are enumerants
// or booleans...
PAINT( true , true , true , true , true )
}
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess) {
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
SAFE(cudaThreadSynchronize());
SAFE(cudaGLUnmapBufferObject(buffer));
// Use OpenGL texture to display the generated frame at lightning speed
// (the PBO buffer is already on the card, no useless PCI bus criss-cross)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, MAXX, MAXY, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, -1.0f);
glTexCoord2f(1.0f, 1.0f); glVertex2f(1.0f, -1.0f);
glTexCoord2f(1.0f, 0.0f); glVertex2f(1.0f, 1.0f);
glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, 1.0f);
glEnd();
// hack:
extern bool g_benchmark;
if (!g_benchmark) {
// Display the "Press H for help" message
glDisable(GL_LIGHTING);
glDisable(GL_TEXTURE_2D);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2f(-0.95, 0.9);
const char *help = "Press H for help";
for(unsigned o=0;o<strlen(help); o++)
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, help[o]);
}
SDL_GL_SwapBuffers();
}
|
ac210c7f2e84c0b9c2234946637048924b369ef8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdint.h>
#include <assert.h>
#include <time.h>
#include <math.h>
/*
Author: Andrew DiPrinzio
Course: EN605.417.FA
Assignment: Module 4
Resources: https://devblogs.nvidia.com/how-optimize-data-transfers-cuda-cc/
*/
static const uint32_t DEFAULT_NUM_THREADS = 1024;
static const uint32_t DEFAULT_BLOCK_SIZE = 16;
static void usage(){
printf("Usage: ./assignment4 [-t <num_threads>] [-b <block_size>] [-h]\n");
printf("\t-t: Specify the number of threads. <num_threads> must be greater than 0. Optional (default %u)\n", DEFAULT_NUM_THREADS);
printf("\t-b: Specify the size of each block. <block_size> must be greater than 0. Optional (default %u)\n", DEFAULT_BLOCK_SIZE);
}
// Structure that holds program arguments specifying number of threads/blocks
// to use.
typedef struct {
uint32_t num_threads;
uint32_t block_size;
} Arguments;
// Parse the command line arguments using getopt and return an Argument structure
// GetOpt requires the POSIX C Library
static Arguments parse_arguments(const int argc, char ** argv){
// Argument format string for getopt
static const char * _ARG_STR = "ht:b:";
// Initialize arguments to their default values
Arguments args;
args.num_threads = DEFAULT_NUM_THREADS;
args.block_size = DEFAULT_BLOCK_SIZE;
// Parse any command line options
int c;
int value;
while ((c = getopt(argc, argv, _ARG_STR)) != -1) {
switch (c) {
case 't':
value = atoi(optarg);
args.num_threads = value;
break;
case 'b':
// Normal argument
value = atoi(optarg);
args.block_size = value;
break;
case 'h':
// 'help': print usage, then exit
// note the fall through
usage();
default:
exit(-1);
}
}
return args;
}
//Kernel that adds two vectors
__global__
void add_ab(int *a, const int *b)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
a[thread_idx] += b[thread_idx];
}
// Helper function to generate a random number within a defined range
int random(int min, int max){
return min + rand() / (RAND_MAX / (max - min + 1) + 1);
}
void measure_kern_speed(Arguments args, int * a , int * b, int * a_d, int * b_d)
{
// create events for timing
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
int array_size = args.num_threads;
const unsigned int array_size_in_bytes = array_size * sizeof(int);
hipEventRecord(startEvent, 0);
hipMemcpy( a_d, a, array_size_in_bytes, hipMemcpyHostToDevice );
hipMemcpy( b_d, b, array_size_in_bytes, hipMemcpyHostToDevice );
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
const unsigned int num_blocks = array_size / args.block_size;
const unsigned int num_threads_per_blk = array_size/num_blocks;
float time;
hipEventElapsedTime(&time, startEvent, stopEvent);
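// GB/s = bytes / (elapsed_ms * 1e6); the factor of 2 below accounts for copying both input arrays.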
printf(" Host to Device bandwidth (GB/s): %f\n", array_size_in_bytes * 1e-6 * 2 / time);
/* Execute our kernel */
hipLaunchKernelGGL(( add_ab), dim3(num_blocks), dim3(num_threads_per_blk), 0, 0, a_d, b_d);
hipEventRecord(startEvent, 0);
hipMemcpy(a, a_d, array_size_in_bytes, hipMemcpyDeviceToHost );
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
printf(" Device to Host bandwidth (GB/s): %f\n", array_size_in_bytes * 1e-6 / time);
hipEventDestroy(startEvent);
hipEventDestroy(stopEvent);
}
void run_experemnt(Arguments args){
int * a;
int * b;
int *h_aPinned, *h_bPinned;
int array_size = args.num_threads;
const unsigned int array_size_in_bytes = array_size * sizeof(int);
/* Randomly generate input vectors and dynamically allocate their memory */
a = (int*)malloc(array_size_in_bytes);
b = (int*)malloc(array_size_in_bytes);
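// Pinned (page-locked) host buffers can be DMA-transferred directly by the GPU, so they typically reach
// higher transfer bandwidth than the pageable malloc'd buffers above -- that difference is what this benchmark measures.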
hipHostMalloc((void**)&h_aPinned, array_size_in_bytes);
hipHostMalloc((void**)&h_bPinned, array_size_in_bytes);
int i;
for (i = 0; i < array_size; i++) {
a[i] = random(0,100);
}
for (i = 0; i < array_size; i++) {
b[i] = random(0,100);
}
memcpy(h_aPinned, a, array_size_in_bytes);
memcpy(h_bPinned, b, array_size_in_bytes);
/* Declare pointers for GPU based params */
int *a_d;
int *b_d;
hipMalloc((void**)&a_d, array_size_in_bytes);
hipMalloc((void**)&b_d, array_size_in_bytes);
printf("Simple Pagaeable\n");
measure_kern_speed(args, a, b, a_d, b_d);
printf("Simple Pinned\n");
measure_kern_speed(args, h_aPinned, h_bPinned, a_d, b_d);
//free memory
hipFree(a_d);
hipFree(b_d);
hipHostFree(h_aPinned);
hipHostFree(h_bPinned);
free(a);
free(b);
}
int main(int argc, char ** argv)
{
Arguments args = parse_arguments(argc, argv);
printf("Num Threads: %u, Block Size: %u\n", args.num_threads, args.block_size);
run_experemnt(args);
return EXIT_SUCCESS;
}
| ac210c7f2e84c0b9c2234946637048924b369ef8.cu | #include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdint.h>
#include <assert.h>
#include <time.h>
#include <math.h>
/*
Author: Andrew DiPrinzio
Course: EN605.417.FA
Assignment: Module 4
Resources: https://devblogs.nvidia.com/how-optimize-data-transfers-cuda-cc/
*/
static const uint32_t DEFAULT_NUM_THREADS = 1024;
static const uint32_t DEFAULT_BLOCK_SIZE = 16;
static void usage(){
printf("Usage: ./assignment4 [-t <num_threads>] [-b <block_size>] [-h]\n");
printf("\t-t: Specify the number of threads. <num_threads> must be greater than 0. Optional (default %u)\n", DEFAULT_NUM_THREADS);
printf("\t-b: Specify the size of each block. <block_size> must be greater than 0. Optional (default %u)\n", DEFAULT_BLOCK_SIZE);
}
// Structure that holds program arguments specifying number of threads/blocks
// to use.
typedef struct {
uint32_t num_threads;
uint32_t block_size;
} Arguments;
// Parse the command line arguments using getopt and return an Argument structure
// GetOpt requires the POSIX C Library
static Arguments parse_arguments(const int argc, char ** argv){
// Argument format string for getopt
static const char * _ARG_STR = "ht:b:";
// Initialize arguments to their default values
Arguments args;
args.num_threads = DEFAULT_NUM_THREADS;
args.block_size = DEFAULT_BLOCK_SIZE;
// Parse any command line options
int c;
int value;
while ((c = getopt(argc, argv, _ARG_STR)) != -1) {
switch (c) {
case 't':
value = atoi(optarg);
args.num_threads = value;
break;
case 'b':
// Normal argument
value = atoi(optarg);
args.block_size = value;
break;
case 'h':
// 'help': print usage, then exit
// note the fall through
usage();
default:
exit(-1);
}
}
return args;
}
//Kernel that adds two vectors
__global__
void add_ab(int *a, const int *b)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
a[thread_idx] += b[thread_idx];
}
// Helper function to generate a random number within a defined range
int random(int min, int max){
return min + rand() / (RAND_MAX / (max - min + 1) + 1);
}
void measure_kern_speed(Arguments args, int * a , int * b, int * a_d, int * b_d)
{
// create events for timing
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
int array_size = args.num_threads;
const unsigned int array_size_in_bytes = array_size * sizeof(int);
cudaEventRecord(startEvent, 0);
cudaMemcpy( a_d, a, array_size_in_bytes, cudaMemcpyHostToDevice );
cudaMemcpy( b_d, b, array_size_in_bytes, cudaMemcpyHostToDevice );
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
const unsigned int num_blocks = array_size / args.block_size;
const unsigned int num_threads_per_blk = array_size/num_blocks;
float time;
cudaEventElapsedTime(&time, startEvent, stopEvent);
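// GB/s = bytes / (elapsed_ms * 1e6); the factor of 2 below accounts for copying both input arrays.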
printf(" Host to Device bandwidth (GB/s): %f\n", array_size_in_bytes * 1e-6 * 2 / time);
/* Execute our kernel */
add_ab<<<num_blocks, num_threads_per_blk>>>(a_d, b_d);
cudaEventRecord(startEvent, 0);
cudaMemcpy(a, a_d, array_size_in_bytes, cudaMemcpyDeviceToHost );
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
printf(" Device to Host bandwidth (GB/s): %f\n", array_size_in_bytes * 1e-6 / time);
cudaEventDestroy(startEvent);
cudaEventDestroy(stopEvent);
}
void run_experemnt(Arguments args){
int * a;
int * b;
int *h_aPinned, *h_bPinned;
int array_size = args.num_threads;
const unsigned int array_size_in_bytes = array_size * sizeof(int);
/* Randomly generate input vectors and dynamically allocate their memory */
a = (int*)malloc(array_size_in_bytes);
b = (int*)malloc(array_size_in_bytes);
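// Pinned (page-locked) host buffers can be DMA-transferred directly by the GPU, so they typically reach
// higher transfer bandwidth than the pageable malloc'd buffers above -- that difference is what this benchmark measures.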
cudaMallocHost((void**)&h_aPinned, array_size_in_bytes);
cudaMallocHost((void**)&h_bPinned, array_size_in_bytes);
int i;
for (i = 0; i < array_size; i++) {
a[i] = random(0,100);
}
for (i = 0; i < array_size; i++) {
b[i] = random(0,100);
}
memcpy(h_aPinned, a, array_size_in_bytes);
memcpy(h_bPinned, b, array_size_in_bytes);
/* Declare pointers for GPU based params */
int *a_d;
int *b_d;
cudaMalloc((void**)&a_d, array_size_in_bytes);
cudaMalloc((void**)&b_d, array_size_in_bytes);
printf("Simple Pagaeable\n");
measure_kern_speed(args, a, b, a_d, b_d);
printf("Simple Pinned\n");
measure_kern_speed(args, h_aPinned, h_bPinned, a_d, b_d);
//free memory
cudaFree(a_d);
cudaFree(b_d);
cudaFreeHost(h_aPinned);
cudaFreeHost(h_bPinned);
free(a);
free(b);
}
int main(int argc, char ** argv)
{
Arguments args = parse_arguments(argc, argv);
printf("Num Threads: %u, Block Size: %u\n", args.num_threads, args.block_size);
run_experemnt(args);
return EXIT_SUCCESS;
}
|
5e0704c5276d401d4926e00a4709d41b329a95cd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#define NUMBER 23
__global__ void uncoalesced();
__global__ void uncoalesced(int *d_A){
int blockIdfinal,threadIdfinal;
blockIdfinal=blockIdx.y+gridDim.y*blockIdx.x;
// printf(" lol %d %d %d",);
threadIdfinal=blockIdfinal*blockDim.x*blockDim.y + (threadIdx.y+threadIdx.x*blockDim.y);
if(threadIdfinal<NUMBER){
*(d_A+threadIdfinal)=100;
// printf(" %d",threadIdfinal);
}
}
__global__ void trial(int *d_A){
printf("this has to be fast %d %d \n ",threadIdx.x,threadIdx.y);
}
int main(int argc, char *argv[]){
// printf("\nUncoalesced accesses to the Global memory of Dram of GPU\n");
/* dim3 block(4,4);
dim3 thread(3,3);
trial<<<block,thread>>>();
hipDeviceSynchronize();*/
size_t size=NUMBER*sizeof(int);
int *d_A;
hipMalloc(&d_A,size);
dim3 block(5,4);
dim3 thread(10,1);
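// Launch 5*4 = 20 blocks of 10*1 threads (200 threads total); the bounds check in the kernel restricts writes to the first NUMBER (23) elements.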
hipLaunchKernelGGL(( uncoalesced), dim3(block),dim3(thread), 0, 0, d_A);
int *h_A;
h_A=(int *)malloc(sizeof(int)*NUMBER);
hipMemcpy(h_A,d_A,size,hipMemcpyDeviceToHost);
hipDeviceSynchronize();
for(int i=0;i<NUMBER;i++){
printf(" %d) %d",i,h_A[i]);
}
return 0;
}
| 5e0704c5276d401d4926e00a4709d41b329a95cd.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#define NUMBER 23
__global__ void uncoalesced();
__global__ void uncoalesced(int *d_A){
int blockIdfinal,threadIdfinal;
blockIdfinal=blockIdx.y+gridDim.y*blockIdx.x;
// printf(" lol %d %d %d",);
threadIdfinal=blockIdfinal*blockDim.x*blockDim.y + (threadIdx.y+threadIdx.x*blockDim.y);
if(threadIdfinal<NUMBER){
*(d_A+threadIdfinal)=100;
// printf(" %d",threadIdfinal);
}
}
__global__ void trial(int *d_A){
printf("this has to be fast %d %d \n ",threadIdx.x,threadIdx.y);
}
int main(int argc, char *argv[]){
// printf("\nUncoalesced accesses to the Global memory of Dram of GPU\n");
/* dim3 block(4,4);
dim3 thread(3,3);
trial<<<block,thread>>>();
cudaDeviceSynchronize();*/
size_t size=NUMBER*sizeof(int);
int *d_A;
cudaMalloc(&d_A,size);
dim3 block(5,4);
dim3 thread(10,1);
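// Launch 5*4 = 20 blocks of 10*1 threads (200 threads total); the bounds check in the kernel restricts writes to the first NUMBER (23) elements.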
uncoalesced<<<block,thread>>>(d_A);
int *h_A;
h_A=(int *)malloc(sizeof(int)*NUMBER);
cudaMemcpy(h_A,d_A,size,cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(int i=0;i<NUMBER;i++){
printf(" %d) %d",i,h_A[i]);
}
return 0;
}
|
9106e1f6f15fad49a97655e4bc4d95f6b05f5595.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifndef PADDLE_WITH_HIP
// HIP not support cusolver
#include <thrust/device_vector.h>
#include <algorithm>
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/cholesky_op.h"
#include "paddle/fluid/platform/dynload/cusolver.h"
namespace paddle {
namespace operators {
template <typename T>
class CholeskyGPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto& dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
const Tensor* x = context.Input<Tensor>("X");
Tensor* out = context.Output<Tensor>("Out");
bool upper = context.Attr<bool>("upper");
auto& dims = x->dims();
int batch_count = 1;
for (int i = 0; i < dims.size() - 2; i++) {
batch_count *= dims[i];
}
int m = dims[dims.size() - 1];
int tensor_size = batch_count * m * m;
const auto* x_data = x->data<T>();
auto* out_data = out->mutable_data<T>(context.GetPlace());
// matrices are assumed to be stored in column-major order in cusolver
hipblasFillMode_t uplo =
upper ? HIPBLAS_FILL_MODE_LOWER : HIPBLAS_FILL_MODE_UPPER;
// potrf is in-place, thus copy the triangular part of the input matrices to
// the output and set the other triangular part to 0 firstly
platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx,
tensor_size);
if (upper) {
MatrixBandPartFunctor<T> matrix_band_part_functor(
m, m, /* num_lower_diags */ 0, /* num_upper_diags */ m, x_data,
out_data);
for_range(matrix_band_part_functor);
} else {
MatrixBandPartFunctor<T> matrix_band_part_functor(
m, m, /* num_lower_diags */ m, /* num_upper_diags */ 0, x_data,
out_data);
for_range(matrix_band_part_functor);
}
auto info = memory::Alloc(dev_ctx, sizeof(int) * batch_count);
auto* info_ptr = reinterpret_cast<int*>(info->ptr());
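// One cusolver info/status entry per batch element; copied back to the host below to check that every factorization succeeded (0 means success).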
#if TORCH_HIP_VERSION >= 9020 && !defined(_WIN32)
if (batch_count > 1) {
std::vector<T*> output_ptrs;
for (int i = 0; i < batch_count; i++) {
output_ptrs.emplace_back(out_data + i * m * m);
}
thrust::device_vector<T*> dev_output_ptrs(output_ptrs.begin(),
output_ptrs.end());
PotrfBatched(dev_ctx, uplo, m,
thrust::raw_pointer_cast(dev_output_ptrs.data()), m,
info_ptr, batch_count);
// TODO(guosheng): There seems to be a bug in cusolver potrfBatched and we need
// to clear the upper triangle of the output. Remove this workaround once
// the bug is fixed.
if (!upper) {
MatrixBandPartFunctor<T> matrix_band_part_functor(
m, m, /* num_lower_diags */ m, /* num_upper_diags */ 0, out_data,
out_data);
for_range(matrix_band_part_functor);
}
} else {
#endif
for (int i = 0; i < batch_count; i++) {
Potrf(dev_ctx, uplo, m, out_data + i * m * m, m, info_ptr + i);
}
#if TORCH_HIP_VERSION >= 9020 && !defined(_WIN32)
}
#endif
// check the info
std::vector<int> error_info; // only for checking positive matrix
error_info.resize(batch_count);
memory::Copy(platform::CPUPlace(), error_info.data(),
BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
info_ptr, sizeof(int) * batch_count, dev_ctx.stream());
for (int i = 0; i < batch_count; ++i) {
PADDLE_ENFORCE_EQ(error_info[i], 0,
platform::errors::PreconditionNotMet(
"For batch [%d]: U(%d, %d) is zero, singular U.", i,
error_info[i], error_info[i]));
}
}
void Potrf(const platform::CUDADeviceContext& dev_ctx, hipblasFillMode_t uplo,
int n, T* A, int lda, int* info) const;
void PotrfBatched(const platform::CUDADeviceContext& dev_ctx,
hipblasFillMode_t uplo, int n, T* Aarray[], int lda,
int* info_array, int batch_size) const;
};
#define FUNC_WITH_TYPES(m) m(float, S) m(double, D)
#define POTRF_INSTANCE(T, C) \
template <> \
void CholeskyGPUKernel<T>::Potrf(const platform::CUDADeviceContext& dev_ctx, \
hipblasFillMode_t uplo, int n, T* A, \
int lda, int* info) const { \
auto handle = dev_ctx.cusolver_dn_handle(); \
int workspace_size = 0; \
PADDLE_ENFORCE_CUDA_SUCCESS( \
platform::dynload::cusolverDn##C##potrf_bufferSize( \
handle, uplo, n, A, lda, &workspace_size)); \
auto workspace = memory::Alloc(dev_ctx, workspace_size); \
T* workspace_ptr = reinterpret_cast<T*>(workspace->ptr()); \
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDn##C##potrf( \
handle, uplo, n, A, lda, workspace_ptr, workspace_size, info)); \
}
FUNC_WITH_TYPES(POTRF_INSTANCE);
#if TORCH_HIP_VERSION >= 9020 && !defined(_WIN32)
#define POTRF_BATCH_INSTANCE(T, C) \
template <> \
void CholeskyGPUKernel<T>::PotrfBatched( \
const platform::CUDADeviceContext& dev_ctx, hipblasFillMode_t uplo, \
int n, T* Aarray[], int lda, int* info_array, int batch_size) const { \
auto handle = dev_ctx.cusolver_dn_handle(); \
PADDLE_ENFORCE_CUDA_SUCCESS( \
platform::dynload::cusolverDn##C##potrfBatched( \
handle, uplo, n, Aarray, lda, info_array, batch_size)); \
}
FUNC_WITH_TYPES(POTRF_BATCH_INSTANCE);
#endif
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(cholesky, ops::CholeskyGPUKernel<float>,
ops::CholeskyGPUKernel<double>);
REGISTER_OP_CUDA_KERNEL(
cholesky_grad,
ops::CholeskyGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::CholeskyGradKernel<paddle::platform::CUDADeviceContext, double>);
#endif // not PADDLE_WITH_HIP
| 9106e1f6f15fad49a97655e4bc4d95f6b05f5595.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifndef PADDLE_WITH_HIP
// HIP not support cusolver
#include <thrust/device_vector.h>
#include <algorithm>
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/cholesky_op.h"
#include "paddle/fluid/platform/dynload/cusolver.h"
namespace paddle {
namespace operators {
template <typename T>
class CholeskyGPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto& dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
const Tensor* x = context.Input<Tensor>("X");
Tensor* out = context.Output<Tensor>("Out");
bool upper = context.Attr<bool>("upper");
auto& dims = x->dims();
int batch_count = 1;
for (int i = 0; i < dims.size() - 2; i++) {
batch_count *= dims[i];
}
int m = dims[dims.size() - 1];
int tensor_size = batch_count * m * m;
const auto* x_data = x->data<T>();
auto* out_data = out->mutable_data<T>(context.GetPlace());
// matrices are assumed to be stored in column-major order in cusolver
cublasFillMode_t uplo =
upper ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER;
// potrf is in-place, thus copy the triangular part of the input matrices to
// the output and set the other triangular part to 0 firstly
platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx,
tensor_size);
if (upper) {
MatrixBandPartFunctor<T> matrix_band_part_functor(
m, m, /* num_lower_diags */ 0, /* num_upper_diags */ m, x_data,
out_data);
for_range(matrix_band_part_functor);
} else {
MatrixBandPartFunctor<T> matrix_band_part_functor(
m, m, /* num_lower_diags */ m, /* num_upper_diags */ 0, x_data,
out_data);
for_range(matrix_band_part_functor);
}
auto info = memory::Alloc(dev_ctx, sizeof(int) * batch_count);
auto* info_ptr = reinterpret_cast<int*>(info->ptr());
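// One cusolver info/status entry per batch element; copied back to the host below to check that every factorization succeeded (0 means success).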
#if CUDA_VERSION >= 9020 && !defined(_WIN32)
if (batch_count > 1) {
std::vector<T*> output_ptrs;
for (int i = 0; i < batch_count; i++) {
output_ptrs.emplace_back(out_data + i * m * m);
}
thrust::device_vector<T*> dev_output_ptrs(output_ptrs.begin(),
output_ptrs.end());
PotrfBatched(dev_ctx, uplo, m,
thrust::raw_pointer_cast(dev_output_ptrs.data()), m,
info_ptr, batch_count);
// TODO(guosheng): There seems to be a bug in cusolver potrfBatched and we need
// to clear the upper triangle of the output. Remove this workaround once
// the bug is fixed.
if (!upper) {
MatrixBandPartFunctor<T> matrix_band_part_functor(
m, m, /* num_lower_diags */ m, /* num_upper_diags */ 0, out_data,
out_data);
for_range(matrix_band_part_functor);
}
} else {
#endif
for (int i = 0; i < batch_count; i++) {
Potrf(dev_ctx, uplo, m, out_data + i * m * m, m, info_ptr + i);
}
#if CUDA_VERSION >= 9020 && !defined(_WIN32)
}
#endif
// check the info
std::vector<int> error_info; // only for checking positive matrix
error_info.resize(batch_count);
memory::Copy(platform::CPUPlace(), error_info.data(),
BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
info_ptr, sizeof(int) * batch_count, dev_ctx.stream());
for (int i = 0; i < batch_count; ++i) {
PADDLE_ENFORCE_EQ(error_info[i], 0,
platform::errors::PreconditionNotMet(
"For batch [%d]: U(%d, %d) is zero, singular U.", i,
error_info[i], error_info[i]));
}
}
void Potrf(const platform::CUDADeviceContext& dev_ctx, cublasFillMode_t uplo,
int n, T* A, int lda, int* info) const;
void PotrfBatched(const platform::CUDADeviceContext& dev_ctx,
cublasFillMode_t uplo, int n, T* Aarray[], int lda,
int* info_array, int batch_size) const;
};
#define FUNC_WITH_TYPES(m) m(float, S) m(double, D)
#define POTRF_INSTANCE(T, C) \
template <> \
void CholeskyGPUKernel<T>::Potrf(const platform::CUDADeviceContext& dev_ctx, \
cublasFillMode_t uplo, int n, T* A, \
int lda, int* info) const { \
auto handle = dev_ctx.cusolver_dn_handle(); \
int workspace_size = 0; \
PADDLE_ENFORCE_CUDA_SUCCESS( \
platform::dynload::cusolverDn##C##potrf_bufferSize( \
handle, uplo, n, A, lda, &workspace_size)); \
auto workspace = memory::Alloc(dev_ctx, workspace_size); \
T* workspace_ptr = reinterpret_cast<T*>(workspace->ptr()); \
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDn##C##potrf( \
handle, uplo, n, A, lda, workspace_ptr, workspace_size, info)); \
}
FUNC_WITH_TYPES(POTRF_INSTANCE);
#if CUDA_VERSION >= 9020 && !defined(_WIN32)
#define POTRF_BATCH_INSTANCE(T, C) \
template <> \
void CholeskyGPUKernel<T>::PotrfBatched( \
const platform::CUDADeviceContext& dev_ctx, cublasFillMode_t uplo, \
int n, T* Aarray[], int lda, int* info_array, int batch_size) const { \
auto handle = dev_ctx.cusolver_dn_handle(); \
PADDLE_ENFORCE_CUDA_SUCCESS( \
platform::dynload::cusolverDn##C##potrfBatched( \
handle, uplo, n, Aarray, lda, info_array, batch_size)); \
}
FUNC_WITH_TYPES(POTRF_BATCH_INSTANCE);
#endif
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(cholesky, ops::CholeskyGPUKernel<float>,
ops::CholeskyGPUKernel<double>);
REGISTER_OP_CUDA_KERNEL(
cholesky_grad,
ops::CholeskyGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::CholeskyGradKernel<paddle::platform::CUDADeviceContext, double>);
#endif // not PADDLE_WITH_HIP
|
4bc2b5604eab2e64971006c206d0e2b365e18e71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Program performs matrix multiplication
*/
#include<stdio.h>
#include<hip/hip_runtime.h>
#include<assert.h>
#include<stdlib.h>
#include<sys/time.h>
#define VAL_LIMIT 10
#define DEBUG 0
#define TILE_WIDTH 32
hipError_t err;
/*
* @PARAM : Number of rows and columns
* @RETURN : Pointer to created Matrix
* @DESC :
* @SEE :
* @TODO :
*
*/
float* createMatrix(int r,int c)
{
float *temp;
temp = (float*) malloc(sizeof(float)*r*c);
return temp;
}
/*
* @DESC : Frees the memory allocated to the matrix
* @PARAM : pointer to the matrix
* @RETURN : Nothing
* @SEE :
* @TODO :
*
*/
void destroyMAtrix(float *mat)
{
free(mat);
}
/*
* @PARAM : Device pointer, number of rows and columns
* @RETURN : Nothing
* @DESC : Creates a matrix of float * rows * columns on device
* @SEE :
* @TODO :
*
*/
void createMatrixDevice(float **m, int r, int c)
{
int size = sizeof(float)*r*c;
err = hipSuccess;
err = hipMalloc(m, size);
if (err != hipSuccess)
{
fprintf(stderr,"%s, %d.\n %s.",__FILE__,__LINE__,hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*
* @PARAM : Host pointer, Device pointer, Number of rows and columns
* @RETURN : Nothing
* @DESC : Copies data from host pointer to device pointer
* @SEE :
* @TODO :
*
*/
void transferToDevice(float *hostptr, float *deviceptr, int r, int c)
{
int size = sizeof(float) * r*c;
err = hipSuccess;
err = hipMemcpy(deviceptr,hostptr,size,hipMemcpyHostToDevice);
if (err != hipSuccess)
{
//fprintf(stderr,"%s, %d.\n %s.",__FILE__,__LINE__,hipGetErrorString(err));
fprintf(stderr,"%s",hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void transferFromDevice(float *hostptr, float *deviceptr, int r, int c)
{
int size = sizeof(float) * r*c;
err = hipSuccess;
err = hipMemcpy(hostptr,deviceptr,size,hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr,"%s, %d.\n %s.",__FILE__,__LINE__,hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void initMatrix(float *m,int r,int c)
{
for(int i=0;i<r;i++)
{
for(int j=0;j<c;j++)
{
m[ i*c +j ] = (float) (rand()%VAL_LIMIT);
}
}
}
void matMul(float *A, float *B, float *C, int Aw, int Ah, int Bw)
{
for( int i=0 ; i<Ah; i++)
{
for( int j=0; j<Bw; j++)
{
float sum=0;
for( int k=0; k<Aw; k++)
{
float a = A[i*Aw+k];
float b = B[k*Bw +j];
sum += a*b;
if(DEBUG)
printf(" %d * %d +",i*Aw+k,k*Bw+j);
}
C[i*Bw+j] = sum;
if(DEBUG)
printf("%d\n",i*Bw+j);
}
}
}
__global__
void matMulKernel(float *A, float *B, float *C, int Ac, int Ar, int Bc)
{
int row = blockIdx.x * TILE_WIDTH + threadIdx.x;
int col = blockIdx.y * TILE_WIDTH + threadIdx.y;
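// Note: pMatMul sizes gridDim.x from Bw and gridDim.y from Ar, so this row/col mapping is only consistent when the output matrix is square (Ar == Bw), as it is in main().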
float sum = 0.0f;
for ( int i=0 ; i<Ac; i++)
{
sum += A[row *Ac + i] * B[ i *Bc + col];
}
C[row*Bc+col] = sum;
}
void pMatMul(float *A,float *B,float *C, int Ac, int Ar, int Bw)
{
dim3 gridProp(ceil(Bw/TILE_WIDTH), ceil(Ar/TILE_WIDTH), 1);
dim3 blockProp(TILE_WIDTH,TILE_WIDTH,1);
hipLaunchKernelGGL(( matMulKernel), dim3(gridProp),dim3(blockProp), 0, 0, A, B, C, Ac, Ar, Bw);
}
void printMat(float *mat, int r, int c)
{
for(int i=0;i<r;i++)
{
for(int j=0;j<c;j++)
{
printf("%4.1f \t",mat[i*c+j]);
}
printf("\n");
}
}
bool check(float *mat, float *mat2, int r, int c)
{
for(int i=0;i<r;i++)
{
for(int j=0;j<c;j++)
{
if( mat2[i*c+j] != mat[i*c+j])
return false;
}
}
return true;
}
int main()
{
float *h_A, *h_B, *h_C,*h_D;
float *d_A, *d_B, *d_C;
float milisecs;
unsigned int Ar=1024, Ac=1024;
unsigned int Br=1024, Bc=1024;
unsigned int Cr=1024, Cc=1024;
assert(Ac == Br);
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
h_A = createMatrix(Ar, Ac);
assert(h_A != NULL);
h_B = createMatrix(Br, Bc);
assert(h_B != NULL);
h_C = createMatrix(Cr, Cc);
assert(h_C != NULL);
h_D = createMatrix(Cr, Cc);
assert(h_D != NULL);
initMatrix(h_A, Ar, Ac);
initMatrix(h_B, Br, Bc);
if(DEBUG){
printf("Matrix A:\n");
printMat(h_A, Ar, Ac);
printf("Matrix B:\n");
printMat(h_B, Br, Bc);
}
matMul(h_A, h_B, h_C, Ac, Ar, Bc);
if(DEBUG){
printf("Matrix C:\n");
printMat(h_C, Cr, Cc);
}
createMatrixDevice(&d_A, Ar, Ac);
createMatrixDevice(&d_B, Br, Bc);
createMatrixDevice(&d_C, Cr, Cc);
transferToDevice(h_A, d_A, Ar, Ac);
transferToDevice(h_B, d_B, Br, Bc);
hipEventRecord(start);
pMatMul(d_A, d_B, d_C, Ac, Ar, Bc);
hipEventRecord(stop);
transferFromDevice(h_D, d_C, Cr, Cc);
hipEventSynchronize(stop);
hipEventElapsedTime(&milisecs,start,stop);
printf("Time required for parallel execution %f\n",milisecs);
if(DEBUG){
printf("Matrix D:\n");
printMat(h_D, Cr, Cc);
}
if(check(h_D, h_C, Cr, Cc))
printf("Success !! :) \n");
else
printf("Failed !! :( \n");
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
destroyMAtrix(h_A);
destroyMAtrix(h_B);
destroyMAtrix(h_D);
destroyMAtrix(h_C);
return 0;
}
| 4bc2b5604eab2e64971006c206d0e2b365e18e71.cu | /*
Program performs matrix multiplication
*/
#include<stdio.h>
#include<cuda.h>
#include<assert.h>
#include<stdlib.h>
#include<sys/time.h>
#define VAL_LIMIT 10
#define DEBUG 0
#define TILE_WIDTH 32
cudaError_t err;
/*
* @PARAM : Number of rows and columns
* @RETURN : Pointer to created Matrix
* @DESC :
* @SEE :
* @TODO :
*
*/
float* createMatrix(int r,int c)
{
float *temp;
temp = (float*) malloc(sizeof(float)*r*c);
return temp;
}
/*
* @DESC : Frees the memory allocated to the matrix
* @PARAM : pointer to the matrix
* @RETURN : Nothing
* @SEE :
* @TODO :
*
*/
void destroyMAtrix(float *mat)
{
free(mat);
}
/*
* @PARAM : Device pointer, number of rows and columns
* @RETURN : Nothing
* @DESC : Creates a matrix of float * rows * columns on device
* @SEE :
* @TODO :
*
*/
void createMatrixDevice(float **m, int r, int c)
{
int size = sizeof(float)*r*c;
err = cudaSuccess;
err = cudaMalloc(m, size);
if (err != cudaSuccess)
{
fprintf(stderr,"%s, %d.\n %s.",__FILE__,__LINE__,cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*
* @PARAM : Host pointer, Device pointer, Number of rows and columns
* @RETURN : Nothing
* @DESC : Copies data from host pointer to device pointer
* @SEE :
* @TODO :
*
*/
void transferToDevice(float *hostptr, float *deviceptr, int r, int c)
{
int size = sizeof(float) * r*c;
err = cudaSuccess;
err = cudaMemcpy(deviceptr,hostptr,size,cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
//fprintf(stderr,"%s, %d.\n %s.",__FILE__,__LINE__,cudaGetErrorString(err));
fprintf(stderr,"%s",cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void transferFromDevice(float *hostptr, float *deviceptr, int r, int c)
{
int size = sizeof(float) * r*c;
err = cudaSuccess;
err = cudaMemcpy(hostptr,deviceptr,size,cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr,"%s, %d.\n %s.",__FILE__,__LINE__,cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void initMatrix(float *m,int r,int c)
{
for(int i=0;i<r;i++)
{
for(int j=0;j<c;j++)
{
m[ i*c +j ] = (float) (rand()%VAL_LIMIT);
}
}
}
void matMul(float *A, float *B, float *C, int Aw, int Ah, int Bw)
{
for( int i=0 ; i<Ah; i++)
{
for( int j=0; j<Bw; j++)
{
float sum=0;
for( int k=0; k<Aw; k++)
{
float a = A[i*Aw+k];
float b = B[k*Bw +j];
sum += a*b;
if(DEBUG)
printf(" %d * %d +",i*Aw+k,k*Bw+j);
}
C[i*Bw+j] = sum;
if(DEBUG)
printf("%d\n",i*Bw+j);
}
}
}
__global__
void matMulKernel(float *A, float *B, float *C, int Ac, int Ar, int Bc)
{
int row = blockIdx.x * TILE_WIDTH + threadIdx.x;
int col = blockIdx.y * TILE_WIDTH + threadIdx.y;
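// Note: pMatMul sizes gridDim.x from Bw and gridDim.y from Ar, so this row/col mapping is only consistent when the output matrix is square (Ar == Bw), as it is in main().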
float sum = 0.0f;
for ( int i=0 ; i<Ac; i++)
{
sum += A[row *Ac + i] * B[ i *Bc + col];
}
C[row*Bc+col] = sum;
}
void pMatMul(float *A,float *B,float *C, int Ac, int Ar, int Bw)
{
dim3 gridProp(ceil(Bw/TILE_WIDTH), ceil(Ar/TILE_WIDTH), 1);
dim3 blockProp(TILE_WIDTH,TILE_WIDTH,1);
matMulKernel<<<gridProp,blockProp>>>(A, B, C, Ac, Ar, Bw);
}
void printMat(float *mat, int r, int c)
{
for(int i=0;i<r;i++)
{
for(int j=0;j<c;j++)
{
printf("%4.1f \t",mat[i*c+j]);
}
printf("\n");
}
}
bool check(float *mat, float *mat2, int r, int c)
{
for(int i=0;i<r;i++)
{
for(int j=0;j<c;j++)
{
if( mat2[i*c+j] != mat[i*c+j])
return false;
}
}
return true;
}
int main()
{
float *h_A, *h_B, *h_C,*h_D;
float *d_A, *d_B, *d_C;
float milisecs;
unsigned int Ar=1024, Ac=1024;
unsigned int Br=1024, Bc=1024;
unsigned int Cr=1024, Cc=1024;
assert(Ac == Br);
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
h_A = createMatrix(Ar, Ac);
assert(h_A != NULL);
h_B = createMatrix(Br, Bc);
assert(h_B != NULL);
h_C = createMatrix(Cr, Cc);
assert(h_C != NULL);
h_D = createMatrix(Cr, Cc);
assert(h_D != NULL);
initMatrix(h_A, Ar, Ac);
initMatrix(h_B, Br, Bc);
if(DEBUG){
printf("Matrix A:\n");
printMat(h_A, Ar, Ac);
printf("Matrix B:\n");
printMat(h_B, Br, Bc);
}
matMul(h_A, h_B, h_C, Ac, Ar, Bc);
if(DEBUG){
printf("Matrix C:\n");
printMat(h_C, Cr, Cc);
}
createMatrixDevice(&d_A, Ar, Ac);
createMatrixDevice(&d_B, Br, Bc);
createMatrixDevice(&d_C, Cr, Cc);
transferToDevice(h_A, d_A, Ar, Ac);
transferToDevice(h_B, d_B, Br, Bc);
cudaEventRecord(start);
pMatMul(d_A, d_B, d_C, Ac, Ar, Bc);
cudaEventRecord(stop);
transferFromDevice(h_D, d_C, Cr, Cc);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milisecs,start,stop);
printf("Time required for parallel execution %f\n",milisecs);
if(DEBUG){
printf("Matrix D:\n");
printMat(h_D, Cr, Cc);
}
if(check(h_D, h_C, Cr, Cc))
printf("Success !! :) \n");
else
printf("Failed !! :( \n");
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
destroyMAtrix(h_A);
destroyMAtrix(h_B);
destroyMAtrix(h_D);
destroyMAtrix(h_C);
return 0;
}
|
bbf6dee9da2501be751558aa28b176d9fbdb1fc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/diag_kernel.h"
#include "paddle/phi/kernels/funcs/diag_functor.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
// Extract the diagonal of a matrix 'dout' to a matrix 'dx'
template <typename T>
__global__ void ExtractDiagonalKernel(const T* dout,
T* dx,
std::ptrdiff_t start,
std::ptrdiff_t dx_length,
const std::ptrdiff_t sumStride,
const std::ptrdiff_t xStride) {
for (std::ptrdiff_t idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < dx_length;
idx += gridDim.x * blockDim.x) {
const std::ptrdiff_t outOffset = start + sumStride * idx;
dx[xStride * idx] = dout[outOffset];
}
}
// Paste a vector 'dout' to the diagonal of a matrix 'dx'
template <typename T>
__global__ void PasteDiagonalKernel(const T* dout,
T* dx,
std::ptrdiff_t start,
std::ptrdiff_t size,
const std::ptrdiff_t sumStride,
const std::ptrdiff_t outStride) {
for (std::ptrdiff_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size;
idx += gridDim.x * blockDim.x) {
std::ptrdiff_t xOffset = start + sumStride * idx;
dx[xOffset] = dout[outStride * idx];
}
}
template <typename T, typename Context>
void DiagGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
int offset,
DenseTensor* x_grad) {
T* dx_data = dev_ctx.template Alloc<T>(x_grad);
auto* dout_data = out_grad.data<T>();
auto dx_dims = x_grad->dims();
auto dout_dims = out_grad.dims();
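  // Choose a 1-D launch configuration: the block size is capped by the
  // device's max threads per block, and the grid size by the number of
  // blocks needed to cover the device's physical thread count.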
auto GetBlockGridSize = [&dev_ctx](int64_t size) {
const int64_t block_size =
::min(size, static_cast<int64_t>(dev_ctx.GetMaxThreadsPerBlock()));
int64_t max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int64_t max_blocks =
::max(((max_threads - 1) / block_size + 1), static_cast<int64_t>(1));
const int64_t grid_size =
::min(max_blocks, (size + block_size - 1) / block_size);
return std::tuple<int64_t, int64_t>{block_size, grid_size};
};
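  // A 1-D x_grad means the forward op placed a vector on a diagonal of a
  // matrix, so its gradient is read back from that diagonal of out_grad.
  // A 2-D x_grad means the forward op extracted a diagonal into a vector, so
  // out_grad is written onto the corresponding diagonal of a zeroed x_grad.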
if (dx_dims.size() == 1) {
auto dx_length = dx_dims[0];
auto size = (offset > 0) ? dx_length + offset : dx_length - offset;
int dx_stride = phi::funcs::ComputeStride(0, dx_dims);
if (size > 0) {
auto dout_stride_0 = phi::funcs::ComputeStride(0, dout_dims);
auto dout_stride_1 = phi::funcs::ComputeStride(1, dout_dims);
auto start =
(offset >= 0 ? offset * dout_stride_1 : -offset * dout_stride_0);
std::tuple<int64_t, int64_t> block_grid_size = GetBlockGridSize(size);
hipLaunchKernelGGL(( ExtractDiagonalKernel<T>)
, dim3(std::get<1>(block_grid_size)),
dim3(std::get<0>(block_grid_size)),
0,
dev_ctx.stream(), dout_data,
dx_data,
start,
dx_length,
dout_stride_0 + dout_stride_1,
dx_stride);
}
} else {
phi::funcs::SetConstant<Context, T> set_padding_value;
set_padding_value(dev_ctx, x_grad, static_cast<T>(0));
int dx_stride_0 = phi::funcs::ComputeStride(0, dx_dims);
int dx_stride_1 = phi::funcs::ComputeStride(1, dx_dims);
int64_t size;
if (offset > 0) {
size = ::min(dx_dims[0], dx_dims[1] - offset);
} else {
size = ::min(dx_dims[0] + offset, dx_dims[1]);
}
if (size > 0) {
auto start = (offset >= 0 ? offset * dx_stride_1 : -offset * dx_stride_0);
auto dout_stride_0 = phi::funcs::ComputeStride(0, dout_dims);
std::tuple<int64_t, int64_t> block_grid_size = GetBlockGridSize(size);
hipLaunchKernelGGL(( PasteDiagonalKernel<T>), dim3(std::get<1>(block_grid_size)),
dim3(std::get<0>(block_grid_size)),
0,
dev_ctx.stream(), dout_data,
dx_data,
start,
size,
dx_stride_0 + dx_stride_1,
dout_stride_0);
}
}
}
} // namespace phi
PD_REGISTER_KERNEL(diag_grad,
GPU,
ALL_LAYOUT,
phi::DiagGradKernel,
phi::dtype::float16,
int,
int64_t,
float,
double) {}
| bbf6dee9da2501be751558aa28b176d9fbdb1fc5.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/diag_kernel.h"
#include "paddle/phi/kernels/funcs/diag_functor.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
// Extract the diagonal of a matrix 'dout' to a matrix 'dx'
template <typename T>
__global__ void ExtractDiagonalKernel(const T* dout,
T* dx,
std::ptrdiff_t start,
std::ptrdiff_t dx_length,
const std::ptrdiff_t sumStride,
const std::ptrdiff_t xStride) {
for (std::ptrdiff_t idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < dx_length;
idx += gridDim.x * blockDim.x) {
const std::ptrdiff_t outOffset = start + sumStride * idx;
dx[xStride * idx] = dout[outOffset];
}
}
// Paste a vector 'dout' to the diagonal of a matrix 'dx'
template <typename T>
__global__ void PasteDiagonalKernel(const T* dout,
T* dx,
std::ptrdiff_t start,
std::ptrdiff_t size,
const std::ptrdiff_t sumStride,
const std::ptrdiff_t outStride) {
for (std::ptrdiff_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size;
idx += gridDim.x * blockDim.x) {
std::ptrdiff_t xOffset = start + sumStride * idx;
dx[xOffset] = dout[outStride * idx];
}
}
template <typename T, typename Context>
void DiagGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
int offset,
DenseTensor* x_grad) {
T* dx_data = dev_ctx.template Alloc<T>(x_grad);
auto* dout_data = out_grad.data<T>();
auto dx_dims = x_grad->dims();
auto dout_dims = out_grad.dims();
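  // Choose a 1-D launch configuration: the block size is capped by the
  // device's max threads per block, and the grid size by the number of
  // blocks needed to cover the device's physical thread count.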
auto GetBlockGridSize = [&dev_ctx](int64_t size) {
const int64_t block_size =
std::min(size, static_cast<int64_t>(dev_ctx.GetMaxThreadsPerBlock()));
int64_t max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int64_t max_blocks =
std::max(((max_threads - 1) / block_size + 1), static_cast<int64_t>(1));
const int64_t grid_size =
std::min(max_blocks, (size + block_size - 1) / block_size);
return std::tuple<int64_t, int64_t>{block_size, grid_size};
};
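  // A 1-D x_grad means the forward op placed a vector on a diagonal of a
  // matrix, so its gradient is read back from that diagonal of out_grad.
  // A 2-D x_grad means the forward op extracted a diagonal into a vector, so
  // out_grad is written onto the corresponding diagonal of a zeroed x_grad.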
if (dx_dims.size() == 1) {
auto dx_length = dx_dims[0];
auto size = (offset > 0) ? dx_length + offset : dx_length - offset;
int dx_stride = phi::funcs::ComputeStride(0, dx_dims);
if (size > 0) {
auto dout_stride_0 = phi::funcs::ComputeStride(0, dout_dims);
auto dout_stride_1 = phi::funcs::ComputeStride(1, dout_dims);
auto start =
(offset >= 0 ? offset * dout_stride_1 : -offset * dout_stride_0);
std::tuple<int64_t, int64_t> block_grid_size = GetBlockGridSize(size);
ExtractDiagonalKernel<T>
<<<std::get<1>(block_grid_size),
std::get<0>(block_grid_size),
0,
dev_ctx.stream()>>>(dout_data,
dx_data,
start,
dx_length,
dout_stride_0 + dout_stride_1,
dx_stride);
}
} else {
phi::funcs::SetConstant<Context, T> set_padding_value;
set_padding_value(dev_ctx, x_grad, static_cast<T>(0));
int dx_stride_0 = phi::funcs::ComputeStride(0, dx_dims);
int dx_stride_1 = phi::funcs::ComputeStride(1, dx_dims);
int64_t size;
if (offset > 0) {
size = std::min(dx_dims[0], dx_dims[1] - offset);
} else {
size = std::min(dx_dims[0] + offset, dx_dims[1]);
}
if (size > 0) {
auto start = (offset >= 0 ? offset * dx_stride_1 : -offset * dx_stride_0);
auto dout_stride_0 = phi::funcs::ComputeStride(0, dout_dims);
std::tuple<int64_t, int64_t> block_grid_size = GetBlockGridSize(size);
PasteDiagonalKernel<T><<<std::get<1>(block_grid_size),
std::get<0>(block_grid_size),
0,
dev_ctx.stream()>>>(dout_data,
dx_data,
start,
size,
dx_stride_0 + dx_stride_1,
dout_stride_0);
}
}
}
} // namespace phi
PD_REGISTER_KERNEL(diag_grad,
GPU,
ALL_LAYOUT,
phi::DiagGradKernel,
phi::dtype::float16,
int,
int64_t,
float,
double) {}
|
69509a5057864c80dc9a0e0b9cd9c7b9e57e17b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
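//For example, a pixel with R = 100, G = 200 and B = 50 maps to
//.299f*100 + .587f*200 + .114f*50 = 153.0, which is stored as the grey value 153.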
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int x = threadIdx.x;
int y = blockIdx.x;
// int x = threadIdx.x;
// int y = threadIdx.y;
if (y < numRows && x < numCols){
int index = numCols*y + x;
uchar4 rgba = rgbaImage[index];
greyImage[index] = (unsigned char) (.299f * rgba.x + .587f * rgba.y + .114f * rgba.z);
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
  //One thread per pixel: one block per image row and one thread per column
  //(assumes numCols does not exceed the device's maximum threads per block).
  const dim3 blockSize(numCols, 1, 1);
  const dim3 gridSize(numRows, 1, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 69509a5057864c80dc9a0e0b9cd9c7b9e57e17b2.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
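//For example, a pixel with R = 100, G = 200 and B = 50 maps to
//.299f*100 + .587f*200 + .114f*50 = 153.0, which is stored as the grey value 153.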
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int x = threadIdx.x;
int y = blockIdx.x;
// int x = threadIdx.x;
// int y = threadIdx.y;
if (y < numRows && x < numCols){
int index = numCols*y + x;
uchar4 rgba = rgbaImage[index];
greyImage[index] = (unsigned char) (.299f * rgba.x + .587f * rgba.y + .114f * rgba.z);
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
  //One thread per pixel: one block per image row and one thread per column
  //(assumes numCols does not exceed the device's maximum threads per block).
  const dim3 blockSize(numCols, 1, 1);
  const dim3 gridSize(numRows, 1, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
62c2871b5b5598ed3abd29f9696dd504d354bbe9.hip | // !!! This is a file automatically generated by hipify!!!
//===============================================================================
// Name : MatrixRotate.cpp
// Author : Soumil Datta
// Version : 1.0
// Description : CUDA program to rotate an NxN matrix by 90 degrees to the right
//===============================================================================
#include <iostream>
using std::cout; using std::endl;
#include <hip/hip_runtime.h>
unsigned int dimension { 1u };
__global__ void transpose(float *matrix, const unsigned int dimension);
__global__ void reverse(float *matrix, const unsigned int dimension);
bool CPUSolveCheck(float *originalMatrix, float *solvedMatrix);
void printMatrix(const float *matrix);
int main(int argc, char* argv[]) {
if(argc != 2) {
cout << "Error: Enter dimension as argument" << endl;
exit(EXIT_FAILURE);
}
cout << "Rotating matrix of dimension " << argv[1] << endl;
dimension = atoi(argv[1]);
const size_t size { (dimension * dimension) * sizeof(float) };
float *h_matrix { (float *)malloc(size) };
if(h_matrix == nullptr) {
cout << "Host matrix memory allocation unsuccessful" << endl;
exit(EXIT_FAILURE);
}
// Fill matrix
for(auto i { 0u }; i < dimension * dimension; ++i) {
h_matrix[i] = rand()/(float)RAND_MAX;
}
// Copy array to be used while checking output
float *h_matrix_copy { (float *)malloc(size) };
memcpy(h_matrix_copy, h_matrix, size);
float *d_matrix = nullptr;
hipMalloc((void **)&d_matrix, size);
hipMemcpy(d_matrix, h_matrix, size, hipMemcpyHostToDevice);
const dim3 threadsPerBlock(16, 16);
const dim3 blocksPerGrid((dimension / threadsPerBlock.x) + 1, (dimension / threadsPerBlock.y) + 1);
hipLaunchKernelGGL(( transpose), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_matrix, dimension);
hipDeviceSynchronize();
hipLaunchKernelGGL(( reverse), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_matrix, dimension);
hipDeviceSynchronize();
hipMemcpy(h_matrix, d_matrix, size, hipMemcpyDeviceToHost);
hipFree(d_matrix);
cout << endl << endl;
if(CPUSolveCheck(h_matrix_copy, h_matrix)) cout << "GPU Rotate Successful" << endl;
else cout << "GPU Rotate Unsuccessful" << endl;
cout << "Program complete" << endl;
free(h_matrix);
free(h_matrix_copy);
return 0;
}
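// A 90-degree clockwise rotation is done in two passes on the device:
// first transpose the matrix in place, then reverse each row.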
__global__ void transpose(float *matrix, const unsigned int dimension) {
const int i = blockDim.x * blockIdx.x + threadIdx.x;
const int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < (dimension) && j < (dimension) && j > i) {
const auto index { dimension * i + j };
const auto invIndex { dimension * j + i };
const auto temp { matrix[index] };
matrix[index] = matrix[invIndex];
matrix[invIndex] = temp;
}
}
__global__ void reverse(float *matrix, const unsigned int dimension) {
const int i = blockDim.x * blockIdx.x + threadIdx.x;
const int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < (dimension) && j < (dimension / 2)) {
const auto index { dimension * i + j };
const auto revIndex { (dimension * i) + dimension - 1 - j };
const auto temp { matrix[index] };
matrix[index] = matrix[revIndex];
matrix[revIndex] = temp;
}
}
bool CPUSolveCheck(float *originalMatrix, float *solvedMatrix) {
// Solve CPU-side with OriginalMatrix
for(auto i { 0u }; i < dimension; ++i) {
for(auto j { i + 1 }; j < dimension; ++j) {
const auto index { dimension * i + j };
const auto invIndex { dimension * j + i };
const auto temp { originalMatrix[index] };
originalMatrix[index] = originalMatrix[invIndex];
originalMatrix[invIndex] = temp;
}
}
for(auto i { 0u }; i < dimension; ++i) {
for(auto j { 0u }; j < dimension / 2; ++j) {
const auto index { dimension * i + j };
const auto revIndex { (dimension * i) + dimension - 1 - j };
const auto temp { originalMatrix[index] };
originalMatrix[index] = originalMatrix[revIndex];
originalMatrix[revIndex] = temp;
}
}
// Check GPU output vs cpu output
for(auto i { 0u }; i < dimension; ++i) {
for(auto j { 0u }; j < dimension / 2; ++j) {
const auto index { dimension * i + j };
if(!(originalMatrix[index] == solvedMatrix[index])) return false;
}
}
return true;
}
// Utility
void printMatrix(const float *matrix) {
for(int i = 0; i < dimension * dimension; ++i) {
if(i != 0 && i % dimension == 0) cout << endl;
cout << matrix[i] << "\t";
}
cout << endl;
}
| 62c2871b5b5598ed3abd29f9696dd504d354bbe9.cu | //===============================================================================
// Name : MatrixRotate.cpp
// Author : Soumil Datta
// Version : 1.0
// Description : CUDA program to rotate an NxN matrix by 90 degrees to the right
//===============================================================================
#include <iostream>
using std::cout; using std::endl;
#include <cuda_runtime.h>
unsigned int dimension { 1u };
__global__ void transpose(float *matrix, const unsigned int dimension);
__global__ void reverse(float *matrix, const unsigned int dimension);
bool CPUSolveCheck(float *originalMatrix, float *solvedMatrix);
void printMatrix(const float *matrix);
int main(int argc, char* argv[]) {
if(argc != 2) {
cout << "Error: Enter dimension as argument" << endl;
exit(EXIT_FAILURE);
}
cout << "Rotating matrix of dimension " << argv[1] << endl;
dimension = atoi(argv[1]);
const size_t size { (dimension * dimension) * sizeof(float) };
float *h_matrix { (float *)malloc(size) };
if(h_matrix == nullptr) {
cout << "Host matrix memory allocation unsuccessful" << endl;
exit(EXIT_FAILURE);
}
// Fill matrix
for(auto i { 0u }; i < dimension * dimension; ++i) {
h_matrix[i] = rand()/(float)RAND_MAX;
}
// Copy array to be used while checking output
float *h_matrix_copy { (float *)malloc(size) };
memcpy(h_matrix_copy, h_matrix, size);
float *d_matrix = nullptr;
cudaMalloc((void **)&d_matrix, size);
cudaMemcpy(d_matrix, h_matrix, size, cudaMemcpyHostToDevice);
const dim3 threadsPerBlock(16, 16);
const dim3 blocksPerGrid((dimension / threadsPerBlock.x) + 1, (dimension / threadsPerBlock.y) + 1);
transpose<<<blocksPerGrid, threadsPerBlock>>>(d_matrix, dimension);
cudaDeviceSynchronize();
reverse<<<blocksPerGrid, threadsPerBlock>>>(d_matrix, dimension);
cudaDeviceSynchronize();
cudaMemcpy(h_matrix, d_matrix, size, cudaMemcpyDeviceToHost);
cudaFree(d_matrix);
cout << endl << endl;
if(CPUSolveCheck(h_matrix_copy, h_matrix)) cout << "GPU Rotate Successful" << endl;
else cout << "GPU Rotate Unsuccessful" << endl;
cout << "Program complete" << endl;
free(h_matrix);
free(h_matrix_copy);
return 0;
}
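// A 90-degree clockwise rotation is done in two passes on the device:
// first transpose the matrix in place, then reverse each row.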
__global__ void transpose(float *matrix, const unsigned int dimension) {
const int i = blockDim.x * blockIdx.x + threadIdx.x;
const int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < (dimension) && j < (dimension) && j > i) {
const auto index { dimension * i + j };
const auto invIndex { dimension * j + i };
const auto temp { matrix[index] };
matrix[index] = matrix[invIndex];
matrix[invIndex] = temp;
}
}
__global__ void reverse(float *matrix, const unsigned int dimension) {
const int i = blockDim.x * blockIdx.x + threadIdx.x;
const int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < (dimension) && j < (dimension / 2)) {
const auto index { dimension * i + j };
const auto revIndex { (dimension * i) + dimension - 1 - j };
const auto temp { matrix[index] };
matrix[index] = matrix[revIndex];
matrix[revIndex] = temp;
}
}
bool CPUSolveCheck(float *originalMatrix, float *solvedMatrix) {
// Solve CPU-side with OriginalMatrix
for(auto i { 0u }; i < dimension; ++i) {
for(auto j { i + 1 }; j < dimension; ++j) {
const auto index { dimension * i + j };
const auto invIndex { dimension * j + i };
const auto temp { originalMatrix[index] };
originalMatrix[index] = originalMatrix[invIndex];
originalMatrix[invIndex] = temp;
}
}
for(auto i { 0u }; i < dimension; ++i) {
for(auto j { 0u }; j < dimension / 2; ++j) {
const auto index { dimension * i + j };
const auto revIndex { (dimension * i) + dimension - 1 - j };
const auto temp { originalMatrix[index] };
originalMatrix[index] = originalMatrix[revIndex];
originalMatrix[revIndex] = temp;
}
}
// Check GPU output vs cpu output
for(auto i { 0u }; i < dimension; ++i) {
for(auto j { 0u }; j < dimension / 2; ++j) {
const auto index { dimension * i + j };
if(!(originalMatrix[index] == solvedMatrix[index])) return false;
}
}
return true;
}
// Utility
void printMatrix(const float *matrix) {
for(int i = 0; i < dimension * dimension; ++i) {
if(i != 0 && i % dimension == 0) cout << endl;
cout << matrix[i] << "\t";
}
cout << endl;
}
|