| hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
4d92580c6cd08552658c57f93b5c7ddafb22da9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <string>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <iostream>
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall(hipError_t err, const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
if (hipSuccess != err)
{
fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
exit(-1);
}
#endif
return;
}
inline void __cudaCheckError(const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
exit(-1);
}
err = hipDeviceSynchronize();
if (hipSuccess != err)
{
fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
exit(-1);
}
#endif
return;
}
__global__ void rgb_2_grey(uchar* const greyImage, const uchar4* const rgbImage, int rows, int columns)
{
int rgb_x = blockIdx.x * blockDim.x + threadIdx.x; //x coordinate of pixel
int rgb_y = blockIdx.y * blockDim.y + threadIdx.y; //y coordinate of pixel
//stops function here if condition is met
if ((rgb_x >= columns) || (rgb_y >= rows))
{
return;
}
int rgb_ab = rgb_y*columns + rgb_x; //absolute pixel position
uchar4 rgb_Img = rgbImage[rgb_ab];
greyImage[rgb_ab] = uchar((float(rgb_Img.x))*0.299f + (float(rgb_Img.y))*0.587f + (float(rgb_Img.z))*0.114f);
}
using namespace cv;
using namespace std;
void Load_img(string& filename);
void Proc_Img(uchar4 **d_RGBImage, uchar** d_greyImage);
void RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols);
void Save_Img(string& filename);
Mat img_RGB;
Mat img_Grey;
uchar4 *d_rgbImg;
uchar *d_greyImg;
int main()
{
string input_img = "C:\\Users\\Austin\\Pictures\\wallpapers\\IMG_3575.JPG"; //input file path
string output_img = "C:\\Users\\Austin\\Pictures\\wallpapers\\IMG_3575GR2.JPG";//output file path
Load_img(input_img);//loads input image and creates a Mat object, then converts colors from blue, green, red(BGR) format to standard red, green, blue(RGB) format,
//finally creates an array(allocates memory) for grey image
Proc_Img(&d_rgbImg, &d_greyImg);//allocates memory on gpu and copies data to gpu
RGB_2_Greyscale(d_greyImg, d_rgbImg, img_RGB.rows, img_RGB.cols);//calls kernel which turns image to grayscale
Save_Img(output_img);//writes final image to drive
return 0;
}
void Load_img(string& filename)
{
//loads image into a matrix object along with the colors in BGR format (must convert to rgb).
Mat img = imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
//check if image loaded correctly
if (img.empty())
{
cerr << "File located at " << filename << " not read " << endl;
exit(1);
}
//converts color type from BGR to RGB
cvtColor(img, img_RGB, CV_BGR2RGBA);
//allocate memory for new greyscale image.
img_Grey.create(img.rows, img.cols, CV_8UC1); //img.rows returns the range of pixels in y, img.cols returns range of pixels in x
//CV_8UC1 means 8 bit unsigned(non-negative) single channel of color, aka greyscale.
//all three of the parameters allow the create function in the Mat class to determine how much memory to allocate
}
void Proc_Img(uchar4 **d_RGBImage, uchar** d_greyImage)
{
hipFree(0);
CudaCheckError();
//creates rgb and greyscale image arrays
uchar4 *h_RGBImage = (uchar4*)img_RGB.ptr<uchar>(0); //.ptr is a method in the mat class that returns a pointer to the first element of the matrix.
//this is just like a regular array/pointer mem address to first element of the array. This is templated
//in this case the compiler runs the function for returning pointer of type unsigned char. for rgb image it is
//cast to uchar4 struct to hold r,g,b, and alpha(ignored in program) values.
const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); //amount of pixels
//allocate memory on gpu
hipMalloc(d_RGBImage, sizeof(uchar4) * num_pix); //bytes per uchar4 times the number of pixels gives the bytes needed for the array
CudaCheckError();
hipMalloc(d_greyImage, sizeof(uchar) * num_pix); //bytes per uchar times the number of pixels gives the bytes needed for the array
CudaCheckError();
hipMemset(*d_greyImage, 0, sizeof(uchar) * num_pix); //makes sure all data in allocated space is set to 0
CudaCheckError();
//copy array into allocated space
hipMemcpy(*d_RGBImage, h_RGBImage, sizeof(uchar4)*num_pix, hipMemcpyHostToDevice);
CudaCheckError();
d_rgbImg = *d_RGBImage;
d_greyImg = *d_greyImage;
}
void RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols)
{
const int BS = 32;
const dim3 blockSize(BS, BS);
const dim3 gridSize((num_Cols / BS) + 1, (num_Rows / BS) + 1);
hipLaunchKernelGGL(( rgb_2_grey) , dim3(gridSize), dim3(blockSize), 0, 0, d_greyImage, d_RGBImage, num_Rows, num_Cols);
hipDeviceSynchronize(); CudaCheckError();
}
void Save_Img(string& filename)
{
const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); //number of pixels
hipMemcpy(img_Grey.ptr<uchar>(0), d_greyImg, sizeof(uchar)*num_pix, hipMemcpyDeviceToHost); //copy array from gpu to cpu
CudaCheckError();
imwrite(filename.c_str(), img_Grey); //save image to drive
hipFree(d_rgbImg); //deallocate memory on gpu
hipFree(d_greyImg);//deallocate memory on gpu
}
| 4d92580c6cd08552658c57f93b5c7ddafb22da9a.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <string>
#include <cuda.h>
#include <stdio.h>
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <iostream>
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall(cudaError err, const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
if (cudaSuccess != err)
{
fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString(err));
exit(-1);
}
#endif
return;
}
inline void __cudaCheckError(const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString(err));
exit(-1);
}
err = cudaDeviceSynchronize();
if (cudaSuccess != err)
{
fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString(err));
exit(-1);
}
#endif
return;
}
__global__ void rgb_2_grey(uchar* const greyImage, const uchar4* const rgbImage, int rows, int columns)
{
int rgb_x = blockIdx.x * blockDim.x + threadIdx.x; //x coordinate of pixel
int rgb_y = blockIdx.y * blockDim.y + threadIdx.y; //y coordinate of pixel
//stops function here if condition is met
if ((rgb_x >= columns) || (rgb_y >= rows))
{
return;
}
int rgb_ab = rgb_y*columns + rgb_x; //absolute pixel position
uchar4 rgb_Img = rgbImage[rgb_ab];
greyImage[rgb_ab] = uchar((float(rgb_Img.x))*0.299f + (float(rgb_Img.y))*0.587f + (float(rgb_Img.z))*0.114f);
}
using namespace cv;
using namespace std;
void Load_img(string& filename);
void Proc_Img(uchar4 **d_RGBImage, uchar** d_greyImage);
void RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols);
void Save_Img(string& filename);
Mat img_RGB;
Mat img_Grey;
uchar4 *d_rgbImg;
uchar *d_greyImg;
int main()
{
string input_img = "C:\\Users\\Austin\\Pictures\\wallpapers\\IMG_3575.JPG"; //input file path
string output_img = "C:\\Users\\Austin\\Pictures\\wallpapers\\IMG_3575GR2.JPG";//output file path
Load_img(input_img);//loads input image and creates a Mat object, then converts colors from blue, green, red(BGR) format to standard red, green, blue(RGB) format,
//finally creates an array(allocates memory) for grey image
Proc_Img(&d_rgbImg, &d_greyImg);//allocates memory on gpu and copies data to gpu
RGB_2_Greyscale(d_greyImg, d_rgbImg, img_RGB.rows, img_RGB.cols);//calls kernel which turns image to grayscale
Save_Img(output_img);//writes final image to drive
return 0;
}
void Load_img(string& filename)
{
//loads image into a matrix object along with the colors in BGR format (must convert to rgb).
Mat img = imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
//check if image loaded correctly
if (img.empty())
{
cerr << "File located at " << filename << " not read " << endl;
exit(1);
}
//converts color type from BGR to RGB
cvtColor(img, img_RGB, CV_BGR2RGBA);
//allocate memory for new greyscale image.
img_Grey.create(img.rows, img.cols, CV_8UC1); //img.rows returns the range of pixels in y, img.cols returns range of pixels in x
//CV_8UC1 means 8 bit unsigned(non-negative) single channel of color, aka greyscale.
//all three of the parameters allow the create function in the Mat class to determine how much memory to allocate
}
void Proc_Img(uchar4 **d_RGBImage, uchar** d_greyImage)
{
cudaFree(0);
CudaCheckError();
//creates rgb and greyscale image arrays
uchar4 *h_RGBImage = (uchar4*)img_RGB.ptr<uchar>(0); //.ptr is a method in the mat class that returns a pointer to the first element of the matrix.
//this is just like a regular array/pointer mem address to first element of the array. This is templated
//in this case the compiler runs the function for returning pointer of type unsigned char. for rgb image it is
//cast to uchar4 struct to hold r,g,b, and alpha(ignored in program) values.
const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); //amount of pixels
//allocate memory on gpu
cudaMalloc(d_RGBImage, sizeof(uchar4) * num_pix); //bytes per uchar4 times the number of pixels gives the bytes needed for the array
CudaCheckError();
cudaMalloc(d_greyImage, sizeof(uchar) * num_pix); //bytes per uchar times the number of pixels gives the bytes needed for the array
CudaCheckError();
cudaMemset(*d_greyImage, 0, sizeof(uchar) * num_pix); //makes sure all data in allocated space is set to 0
CudaCheckError();
//copy array into allocated space
cudaMemcpy(*d_RGBImage, h_RGBImage, sizeof(uchar4)*num_pix, cudaMemcpyHostToDevice);
CudaCheckError();
d_rgbImg = *d_RGBImage;
d_greyImg = *d_greyImage;
}
void RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols)
{
const int BS = 32;
const dim3 blockSize(BS, BS);
const dim3 gridSize((num_Cols / BS) + 1, (num_Rows / BS) + 1);
rgb_2_grey <<<gridSize, blockSize>>>(d_greyImage, d_RGBImage, num_Rows, num_Cols);
cudaDeviceSynchronize(); CudaCheckError();
}
void Save_Img(string& filename)
{
const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); //number of pixels
cudaMemcpy(img_Grey.ptr<uchar>(0), d_greyImg, sizeof(uchar)*num_pix, cudaMemcpyDeviceToHost); //copy array from gpu to cpu
CudaCheckError();
imwrite(filename.c_str(), img_Grey); //save image to drive
cudaFree(d_rgbImg); //deallocate memory on gpu
cudaFree(d_greyImg);//deallocate memory on gpu
}
|
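The pair above is a complete HIP/CUDA translation of an OpenCV RGB-to-grayscale program, and the one structural rewrite hipify performs in it is the kernel launch itself. The sketch below shows that mapping in isolation; `scale` and `launch_scale` are hypothetical names used only for illustration and are not part of either file.

```cpp
// Hedged sketch of the launch-syntax mapping illustrated by the pair above;
// `scale` is a hypothetical kernel, not part of the dataset row.
#include <cuda_runtime.h>

__global__ void scale(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;  // simple element-wise update
}

void launch_scale(float* d_data, int n) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA source form: grid, block, dynamic shared memory bytes, stream.
    scale<<<grid, block, 0, 0>>>(d_data, 2.0f, n);

    // hipify rewrites the line above into the HIP macro, spelling out the same
    // four launch parameters before the kernel arguments:
    //   hipLaunchKernelGGL(scale, grid, block, 0, 0, d_data, 2.0f, n);
}
```

Everything else in the pair is a one-for-one rename of runtime calls (cudaMalloc to hipMalloc, cudaMemcpy to hipMemcpy, and so on), which is why the two columns are otherwise line-for-line identical.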
0f5c1a5420e566c1ec95363bde6e815ef2bff922.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=64 --blockDim=256
template<class TData> __global__ void testKernel(TData *d_odata, TData *d_idata, int numElements);
template __global__ void testKernel<int>(int *d_odata, int *d_idata, int numElements);
template<class TData> __global__ void testKernel(
TData *d_odata,
TData *d_idata,
int numElements
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int pos = tid; pos < numElements; pos += numThreads)
{
d_odata[pos] = d_idata[pos];
}
}
| 0f5c1a5420e566c1ec95363bde6e815ef2bff922.cu | //pass
//--gridDim=64 --blockDim=256
template<class TData> __global__ void testKernel(TData *d_odata, TData *d_idata, int numElements);
template __global__ void testKernel<int>(int *d_odata, int *d_idata, int numElements);
template<class TData> __global__ void testKernel(
TData *d_odata,
TData *d_idata,
int numElements
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int pos = tid; pos < numElements; pos += numThreads)
{
d_odata[pos] = d_idata[pos];
}
}
|
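The second pair is a minimal templated grid-stride copy kernel intended for a GPU verifier (the `--gridDim=64 --blockDim=256` pragma); no host code ships with it. Below is a hedged sketch of how such a kernel might be driven from the host with that same configuration. `run_copy` is hypothetical and assumes the kernel definition above is visible in the same translation unit.

```cpp
// Hedged host-side sketch (not part of the dataset row): driving the templated
// grid-stride copy kernel with the gridDim=64 / blockDim=256 configuration
// named in the verification pragma above.
#include <cuda_runtime.h>
#include <vector>

int run_copy(int numElements) {
    std::vector<int> h_in(numElements, 7), h_out(numElements, 0);
    int *d_in = nullptr, *d_out = nullptr;
    cudaMalloc((void**)&d_in,  numElements * sizeof(int));
    cudaMalloc((void**)&d_out, numElements * sizeof(int));
    cudaMemcpy(d_in, h_in.data(), numElements * sizeof(int), cudaMemcpyHostToDevice);

    // 64 blocks x 256 threads; the grid-stride loop inside the kernel covers any
    // numElements, even when it exceeds 64 * 256 total threads.
    testKernel<int><<<64, 256>>>(d_out, d_in, numElements);

    cudaMemcpy(h_out.data(), d_out, numElements * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
    return h_out[0];  // expected to be 7 after the copy
}
```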
b7be1642dfce67d6396d686e61540ffd8d1538a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "counting.h"
#include <cstdio>
#include <cassert>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
struct one_if_not_space : public thrust::unary_function<char,int>
{
__host__ __device__
int operator()(char c) { return c == '\n' ? 0 : 1; }
};
void CountPosition1(const char *text, int *pos, int text_size)
{
one_if_not_space func;
thrust::transform(thrust::device, text, text + text_size, pos, func);
thrust::inclusive_scan_by_key(thrust::device, pos, pos + text_size, pos, pos);
}
__global__ void countPositionKernel(const char *text, int *pos, int text_size) {
auto i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= text_size) {
return;
}
if (i == 0 || text[i - 1] == '\n') {
int cnt = 0;
while (i + cnt < text_size && text[i + cnt] != '\n') {
pos[i + cnt] = cnt + 1;
cnt += 1;
}
}
}
void CountPosition2(const char *text, int *pos, int text_size)
{
hipLaunchKernelGGL(( countPositionKernel), dim3((text_size + 255)/ 256), dim3(256), 0, 0, text, pos, text_size);
// one_if_not_space func;
// thrust::transform(text, text + text_size, pos, func);
// thrust::inclusive_scan_by_key(pos, pos + text_size, pos, pos);
}
| b7be1642dfce67d6396d686e61540ffd8d1538a1.cu | #include "counting.h"
#include <cstdio>
#include <cassert>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
struct one_if_not_space : public thrust::unary_function<char,int>
{
__host__ __device__
int operator()(char c) { return c == '\n' ? 0 : 1; }
};
void CountPosition1(const char *text, int *pos, int text_size)
{
one_if_not_space func;
thrust::transform(thrust::device, text, text + text_size, pos, func);
thrust::inclusive_scan_by_key(thrust::device, pos, pos + text_size, pos, pos);
}
__global__ void countPositionKernel(const char *text, int *pos, int text_size) {
auto i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= text_size) {
return;
}
if (i == 0 || text[i - 1] == '\n') {
int cnt = 0;
while (i + cnt < text_size && text[i + cnt] != '\n') {
pos[i + cnt] = cnt + 1;
cnt += 1;
}
}
}
void CountPosition2(const char *text, int *pos, int text_size)
{
countPositionKernel<<< (text_size + 255)/ 256, 256>>>(text, pos, text_size);
// one_if_not_space func;
// thrust::transform(text, text + text_size, pos, func);
// thrust::inclusive_scan_by_key(pos, pos + text_size, pos, pos);
}
|
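The counting pair contrasts a Thrust formulation (`CountPosition1`) with a hand-written kernel (`CountPosition2`) for computing each character's 1-based position within its line, with newlines marked 0. The sketch below reruns the same two-step Thrust pipeline on a tiny string, using `thrust::device_vector` instead of the raw device pointers in the row; it deliberately mirrors the row's in-place, key-aliased call to `inclusive_scan_by_key`.

```cpp
// Hedged sketch of the Thrust pipeline used by CountPosition1 above, applied to
// a small host-prepared string for illustration.
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/scan.h>
#include <string>

struct one_if_not_newline {
    __host__ __device__ int operator()(char c) const { return c == '\n' ? 0 : 1; }
};

void count_positions_demo() {
    std::string text = "ab\ncde\n";
    thrust::device_vector<char> d_text(text.begin(), text.end());
    thrust::device_vector<int>  d_pos(text.size());

    // Step 1: 1 for every character that is not a newline, 0 for newlines.
    thrust::transform(d_text.begin(), d_text.end(), d_pos.begin(), one_if_not_newline{});
    // Step 2: segmented scan keyed by those same flags, so the running count
    // restarts after every newline (as in the dataset row, keys, values and
    // output all alias the same buffer).
    thrust::inclusive_scan_by_key(d_pos.begin(), d_pos.end(), d_pos.begin(), d_pos.begin());
    // Expected contents, following the row's semantics: 1 2 0 1 2 3 0
}
```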
f2b2844229d74531fd98342a4cdc52acaa4d3dd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication and is exactly the same as
* Chapter 7 of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* CUBLAS provides high-performance matrix multiplication.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
//#include <cutil_inline.h>
//#include <helper_functions.h>
// includes, kernels
#include <matrixMul_kernel.cuh>
#include <matrixMul_naive.cuh>
#include <matrixMul_tiling.cuh>
#include <matrixMul_coalescing.cuh>
#include <matrixMul_noBankConflict.cuh>
#include <matrixMul_compOpt.cuh>
#include <matrixMul_unroll.cuh>
#include <matrixMul_prefetch.cuh>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
void randomInit(float*, int);
void printDiff(float*, float*, int, int);
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
inline int stringRemoveDelimiter(char delimiter, const char *string)
{
int string_start = 0;
while (string[string_start] == delimiter)
{
string_start++;
}
if (string_start >= (int)strlen(string)-1)
{
return 0;
}
return string_start;
}
inline bool checkCmdLineFlag(const int argc, const char **argv, const char *string_ref)
{
bool bFound = false;
if (argc >= 1)
{
for (int i=1; i < argc; i++)
{
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = (int)(equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = (int)strlen(string_ref);
if (length == argv_length && !STRNCASECMP(string_argv, string_ref, length))
{
bFound = true;
continue;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv, const char *string_ref)
{
bool bFound = false;
int value = -1;
if (argc >= 1)
{
for (int i=1; i < argc; i++)
{
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = (int)strlen(string_ref);
if (!STRNCASECMP(string_argv, string_ref, length))
{
if (length+1 <= (int)strlen(string_argv))
{
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
}
else
{
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound)
{
return value;
}
else
{
return 0;
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, argv);
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
/****************************************************/
/* Preparations */
/****************************************************/
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
hipSetDevice(devID);
}
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// utilities
hipEvent_t start;
hipEvent_t stop;
float msecTotal;
// set seed for rand()
srand(2006);
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
float flop = 2 * (float)WC * (float)HC * (float)WA;
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A;
hipMalloc((void**) &d_A, mem_size_A);
float* d_B;
hipMalloc((void**) &d_B, mem_size_B);
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
hipMalloc((void**) &d_C, mem_size_C);
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
#if CHECK_RESULT == 1
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// compute reference solution
float* reference = (float*) malloc(mem_size_C);
computeGold(reference, h_A, h_B, HA, WA, WB);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Naive CPU (Golden Reference)\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#endif
dim3 threads,grid;
/****************************************************/
/* CUDA SDK example */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// execute the kernel
hipLaunchKernelGGL(( matrixMul), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("GPU SDK Sample\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* naive implementation on GPU */
/****************************************************/
#if ENABLE_NAIVE == 1
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// naive implementation
hipLaunchKernelGGL(( matrixMul_naive), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Naive GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
#endif
/****************************************************/
/* Tiling without global mem coalescing */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// tiling implementation

hipLaunchKernelGGL(( matrixMul_tiling), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("Tiling GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Global mem coalescing with smem bank conflict */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// global memory coalescing implementation
hipLaunchKernelGGL(( matrixMul_coalescing), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
//printf("Global mem coalescing GPU\n");
//printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
//printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Global mem coalescing w/o smem bank conflict */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// coalescing without shared memory bank conflicts
hipLaunchKernelGGL(( matrixMul_noBankConflict), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
//printf("Remove shared mem bank conflict GPU\n");
//printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
//printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Threads perform computation optimization */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// computation-optimized implementation
hipLaunchKernelGGL(( matrixMul_compOpt), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
//printf("Threads perform computation optimization GPU\n");
//printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
//printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Loop Unrolling */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// loop-unrolled implementation
hipLaunchKernelGGL(( matrixMul_unroll), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
//printf("Loop unrolling GPU\n");
//printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
//printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Prefetching */
/****************************************************/
// create and start timer
hipEventCreate(&start);
hipEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A,
hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B,
hipMemcpyHostToDevice);
// prefetching implementation
hipLaunchKernelGGL(( matrixMul_prefetch), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
// stop and destroy timer
hipEventCreate(&stop);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
//printf("Prefetching GPU\n");
//printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
//printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Cleaning */
/****************************************************/
// clean up memory
free(h_A);
free(h_B);
free(h_C);
#if CHECK_RESULT == 1
free(reference);
#endif
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipDeviceReset();
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void printDiff(float *data1, float *data2, int width, int height)
{
int i,j,k;
int error_count=0;
for (j=0; j<height; j++) {
for (i=0; i<width; i++) {
k = j*width+i;
if (fabs(data1[k] - data2[k]) > 0.1 ) {
printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f \n", i,j, data1[k], data2[k]);
error_count++;
}
}
}
printf("Total Errors = %d \n", error_count);
}
| f2b2844229d74531fd98342a4cdc52acaa4d3dd5.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication and is exactly the same as
* Chapter 7 of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* CUBLAS provides high-performance matrix multiplication.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
//#include <cutil_inline.h>
//#include <helper_functions.h>
// includes, kernels
#include <matrixMul_kernel.cuh>
#include <matrixMul_naive.cuh>
#include <matrixMul_tiling.cuh>
#include <matrixMul_coalescing.cuh>
#include <matrixMul_noBankConflict.cuh>
#include <matrixMul_compOpt.cuh>
#include <matrixMul_unroll.cuh>
#include <matrixMul_prefetch.cuh>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
void randomInit(float*, int);
void printDiff(float*, float*, int, int);
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
////////////////////////////////////////////////////////////////////////////////
// Helper Functions
////////////////////////////////////////////////////////////////////////////////
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
inline int stringRemoveDelimiter(char delimiter, const char *string)
{
int string_start = 0;
while (string[string_start] == delimiter)
{
string_start++;
}
if (string_start >= (int)strlen(string)-1)
{
return 0;
}
return string_start;
}
inline bool checkCmdLineFlag(const int argc, const char **argv, const char *string_ref)
{
bool bFound = false;
if (argc >= 1)
{
for (int i=1; i < argc; i++)
{
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = (int)(equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = (int)strlen(string_ref);
if (length == argv_length && !STRNCASECMP(string_argv, string_ref, length))
{
bFound = true;
continue;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv, const char *string_ref)
{
bool bFound = false;
int value = -1;
if (argc >= 1)
{
for (int i=1; i < argc; i++)
{
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = (int)strlen(string_ref);
if (!STRNCASECMP(string_argv, string_ref, length))
{
if (length+1 <= (int)strlen(string_argv))
{
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
}
else
{
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound)
{
return value;
}
else
{
return 0;
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, argv);
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
/****************************************************/
/* Preparations */
/****************************************************/
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
cudaSetDevice(devID);
}
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// utilities
cudaEvent_t start;
cudaEvent_t stop;
float msecTotal;
// set seed for rand()
srand(2006);
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
float flop = 2 * (float)WC * (float)HC * (float)WA;
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A;
cudaMalloc((void**) &d_A, mem_size_A);
float* d_B;
cudaMalloc((void**) &d_B, mem_size_B);
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
cudaMalloc((void**) &d_C, mem_size_C);
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
#if CHECK_RESULT == 1
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// compute reference solution
float* reference = (float*) malloc(mem_size_C);
computeGold(reference, h_A, h_B, HA, WA, WB);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("Naive CPU (Golden Reference)\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#endif
dim3 threads,grid;
/****************************************************/
/* CUDA SDK example */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// execute the kernel
matrixMul<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("GPU SDK Sample\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* naive implementation on GPU */
/****************************************************/
#if ENABLE_NAIVE == 1
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
// naive implementation
matrixMul_naive<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("Naive GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
#endif
/****************************************************/
/* Tiling without global mem coalescing */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
// tiling implementation
matrixMul_tiling<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("Tiling GPU\n");
printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Global mem coalescing with smem bank conflict */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
// global memory coalescing implementation
matrixMul_coalescing<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
//printf("Global mem coalescing GPU\n");
//printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
//printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Global mem coalescing w/o smem bank conflict */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
// coalescing without shared memory bank conflicts
matrixMul_noBankConflict<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
//printf("Remove shared mem bank conflict GPU\n");
//printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
//printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Threads perform computation optimization */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
// computation-optimized implementation
matrixMul_compOpt<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
//printf("Threads perform computation optimization GPU\n");
//printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
//printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Loop Unrolling */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
// loop-unrolled implementation
matrixMul_unroll<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
//printf("Loop unrolling GPU\n");
//printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
//printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Prefetching */
/****************************************************/
// create and start timer
cudaEventCreate(&start);
cudaEventRecord(start, NULL);
// setup execution parameters
threads = dim3(BLOCK_SIZE, 4);
grid = dim3(WC / (BLOCK_SIZE*4), HC / BLOCK_SIZE);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
// prefetching implementation
matrixMul_prefetch<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// stop and destroy timer
cudaEventCreate(&stop);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
//printf("Prefetching GPU\n");
//printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal/ 1e+6);
#if CHECK_RESULT == 1
// check result
//printDiff(reference, h_C, WC, HC);
#endif
/****************************************************/
/* Cleaning */
/****************************************************/
// clean up memory
free(h_A);
free(h_B);
free(h_C);
#if CHECK_RESULT == 1
free(reference);
#endif
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaThreadExit();
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void printDiff(float *data1, float *data2, int width, int height)
{
int i,j,k;
int error_count=0;
for (j=0; j<height; j++) {
for (i=0; i<width; i++) {
k = j*width+i;
if (fabs(data1[k] - data2[k]) > 0.1 ) {
printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f \n", i,j, data1[k], data2[k]);
error_count++;
}
}
}
printf("Total Errors = %d \n", error_count);
}
|
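The matrix-multiplication pair times every kernel variant with the same CUDA event idiom (create, record, launch, record, synchronize, elapsed time). A condensed sketch of that idiom follows; `time_launch` and its callback are hypothetical wrappers, and unlike the sample it also destroys the events it creates.

```cpp
// Hedged sketch of the event-timing idiom repeated throughout the host code above.
#include <cuda_runtime.h>

float time_launch(void (*launch)(void)) {
    cudaEvent_t start, stop;
    float msec = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);   // enqueue start marker on the default stream
    launch();                    // enqueue the kernel(s) being measured
    cudaEventRecord(stop, 0);    // enqueue stop marker
    cudaEventSynchronize(stop);  // wait until the stop event has completed

    cudaEventElapsedTime(&msec, start, stop);  // milliseconds between the two events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return msec;
}
```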
febd26683c943696ebfe7a7d5282123501fdf0ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
using namespace std;
#define imin(a,b) (a<b?a:b)
const int N = 33*1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+threadsPerBlock-1)/threadsPerBlock);
// dot on the kernel
__global__ void dot(float *a, float *b, float *c)
{
__shared__ float cache[threadsPerBlock];
int cacheIndex = threadIdx.x;
float temp = 0.0;
for (int tid = threadIdx.x + blockIdx.x*blockDim.x; tid<N; tid += blockDim.x*gridDim.x)
{
temp += a[tid]*b[tid];
}
cache[cacheIndex] = temp;
__syncthreads();
// reduction
for (int i = blockDim.x/2; i>0; i /= 2)
{
if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
}
if (threadIdx.x == 0) c[blockIdx.x] = cache[0];
}
// main fn
int main(void)
{
float *a, *b, c, *partial_c;
float *dev_a, *dev_b, *dev_partial_c;
a = (float*)malloc(N*sizeof(float));
b = (float*)malloc(N*sizeof(float));
partial_c = (float*)malloc(blocksPerGrid*sizeof(float));
hipMalloc((void**)&dev_a, N*sizeof(float));
hipMalloc((void**)&dev_b, N*sizeof(float));
hipMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float));
for (int i=0; i<N; i++)
{
a[i] = i;
b[i] = 2*i;
}
hipMemcpy(dev_a, a, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( dot), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c);
hipMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost);
c = 0.0;
for (int i=0; i<blocksPerGrid; i++)
{
c += partial_c[i];
}
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
cout<< "GPU value = "<<c<<" analytical value = "<<2*sum_squares((float)(N-1))<<endl;
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_partial_c);
free(a);
free(b);
free(partial_c);
}
| febd26683c943696ebfe7a7d5282123501fdf0ba.cu | #include <iostream>
#include <stdio.h>
using namespace std;
#define imin(a,b) (a<b?a:b)
const int N = 33*1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+threadsPerBlock-1)/threadsPerBlock);
// dot on the kernel
__global__ void dot(float *a, float *b, float *c)
{
__shared__ float cache[threadsPerBlock];
int cacheIndex = threadIdx.x;
float temp = 0.0;
for (int tid = threadIdx.x + blockIdx.x*blockDim.x; tid<N; tid += blockDim.x*gridDim.x)
{
temp += a[tid]*b[tid];
}
cache[cacheIndex] = temp;
__syncthreads();
// reduction
for (int i = blockDim.x/2; i>0; i /= 2)
{
if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
}
if (threadIdx.x == 0) c[blockIdx.x] = cache[0];
}
// main fn
int main(void)
{
float *a, *b, c, *partial_c;
float *dev_a, *dev_b, *dev_partial_c;
a = (float*)malloc(N*sizeof(float));
b = (float*)malloc(N*sizeof(float));
partial_c = (float*)malloc(blocksPerGrid*sizeof(float));
cudaMalloc((void**)&dev_a, N*sizeof(float));
cudaMalloc((void**)&dev_b, N*sizeof(float));
cudaMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float));
for (int i=0; i<N; i++)
{
a[i] = i;
b[i] = 2*i;
}
cudaMemcpy(dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice);
dot<<<blocksPerGrid,threadsPerBlock>>>(dev_a, dev_b, dev_partial_c);
cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost);
c = 0.0;
for (int i=0; i<blocksPerGrid; i++)
{
c += partial_c[i];
}
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
cout<< "GPU value = "<<c<<" analytical value = "<<2*sum_squares((float)(N-1))<<endl;
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_partial_c);
free(a);
free(b);
free(partial_c);
}
|
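The dot-product pair launches its shared-memory reduction without any error checking, in contrast to the CudaSafeCall/CudaCheckError macros of the first pair. Below is a minimal hedged sketch of adding that protection around the launch; `CUDA_CHECK` is a hypothetical helper, not part of the row, and hipify would rename the calls to hipGetLastError / hipDeviceSynchronize / hipGetErrorString.

```cpp
// Hedged sketch: a minimal check around the launch in the dot-product pair above.
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define CUDA_CHECK(expr)                                                      \
    do {                                                                      \
        cudaError_t e = (expr);                                               \
        if (e != cudaSuccess) {                                               \
            fprintf(stderr, "%s failed: %s (%s:%d)\n", #expr,                 \
                    cudaGetErrorString(e), __FILE__, __LINE__);               \
            exit(1);                                                          \
        }                                                                     \
    } while (0)

// Usage after the launch in main():
//   dot<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_partial_c);
//   CUDA_CHECK(cudaGetLastError());        // invalid launch configuration, etc.
//   CUDA_CHECK(cudaDeviceSynchronize());   // faults raised while the kernel runs
```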
ca246c826eb8ec41ebb758c02f8918e3c428a52a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void debug_ker(float* ptr, int addr){
//int i = blockIdx.x*blockDim.x + threadIdx.x;
printf("%d %f\n", addr, ptr[addr]);
} | ca246c826eb8ec41ebb758c02f8918e3c428a52a.cu | #include "includes.h"
__global__ void debug_ker(float* ptr, int addr){
//int i = blockIdx.x*blockDim.x + threadIdx.x;
printf("%d %f\n", addr, ptr[addr]);
} |
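The last complete pair is a one-line device-side printf helper. One practical point when using it: printf output produced on the device is only flushed to the host at synchronization points, so a synchronize call is needed before the value appears. A hedged usage sketch follows; `dump_element` is hypothetical and assumes the kernel definition above is in the same translation unit.

```cpp
// Hedged usage sketch for the debug kernel above.
#include <cuda_runtime.h>

__global__ void debug_ker(float* ptr, int addr);  // defined in the row above

void dump_element(float* d_buf, int addr) {
    debug_ker<<<1, 1>>>(d_buf, addr);  // one thread is enough to print one value
    cudaDeviceSynchronize();           // flush the device printf buffer to stdout
}
```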
7fcee9b992851e471fe600980f47cc702a135574.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cudf/utilities/traits.hpp>
#include <cudf/wrappers/bool.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <cudf/detail/utilities/device_atomics.cuh>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/timestamp_utilities.cuh>
#include <tests/utilities/type_lists.hpp>
template <typename T>
__global__ void gpu_atomic_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAdd(&result[0], data[id]);
atomicMin(&result[1], data[id]);
atomicMax(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{});
}
}
template <typename T, typename BinaryOp>
__device__ T atomic_op(T* addr, T const& value, BinaryOp op) {
T old_value = *addr;
T assumed;
do {
assumed = old_value;
T new_value = op(old_value, value);
old_value = atomicCAS(addr, assumed, new_value);
} while (assumed != old_value);
return old_value;
}
template <typename T>
__global__ void gpu_atomicCAS_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomic_op(&result[0], data[id], cudf::DeviceSum{});
atomic_op(&result[1], data[id], cudf::DeviceMin{});
atomic_op(&result[2], data[id], cudf::DeviceMax{});
atomic_op(&result[3], data[id], cudf::DeviceSum{});
atomic_op(&result[4], data[id], cudf::DeviceMin{});
atomic_op(&result[5], data[id], cudf::DeviceMax{});
}
}
template <typename T>
typename std::enable_if_t<!cudf::is_timestamp<T>(), T> accumulate(
std::vector<T> const& xs) {
return std::accumulate(xs.begin(), xs.end(), T{0});
}
template <typename T>
typename std::enable_if_t<cudf::is_timestamp<T>(), T> accumulate(
std::vector<T> const& xs) {
auto ys = std::vector<typename T::rep>(xs.size());
std::transform(xs.begin(), xs.end(), ys.begin(),
[](T const& ts) { return ts.time_since_epoch().count(); });
return T{std::accumulate(ys.begin(), ys.end(), 0)};
}
template <typename T>
struct AtomicsTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<int> const& v_input,
bool is_cas_test,
int block_size = 0,
int grid_size = 1) {
size_t vec_size = v_input.size();
// use transform from std::vector<int> instead.
std::vector<T> v(vec_size);
std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) {
T t(x);
return t;
});
T exact[3];
exact[0] = accumulate<T>(v);
exact[1] = *(std::min_element(v.begin(), v.end()));
exact[2] = *(std::max_element(v.begin(), v.end()));
std::vector<T> result_init(6);
result_init[0] = T{0};
result_init[1] = std::numeric_limits<T>::max();
result_init[2] = std::numeric_limits<T>::min();
result_init[3] = result_init[0];
result_init[4] = result_init[1];
result_init[5] = result_init[2];
thrust::device_vector<T> dev_data(v);
thrust::device_vector<T> dev_result(result_init);
if (block_size == 0) {
block_size = vec_size;
}
if (is_cas_test) {
hipLaunchKernelGGL(( gpu_atomicCAS_test), dim3(grid_size), dim3(block_size), 0, 0,
dev_result.data().get(), dev_data.data().get(), vec_size);
} else {
hipLaunchKernelGGL(( gpu_atomic_test), dim3(grid_size), dim3(block_size), 0, 0,
dev_result.data().get(), dev_data.data().get(), vec_size);
}
thrust::host_vector<T> host_result(dev_result);
hipDeviceSynchronize();
CUDA_CHECK_LAST();
EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed";
EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed";
EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed";
EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed";
}
};
TYPED_TEST_CASE(AtomicsTest, cudf::test::FixedWidthTypes);
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOps) {
bool is_cas_test = false;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCAS) {
bool is_cas_test = true;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOpsGrid) {
bool is_cas_test = false;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCASGrid) {
bool is_cas_test = true;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for large array
TYPED_TEST(AtomicsTest, atomicOpsRandom) {
bool is_cas_test = false;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(),
[&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
TYPED_TEST(AtomicsTest, atomicCASRandom) {
bool is_cas_test = true;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(),
[&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
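// Bitwise counterpart of gpu_atomic_test: result[0..2] use the native
// atomicAnd/Or/Xor intrinsics and result[3..5] the genericAtomicOperation
// wrappers; the host initialises them to the identities ~0 (AND) and 0 (OR/XOR).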
template <typename T>
__global__ void gpu_atomic_bitwiseOp_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAnd(&result[0], data[id]);
atomicOr(&result[1], data[id]);
atomicXor(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceOr{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceXor{});
}
}
template <typename T>
struct AtomicsBitwiseOpTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<uint64_t> const& v_input,
int block_size = 0,
int grid_size = 1) {
size_t vec_size = v_input.size();
std::vector<T> v(vec_size);
std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) {
T t(x);
return t;
});
std::vector<T> identity = {T(~0ull), T(0), T(0), T(~0ull), T(0), T(0)};
T exact[3];
exact[0] = std::accumulate(v.begin(), v.end(), identity[0],
[](T acc, uint64_t i) { return acc & T(i); });
exact[1] = std::accumulate(v.begin(), v.end(), identity[1],
[](T acc, uint64_t i) { return acc | T(i); });
exact[2] = std::accumulate(v.begin(), v.end(), identity[2],
[](T acc, uint64_t i) { return acc ^ T(i); });
thrust::device_vector<T> dev_result(identity);
thrust::device_vector<T> dev_data(v);
if (block_size == 0) {
block_size = vec_size;
}
hipLaunchKernelGGL(( gpu_atomic_bitwiseOp_test<T>), dim3(grid_size), dim3(block_size), 0, 0,
reinterpret_cast<T*>(dev_result.data().get()),
reinterpret_cast<T*>(dev_data.data().get()), vec_size);
thrust::host_vector<T> host_result(dev_result);
hipDeviceSynchronize();
CUDA_CHECK_LAST();
print_exact(exact, "exact");
print_exact(host_result.data(), "result");
EXPECT_EQ(host_result[0], exact[0]) << "atomicAnd test failed";
EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicXor test failed";
EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed";
EXPECT_EQ(host_result[4], exact[1]) << "atomicOr test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicXor test(2) failed";
}
void print_exact(const T* v, const char* msg) {
std::cout << std::hex << std::showbase;
std::cout << "The " << msg << " = {" << +v[0] << ", " << +v[1] << ", "
<< +v[2] << "}" << std::endl;
}
};
using BitwiseOpTestingTypes = cudf::test::Types<int8_t,
int16_t,
int32_t,
int64_t,
uint8_t,
uint16_t,
uint32_t,
uint64_t>;
TYPED_TEST_CASE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes);
TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps) {
{ // test for AND, XOR
std::vector<uint64_t> input_array({0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc,
0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc});
this->atomic_test(input_array);
}
{ // test for OR, XOR
std::vector<uint64_t> input_array({0x01, 0xfc02, 0x1dff03,
0x1100a0b0801d0003, 0x8000000000000000,
0x1dff03});
this->atomic_test(input_array);
}
}
| 7fcee9b992851e471fe600980f47cc702a135574.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cudf/utilities/traits.hpp>
#include <cudf/wrappers/bool.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <cudf/detail/utilities/device_atomics.cuh>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/timestamp_utilities.cuh>
#include <tests/utilities/type_lists.hpp>
template <typename T>
__global__ void gpu_atomic_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAdd(&result[0], data[id]);
atomicMin(&result[1], data[id]);
atomicMax(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{});
}
}
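// Classic read-modify-CAS retry loop: snapshot the current value, compute
// op(old, value), and try to publish it with atomicCAS; if another thread
// updated the slot in the meantime the returned value differs from `assumed`
// and the loop retries.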
template <typename T, typename BinaryOp>
__device__ T atomic_op(T* addr, T const& value, BinaryOp op) {
T old_value = *addr;
T assumed;
do {
assumed = old_value;
T new_value = op(old_value, value);
old_value = atomicCAS(addr, assumed, new_value);
} while (assumed != old_value);
return old_value;
}
template <typename T>
__global__ void gpu_atomicCAS_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomic_op(&result[0], data[id], cudf::DeviceSum{});
atomic_op(&result[1], data[id], cudf::DeviceMin{});
atomic_op(&result[2], data[id], cudf::DeviceMax{});
atomic_op(&result[3], data[id], cudf::DeviceSum{});
atomic_op(&result[4], data[id], cudf::DeviceMin{});
atomic_op(&result[5], data[id], cudf::DeviceMax{});
}
}
template <typename T>
typename std::enable_if_t<!cudf::is_timestamp<T>(), T> accumulate(
std::vector<T> const& xs) {
return std::accumulate(xs.begin(), xs.end(), T{0});
}
template <typename T>
typename std::enable_if_t<cudf::is_timestamp<T>(), T> accumulate(
std::vector<T> const& xs) {
auto ys = std::vector<typename T::rep>(xs.size());
std::transform(xs.begin(), xs.end(), ys.begin(),
[](T const& ts) { return ts.time_since_epoch().count(); });
return T{std::accumulate(ys.begin(), ys.end(), 0)};
}
template <typename T>
struct AtomicsTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<int> const& v_input,
bool is_cas_test,
int block_size = 0,
int grid_size = 1) {
size_t vec_size = v_input.size();
// convert the int test inputs to the element type T under test.
std::vector<T> v(vec_size);
std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) {
T t(x);
return t;
});
T exact[3];
exact[0] = accumulate<T>(v);
exact[1] = *(std::min_element(v.begin(), v.end()));
exact[2] = *(std::max_element(v.begin(), v.end()));
std::vector<T> result_init(6);
result_init[0] = T{0};
result_init[1] = std::numeric_limits<T>::max();
result_init[2] = std::numeric_limits<T>::min();
result_init[3] = result_init[0];
result_init[4] = result_init[1];
result_init[5] = result_init[2];
thrust::device_vector<T> dev_data(v);
thrust::device_vector<T> dev_result(result_init);
if (block_size == 0) {
block_size = vec_size;
}
if (is_cas_test) {
gpu_atomicCAS_test<<<grid_size, block_size>>>(
dev_result.data().get(), dev_data.data().get(), vec_size);
} else {
gpu_atomic_test<<<grid_size, block_size>>>(
dev_result.data().get(), dev_data.data().get(), vec_size);
}
thrust::host_vector<T> host_result(dev_result);
cudaDeviceSynchronize();
CUDA_CHECK_LAST();
EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed";
EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed";
EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed";
EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed";
}
};
TYPED_TEST_CASE(AtomicsTest, cudf::test::FixedWidthTypes);
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOps) {
bool is_cas_test = false;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCAS) {
bool is_cas_test = true;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOpsGrid) {
bool is_cas_test = false;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCASGrid) {
bool is_cas_test = true;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for large array
TYPED_TEST(AtomicsTest, atomicOpsRandom) {
bool is_cas_test = false;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(),
[&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
TYPED_TEST(AtomicsTest, atomicCASRandom) {
bool is_cas_test = true;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(),
[&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
template <typename T>
__global__ void gpu_atomic_bitwiseOp_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAnd(&result[0], data[id]);
atomicOr(&result[1], data[id]);
atomicXor(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceOr{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceXor{});
}
}
template <typename T>
struct AtomicsBitwiseOpTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<uint64_t> const& v_input,
int block_size = 0,
int grid_size = 1) {
size_t vec_size = v_input.size();
std::vector<T> v(vec_size);
std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) {
T t(x);
return t;
});
std::vector<T> identity = {T(~0ull), T(0), T(0), T(~0ull), T(0), T(0)};
T exact[3];
exact[0] = std::accumulate(v.begin(), v.end(), identity[0],
[](T acc, uint64_t i) { return acc & T(i); });
exact[1] = std::accumulate(v.begin(), v.end(), identity[1],
[](T acc, uint64_t i) { return acc | T(i); });
exact[2] = std::accumulate(v.begin(), v.end(), identity[2],
[](T acc, uint64_t i) { return acc ^ T(i); });
thrust::device_vector<T> dev_result(identity);
thrust::device_vector<T> dev_data(v);
if (block_size == 0) {
block_size = vec_size;
}
gpu_atomic_bitwiseOp_test<T><<<grid_size, block_size>>>(
reinterpret_cast<T*>(dev_result.data().get()),
reinterpret_cast<T*>(dev_data.data().get()), vec_size);
thrust::host_vector<T> host_result(dev_result);
cudaDeviceSynchronize();
CUDA_CHECK_LAST();
print_exact(exact, "exact");
print_exact(host_result.data(), "result");
EXPECT_EQ(host_result[0], exact[0]) << "atomicAnd test failed";
EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicXor test failed";
EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed";
EXPECT_EQ(host_result[4], exact[1]) << "atomicOr test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicXor test(2) failed";
}
void print_exact(const T* v, const char* msg) {
std::cout << std::hex << std::showbase;
std::cout << "The " << msg << " = {" << +v[0] << ", " << +v[1] << ", "
<< +v[2] << "}" << std::endl;
}
};
using BitwiseOpTestingTypes = cudf::test::Types<int8_t,
int16_t,
int32_t,
int64_t,
uint8_t,
uint16_t,
uint32_t,
uint64_t>;
TYPED_TEST_CASE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes);
TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps) {
{ // test for AND, XOR
std::vector<uint64_t> input_array({0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc,
0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc});
this->atomic_test(input_array);
}
{ // test for OR, XOR
std::vector<uint64_t> input_array({0x01, 0xfc02, 0x1dff03,
0x1100a0b0801d0003, 0x8000000000000000,
0x1dff03});
this->atomic_test(input_array);
}
}
|
05b74cae14851eea6576c49b30695402cb9b5258.hip | // !!! This is a file automatically generated by hipify!!!
/*!
Count triangles using warp-granularity dynamic algorithm selection
*/
#include <iostream>
#include <roctracer/roctx.h>
#include "clara/clara.hpp"
#include <fmt/format.h>
#include "pangolin/algorithm/tc_edge_dysel.cuh"
#include "pangolin/bounded_buffer.hpp"
#include "pangolin/configure.hpp"
#include "pangolin/cuda_cxx/rc_stream.hpp"
#include "pangolin/file/edge_list_file.hpp"
#include "pangolin/init.hpp"
#include "pangolin/sparse/csr_coo.hpp"
struct RunOptions {
std::vector<int> gpus;
std::string path;
std::string sep;
bool readMostly;
bool accessedBy;
bool prefetchAsync;
int dimBlock;
int iters;
};
template <typename V> void print_vec(const V &vec, const std::string &sep) {
for (const auto &e : vec) {
fmt::print("{}{}", sep, e);
}
}
void print_header(RunOptions &opts) {
fmt::print("bmark{0}bs{0}sb{0}graph{0}nodes{0}edges{0}tris", opts.sep);
for (int i = 0; i < opts.iters; ++i) {
fmt::print("{}time{}", opts.sep, i);
}
for (int i = 0; i < opts.iters; ++i) {
fmt::print("{}teps{}", opts.sep, i);
}
for (int i = 0; i < opts.iters; ++i) {
fmt::print("{}kernel_time{}", opts.sep, i);
}
for (int i = 0; i < opts.iters; ++i) {
fmt::print("{}kernel_teps{}", opts.sep, i);
}
fmt::print("\n");
}
template <typename Index> int run(RunOptions &opts) {
typedef pangolin::DiEdge<Index> Edge;
std::vector<int> gpus = opts.gpus;
if (gpus.empty()) {
LOG(warn, "no GPUs provided on command line, using GPU 0");
gpus.push_back(0);
}
// read data
auto start = std::chrono::system_clock::now();
pangolin::EdgeListFile file(opts.path);
std::vector<Edge> edges;
std::vector<Edge> fileEdges;
while (file.get_edges(fileEdges, 500)) {
edges.insert(edges.end(), fileEdges.begin(), fileEdges.end());
}
double elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "read_data time {}s", elapsed);
LOG(debug, "read {} edges", edges.size());
// create csr and count `iters` times
std::vector<double> times;
std::vector<double> kernelTimes;
uint64_t nodes;
uint64_t tris;
uint64_t nnz;
for (int i = 0; i < opts.iters; ++i) {
// create csr
start = std::chrono::system_clock::now();
auto upperTriangularFilter = [](Edge e) { return e.src < e.dst; };
auto lowerTriangularFilter = [](Edge e) { return e.src > e.dst; };
auto adj = pangolin::CSRCOO<Index>::from_edges(edges.begin(), edges.end(), upperTriangularFilter);
LOG(debug, "nnz = {}", adj.nnz());
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "create CSR time {}s", elapsed);
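    // Optional unified-memory hints applied to the CSR before counting: read-mostly
    // and accessed-by advice plus an asynchronous prefetch, each followed by a
    // device synchronize on every selected GPU.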
// read-mostly
roctxRangePush("read-mostly");
const auto startHints = std::chrono::system_clock::now();
if (opts.readMostly) {
adj.read_mostly();
for (const auto &gpu : gpus) {
CUDA_RUNTIME(hipSetDevice(gpu));
CUDA_RUNTIME(hipDeviceSynchronize());
}
}
elapsed = (std::chrono::system_clock::now() - startHints).count() / 1e9;
roctxRangePop();
LOG(info, "read-mostly CSR time {}s", elapsed);
// accessed-by
start = std::chrono::system_clock::now();
if (opts.accessedBy) {
for (const auto &gpu : gpus) {
adj.accessed_by(gpu);
CUDA_RUNTIME(hipSetDevice(gpu));
CUDA_RUNTIME(hipDeviceSynchronize());
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "accessed-by CSR time {}s", elapsed);
// prefetch
start = std::chrono::system_clock::now();
if (opts.prefetchAsync) {
for (const auto &gpu : gpus) {
adj.prefetch_async(gpu);
CUDA_RUNTIME(hipSetDevice(gpu));
CUDA_RUNTIME(hipDeviceSynchronize());
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "prefetch CSR time {}s", elapsed);
// create async counters
start = std::chrono::system_clock::now();
std::vector<pangolin::EdgeWarpDyselTC> counters;
for (int dev : gpus) {
LOG(debug, "create device {} counter", dev);
counters.push_back(std::move(pangolin::EdgeWarpDyselTC(dev)));
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "counter ctor time {}s", elapsed);
// determine the number of edges per gpu
const size_t edgesPerGPU = (adj.nnz() + gpus.size() - 1) / gpus.size();
LOG(debug, "{} edges per GPU", edgesPerGPU);
// launch counting operations
size_t edgeStart = 0;
for (auto &counter : counters) {
const size_t edgeStop = ::min(edgeStart + edgesPerGPU, adj.nnz());
const size_t numEdges = edgeStop - edgeStart;
LOG(debug, "start async count on GPU {} ({} edges)", counter.device(), numEdges);
counter.count_async(adj.view(), edgeStart, numEdges, opts.dimBlock);
edgeStart += edgesPerGPU;
}
// wait for counting operations to finish
uint64_t total = 0;
for (auto &counter : counters) {
LOG(debug, "wait for counter on GPU {}", counter.device());
counter.sync();
total += counter.count();
}
elapsed = (std::chrono::system_clock::now() - startHints).count() / 1e9;
LOG(info, "{} triangles ({} teps)", total, adj.nnz() / elapsed);
LOG(info, "count time: {}s", elapsed);
for (auto &counter : counters) {
LOG(info, "GPU {} kernel time: {}", counter.device(), counter.kernel_time());
}
times.push_back(elapsed);
if (counters.size() == 1) {
kernelTimes.push_back(counters[0].kernel_time());
} else {
kernelTimes.push_back(0);
}
nodes = adj.num_rows();
nnz = adj.nnz();
tris = total;
}
if (opts.iters > 0) {
fmt::print("dysel");
fmt::print("{}{}", opts.sep, opts.dimBlock);
fmt::print("{}{}", opts.sep, opts.path);
fmt::print("{}{}", opts.sep, nodes);
fmt::print("{}{}", opts.sep, nnz);
fmt::print("{}{}", opts.sep, tris);
print_vec(times, opts.sep);
for (const auto &s : times) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(kernelTimes, opts.sep);
for (const auto &s : kernelTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
}
fmt::print("\n");
return 0;
}
int main(int argc, char **argv) {
pangolin::init();
bool help = false;
bool debug = false;
bool verbose = false;
bool quiet = false;
bool wide = false;
bool header = false;
RunOptions opts;
opts.dimBlock = 512;
opts.iters = 1;
opts.readMostly = false;
opts.accessedBy = false;
opts.prefetchAsync = false;
opts.sep = ",";
clara::Parser cli;
cli = cli | clara::Help(help);
cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr");
cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr");
cli = cli | clara::Opt(quiet)["--quiet"]("only print errors");
cli = cli | clara::Opt(wide)["--wide"]("64-bit node IDs");
cli = cli | clara::Opt(header)["--header"]("only print CSV header");
cli = cli | clara::Opt(opts.gpus, "dev ids")["-g"]("gpus to use");
cli = cli | clara::Opt(opts.dimBlock, "block-dim")["--bs"]("Number of threads in a block");
cli = cli | clara::Opt(opts.readMostly)["--read-mostly"]("mark data as read-mostly by all gpus before kernel");
cli = cli | clara::Opt(opts.accessedBy)["--accessed-by"]("mark data as accessed-by all GPUs before kernel");
cli = cli | clara::Opt(opts.prefetchAsync)["--prefetch-async"]("prefetch data to all GPUs before kernel");
cli = cli | clara::Opt(opts.iters, "N")["-n"]("number of counts");
cli = cli | clara::Arg(opts.path, "graph file")("Path to adjacency list").required();
auto result = cli.parse(clara::Args(argc, argv));
if (!result) {
LOG(critical, "Error in command line: {}", result.errorMessage());
exit(1);
}
if (help) {
std::cout << cli;
return 0;
}
// set logging level
if (verbose) {
pangolin::logger::set_level(pangolin::logger::Level::TRACE);
} else if (debug) {
pangolin::logger::set_level(pangolin::logger::Level::DEBUG);
} else if (quiet) {
pangolin::logger::set_level(pangolin::logger::Level::ERR);
}
// log command line before much else happens
{
std::string cmd;
for (int i = 0; i < argc; ++i) {
if (i != 0) {
cmd += " ";
}
cmd += argv[i];
}
LOG(debug, cmd);
}
LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH);
LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC);
LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH);
LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES);
#ifndef NDEBUG
LOG(warn, "Not a release build");
#endif
if (header) {
print_header(opts);
} else {
if (wide) {
LOG(debug, "64-bit node indices");
return run<uint64_t>(opts);
} else {
LOG(debug, "32-bit node indices");
return run<uint32_t>(opts);
}
}
return 0;
}
| 05b74cae14851eea6576c49b30695402cb9b5258.cu | /*!
Count triangles using warp-granularity dynamic algorithm selection
*/
#include <iostream>
#include <nvToolsExt.h>
#include "clara/clara.hpp"
#include <fmt/format.h>
#include "pangolin/algorithm/tc_edge_dysel.cuh"
#include "pangolin/bounded_buffer.hpp"
#include "pangolin/configure.hpp"
#include "pangolin/cuda_cxx/rc_stream.hpp"
#include "pangolin/file/edge_list_file.hpp"
#include "pangolin/init.hpp"
#include "pangolin/sparse/csr_coo.hpp"
struct RunOptions {
std::vector<int> gpus;
std::string path;
std::string sep;
bool readMostly;
bool accessedBy;
bool prefetchAsync;
int dimBlock;
int iters;
};
template <typename V> void print_vec(const V &vec, const std::string &sep) {
for (const auto &e : vec) {
fmt::print("{}{}", sep, e);
}
}
void print_header(RunOptions &opts) {
fmt::print("bmark{0}bs{0}sb{0}graph{0}nodes{0}edges{0}tris", opts.sep);
for (int i = 0; i < opts.iters; ++i) {
fmt::print("{}time{}", opts.sep, i);
}
for (int i = 0; i < opts.iters; ++i) {
fmt::print("{}teps{}", opts.sep, i);
}
for (int i = 0; i < opts.iters; ++i) {
fmt::print("{}kernel_time{}", opts.sep, i);
}
for (int i = 0; i < opts.iters; ++i) {
fmt::print("{}kernel_teps{}", opts.sep, i);
}
fmt::print("\n");
}
template <typename Index> int run(RunOptions &opts) {
typedef pangolin::DiEdge<Index> Edge;
std::vector<int> gpus = opts.gpus;
if (gpus.empty()) {
LOG(warn, "no GPUs provided on command line, using GPU 0");
gpus.push_back(0);
}
// read data
auto start = std::chrono::system_clock::now();
pangolin::EdgeListFile file(opts.path);
std::vector<Edge> edges;
std::vector<Edge> fileEdges;
while (file.get_edges(fileEdges, 500)) {
edges.insert(edges.end(), fileEdges.begin(), fileEdges.end());
}
double elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "read_data time {}s", elapsed);
LOG(debug, "read {} edges", edges.size());
// create csr and count `iters` times
std::vector<double> times;
std::vector<double> kernelTimes;
uint64_t nodes;
uint64_t tris;
uint64_t nnz;
for (int i = 0; i < opts.iters; ++i) {
// create csr
start = std::chrono::system_clock::now();
auto upperTriangularFilter = [](Edge e) { return e.src < e.dst; };
auto lowerTriangularFilter = [](Edge e) { return e.src > e.dst; };
auto adj = pangolin::CSRCOO<Index>::from_edges(edges.begin(), edges.end(), upperTriangularFilter);
LOG(debug, "nnz = {}", adj.nnz());
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "create CSR time {}s", elapsed);
// read-mostly
nvtxRangePush("read-mostly");
const auto startHints = std::chrono::system_clock::now();
if (opts.readMostly) {
adj.read_mostly();
for (const auto &gpu : gpus) {
CUDA_RUNTIME(cudaSetDevice(gpu));
CUDA_RUNTIME(cudaDeviceSynchronize());
}
}
elapsed = (std::chrono::system_clock::now() - startHints).count() / 1e9;
nvtxRangePop();
LOG(info, "read-mostly CSR time {}s", elapsed);
// accessed-by
start = std::chrono::system_clock::now();
if (opts.accessedBy) {
for (const auto &gpu : gpus) {
adj.accessed_by(gpu);
CUDA_RUNTIME(cudaSetDevice(gpu));
CUDA_RUNTIME(cudaDeviceSynchronize());
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "accessed-by CSR time {}s", elapsed);
// prefetch
start = std::chrono::system_clock::now();
if (opts.prefetchAsync) {
for (const auto &gpu : gpus) {
adj.prefetch_async(gpu);
CUDA_RUNTIME(cudaSetDevice(gpu));
CUDA_RUNTIME(cudaDeviceSynchronize());
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "prefetch CSR time {}s", elapsed);
// create async counters
start = std::chrono::system_clock::now();
std::vector<pangolin::EdgeWarpDyselTC> counters;
for (int dev : gpus) {
LOG(debug, "create device {} counter", dev);
counters.push_back(std::move(pangolin::EdgeWarpDyselTC(dev)));
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "counter ctor time {}s", elapsed);
// determine the number of edges per gpu
const size_t edgesPerGPU = (adj.nnz() + gpus.size() - 1) / gpus.size();
LOG(debug, "{} edges per GPU", edgesPerGPU);
// launch counting operations
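    // Each counter gets a contiguous, ceil-divided slice of the edge list and is
    // launched asynchronously on its own device; the host then syncs each counter
    // and sums the per-GPU partial triangle counts.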
size_t edgeStart = 0;
for (auto &counter : counters) {
const size_t edgeStop = std::min(edgeStart + edgesPerGPU, adj.nnz());
const size_t numEdges = edgeStop - edgeStart;
LOG(debug, "start async count on GPU {} ({} edges)", counter.device(), numEdges);
counter.count_async(adj.view(), edgeStart, numEdges, opts.dimBlock);
edgeStart += edgesPerGPU;
}
// wait for counting operations to finish
uint64_t total = 0;
for (auto &counter : counters) {
LOG(debug, "wait for counter on GPU {}", counter.device());
counter.sync();
total += counter.count();
}
elapsed = (std::chrono::system_clock::now() - startHints).count() / 1e9;
LOG(info, "{} triangles ({} teps)", total, adj.nnz() / elapsed);
LOG(info, "count time: {}s", elapsed);
for (auto &counter : counters) {
LOG(info, "GPU {} kernel time: {}", counter.device(), counter.kernel_time());
}
times.push_back(elapsed);
if (counters.size() == 1) {
kernelTimes.push_back(counters[0].kernel_time());
} else {
kernelTimes.push_back(0);
}
nodes = adj.num_rows();
nnz = adj.nnz();
tris = total;
}
if (opts.iters > 0) {
fmt::print("dysel");
fmt::print("{}{}", opts.sep, opts.dimBlock);
fmt::print("{}{}", opts.sep, opts.path);
fmt::print("{}{}", opts.sep, nodes);
fmt::print("{}{}", opts.sep, nnz);
fmt::print("{}{}", opts.sep, tris);
print_vec(times, opts.sep);
for (const auto &s : times) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(kernelTimes, opts.sep);
for (const auto &s : kernelTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
}
fmt::print("\n");
return 0;
}
int main(int argc, char **argv) {
pangolin::init();
bool help = false;
bool debug = false;
bool verbose = false;
bool quiet = false;
bool wide = false;
bool header = false;
RunOptions opts;
opts.dimBlock = 512;
opts.iters = 1;
opts.readMostly = false;
opts.accessedBy = false;
opts.prefetchAsync = false;
opts.sep = ",";
clara::Parser cli;
cli = cli | clara::Help(help);
cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr");
cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr");
cli = cli | clara::Opt(quiet)["--quiet"]("only print errors");
cli = cli | clara::Opt(wide)["--wide"]("64-bit node IDs");
cli = cli | clara::Opt(header)["--header"]("only print CSV header");
cli = cli | clara::Opt(opts.gpus, "dev ids")["-g"]("gpus to use");
cli = cli | clara::Opt(opts.dimBlock, "block-dim")["--bs"]("Number of threads in a block");
cli = cli | clara::Opt(opts.readMostly)["--read-mostly"]("mark data as read-mostly by all gpus before kernel");
cli = cli | clara::Opt(opts.accessedBy)["--accessed-by"]("mark data as accessed-by all GPUs before kernel");
cli = cli | clara::Opt(opts.prefetchAsync)["--prefetch-async"]("prefetch data to all GPUs before kernel");
cli = cli | clara::Opt(opts.iters, "N")["-n"]("number of counts");
cli = cli | clara::Arg(opts.path, "graph file")("Path to adjacency list").required();
auto result = cli.parse(clara::Args(argc, argv));
if (!result) {
LOG(critical, "Error in command line: {}", result.errorMessage());
exit(1);
}
if (help) {
std::cout << cli;
return 0;
}
// set logging level
if (verbose) {
pangolin::logger::set_level(pangolin::logger::Level::TRACE);
} else if (debug) {
pangolin::logger::set_level(pangolin::logger::Level::DEBUG);
} else if (quiet) {
pangolin::logger::set_level(pangolin::logger::Level::ERR);
}
// log command line before much else happens
{
std::string cmd;
for (int i = 0; i < argc; ++i) {
if (i != 0) {
cmd += " ";
}
cmd += argv[i];
}
LOG(debug, cmd);
}
LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH);
LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC);
LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH);
LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES);
#ifndef NDEBUG
LOG(warn, "Not a release build");
#endif
if (header) {
print_header(opts);
} else {
if (wide) {
LOG(debug, "64-bit node indices");
return run<uint64_t>(opts);
} else {
LOG(debug, "32-bit node indices");
return run<uint32_t>(opts);
}
}
return 0;
}
|
af698929bcb3a9caaa7d58fed47e593c7f1459fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
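// Transpose through dynamically sized shared memory, unrolled by a factor of two
// in x and padded by IPAD columns (assumed to be defined in includes.h, together
// with BDIMX) to avoid shared-memory bank conflicts: each thread loads two input
// elements BDIMX apart and stores two output elements ny*BDIMX apart.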
__global__ void transposeSmemUnrollPadDyn (float *out, float *in, const int nx, const int ny)
{
// dynamic shared memory
extern __shared__ float tile[];
unsigned int ix = blockDim.x * blockIdx.x * 2 + threadIdx.x;
unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int ti = iy * nx + ix;
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// coordinate in transposed matrix
unsigned int ix2 = blockDim.y * blockIdx.y + icol;
unsigned int iy2 = blockDim.x * 2 * blockIdx.x + irow;
unsigned int to = iy2 * ny + ix2;
// transpose with boundary test
if (ix + blockDim.x < nx && iy < ny)
{
// load data from global memory to shared memory
unsigned int row_idx = threadIdx.y * (blockDim.x * 2 + IPAD) +
threadIdx.x;
tile[row_idx] = in[ti];
tile[row_idx + BDIMX] = in[ti + BDIMX];
// thread synchronization
__syncthreads();
unsigned int col_idx = icol * (blockDim.x * 2 + IPAD) + irow;
out[to] = tile[col_idx];
out[to + ny * BDIMX] = tile[col_idx + BDIMX];
}
} | af698929bcb3a9caaa7d58fed47e593c7f1459fb.cu | #include "includes.h"
__global__ void transposeSmemUnrollPadDyn (float *out, float *in, const int nx, const int ny)
{
// dynamic shared memory
extern __shared__ float tile[];
unsigned int ix = blockDim.x * blockIdx.x * 2 + threadIdx.x;
unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int ti = iy * nx + ix;
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// coordinate in transposed matrix
unsigned int ix2 = blockDim.y * blockIdx.y + icol;
unsigned int iy2 = blockDim.x * 2 * blockIdx.x + irow;
unsigned int to = iy2 * ny + ix2;
// transpose with boundary test
if (ix + blockDim.x < nx && iy < ny)
{
// load data from global memory to shared memory
unsigned int row_idx = threadIdx.y * (blockDim.x * 2 + IPAD) +
threadIdx.x;
tile[row_idx] = in[ti];
tile[row_idx + BDIMX] = in[ti + BDIMX];
// thread synchronization
__syncthreads();
unsigned int col_idx = icol * (blockDim.x * 2 + IPAD) + irow;
out[to] = tile[col_idx];
out[to + ny * BDIMX] = tile[col_idx + BDIMX];
}
} |
1e14912ebca30a13ca29192e1219b1062c84c67d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box3d3r-32x32-1-256_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 685
#define BENCH_RAD 3
#include "common.h"
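// Runs a radius-3 3-D box stencil (full 7x7x7 neighbourhood) for `timestep`
// iterations over a double-buffered array: scop=true drives the generated, tiled
// GPU kernel from the accompanying *_kernel header, scop=false runs the
// straightforward OpenMP reference loop below.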
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 7 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 3 - 3);
const AN5D_TYPE __c3Pad = (3);
#define __c3 c3
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
const AN5D_TYPE __halo3 = 3;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
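        // Tiling parameters emitted by the code generator: __sideNLen are the tile
        // extents per dimension (0 = time), __OlLen the halo overlap added on each
        // side, and the grid is a 1-D flattening of the three spatial tile counts.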
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
}
}
else if (__c0Len % __side0LenMax)
{
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
-0.176f*A[t%2][i-3][j][k] +
0.0010f*A[t%2][i-3][j-3][k-3] +
0.0020f*A[t%2][i-3][j-3][k-2] +
0.0030f*A[t%2][i-3][j-3][k-1] +
0.0040f*A[t%2][i-3][j-3][k] +
0.0050f*A[t%2][i-3][j-3][k+1] +
0.0060f*A[t%2][i-3][j-3][k+2] +
0.0070f*A[t%2][i-3][j-3][k+3] +
0.0080f*A[t%2][i-3][j-2][k-3] +
0.0090f*A[t%2][i-3][j-2][k-2] +
0.0100f*A[t%2][i-3][j-2][k-1] +
0.0110f*A[t%2][i-3][j-2][k] +
0.0120f*A[t%2][i-3][j-2][k+1] +
0.0130f*A[t%2][i-3][j-2][k+2] +
0.0140f*A[t%2][i-3][j-2][k+3] +
0.0150f*A[t%2][i-3][j-1][k-3] +
0.0160f*A[t%2][i-3][j-1][k-2] +
0.0170f*A[t%2][i-3][j-1][k-1] +
0.0180f*A[t%2][i-3][j-1][k] +
0.0190f*A[t%2][i-3][j-1][k+1] +
0.0200f*A[t%2][i-3][j-1][k+2] +
0.0210f*A[t%2][i-3][j-1][k+3] +
0.0220f*A[t%2][i-3][j][k-3] +
0.0230f*A[t%2][i-3][j][k-2] +
0.0240f*A[t%2][i-3][j][k-1] +
0.0250f*A[t%2][i-3][j][k+1] +
0.0260f*A[t%2][i-3][j][k+2] +
0.0270f*A[t%2][i-3][j][k+3] +
0.0280f*A[t%2][i-3][j+1][k-3] +
0.0290f*A[t%2][i-3][j+1][k-2] +
0.0300f*A[t%2][i-3][j+1][k-1] +
0.0310f*A[t%2][i-3][j+1][k] +
0.0320f*A[t%2][i-3][j+1][k+1] +
0.0330f*A[t%2][i-3][j+1][k+2] +
0.0340f*A[t%2][i-3][j+1][k+3] +
0.0350f*A[t%2][i-3][j+2][k-3] +
0.0360f*A[t%2][i-3][j+2][k-2] +
0.0370f*A[t%2][i-3][j+2][k-1] +
0.0380f*A[t%2][i-3][j+2][k] +
0.0390f*A[t%2][i-3][j+2][k+1] +
0.0400f*A[t%2][i-3][j+2][k+2] +
0.0410f*A[t%2][i-3][j+2][k+3] +
0.0420f*A[t%2][i-3][j+3][k-3] +
0.0430f*A[t%2][i-3][j+3][k-2] +
0.0440f*A[t%2][i-3][j+3][k-1] +
0.0450f*A[t%2][i-3][j+3][k] +
0.0460f*A[t%2][i-3][j+3][k+1] +
0.0470f*A[t%2][i-3][j+3][k+2] +
0.0480f*A[t%2][i-3][j+3][k+3] +
0.1808f*A[t%2][i-2][j][k] -
0.0011f*A[t%2][i-2][j-3][k-3] -
0.0021f*A[t%2][i-2][j-3][k-2] -
0.0031f*A[t%2][i-2][j-3][k-1] -
0.0041f*A[t%2][i-2][j-3][k] -
0.0051f*A[t%2][i-2][j-3][k+1] -
0.0061f*A[t%2][i-2][j-3][k+2] -
0.0071f*A[t%2][i-2][j-3][k+3] -
0.0081f*A[t%2][i-2][j-2][k-3] -
0.0091f*A[t%2][i-2][j-2][k-2] -
0.0101f*A[t%2][i-2][j-2][k-1] -
0.0111f*A[t%2][i-2][j-2][k] -
0.0121f*A[t%2][i-2][j-2][k+1] -
0.0131f*A[t%2][i-2][j-2][k+2] -
0.0141f*A[t%2][i-2][j-2][k+3] -
0.0151f*A[t%2][i-2][j-1][k-3] -
0.0161f*A[t%2][i-2][j-1][k-2] -
0.0171f*A[t%2][i-2][j-1][k-1] -
0.0181f*A[t%2][i-2][j-1][k] -
0.0191f*A[t%2][i-2][j-1][k+1] -
0.0201f*A[t%2][i-2][j-1][k+2] -
0.0211f*A[t%2][i-2][j-1][k+3] -
0.0221f*A[t%2][i-2][j][k-3] -
0.0231f*A[t%2][i-2][j][k-2] -
0.0241f*A[t%2][i-2][j][k-1] -
0.0251f*A[t%2][i-2][j][k+1] -
0.0261f*A[t%2][i-2][j][k+2] -
0.0271f*A[t%2][i-2][j][k+3] -
0.0281f*A[t%2][i-2][j+1][k-3] -
0.0291f*A[t%2][i-2][j+1][k-2] -
0.0301f*A[t%2][i-2][j+1][k-1] -
0.0311f*A[t%2][i-2][j+1][k] -
0.0321f*A[t%2][i-2][j+1][k+1] -
0.0331f*A[t%2][i-2][j+1][k+2] -
0.0341f*A[t%2][i-2][j+1][k+3] -
0.0351f*A[t%2][i-2][j+2][k-3] -
0.0361f*A[t%2][i-2][j+2][k-2] -
0.0371f*A[t%2][i-2][j+2][k-1] -
0.0381f*A[t%2][i-2][j+2][k] -
0.0391f*A[t%2][i-2][j+2][k+1] -
0.0401f*A[t%2][i-2][j+2][k+2] -
0.0411f*A[t%2][i-2][j+2][k+3] -
0.0421f*A[t%2][i-2][j+3][k-3] -
0.0431f*A[t%2][i-2][j+3][k-2] -
0.0441f*A[t%2][i-2][j+3][k-1] -
0.0451f*A[t%2][i-2][j+3][k] -
0.0461f*A[t%2][i-2][j+3][k+1] -
0.0471f*A[t%2][i-2][j+3][k+2] -
0.0481f*A[t%2][i-2][j+3][k+3] +
-0.1856f*A[t%2][i-1][j][k] +
0.0012f*A[t%2][i-1][j-3][k-3] +
0.0022f*A[t%2][i-1][j-3][k-2] +
0.0032f*A[t%2][i-1][j-3][k-1] +
0.0042f*A[t%2][i-1][j-3][k] +
0.0052f*A[t%2][i-1][j-3][k+1] +
0.0062f*A[t%2][i-1][j-3][k+2] +
0.0072f*A[t%2][i-1][j-3][k+3] +
0.0082f*A[t%2][i-1][j-2][k-3] +
0.0092f*A[t%2][i-1][j-2][k-2] +
0.0102f*A[t%2][i-1][j-2][k-1] +
0.0112f*A[t%2][i-1][j-2][k] +
0.0122f*A[t%2][i-1][j-2][k+1] +
0.0132f*A[t%2][i-1][j-2][k+2] +
0.0142f*A[t%2][i-1][j-2][k+3] +
0.0152f*A[t%2][i-1][j-1][k-3] +
0.0162f*A[t%2][i-1][j-1][k-2] +
0.0172f*A[t%2][i-1][j-1][k-1] +
0.0182f*A[t%2][i-1][j-1][k] +
0.0192f*A[t%2][i-1][j-1][k+1] +
0.0202f*A[t%2][i-1][j-1][k+2] +
0.0212f*A[t%2][i-1][j-1][k+3] +
0.0222f*A[t%2][i-1][j][k-3] +
0.0232f*A[t%2][i-1][j][k-2] +
0.0242f*A[t%2][i-1][j][k-1] +
0.0252f*A[t%2][i-1][j][k+1] +
0.0262f*A[t%2][i-1][j][k+2] +
0.0272f*A[t%2][i-1][j][k+3] +
0.0282f*A[t%2][i-1][j+1][k-3] +
0.0292f*A[t%2][i-1][j+1][k-2] +
0.0302f*A[t%2][i-1][j+1][k-1] +
0.0312f*A[t%2][i-1][j+1][k] +
0.0322f*A[t%2][i-1][j+1][k+1] +
0.0332f*A[t%2][i-1][j+1][k+2] +
0.0342f*A[t%2][i-1][j+1][k+3] +
0.0352f*A[t%2][i-1][j+2][k-3] +
0.0362f*A[t%2][i-1][j+2][k-2] +
0.0372f*A[t%2][i-1][j+2][k-1] +
0.0382f*A[t%2][i-1][j+2][k] +
0.0392f*A[t%2][i-1][j+2][k+1] +
0.0402f*A[t%2][i-1][j+2][k+2] +
0.0412f*A[t%2][i-1][j+2][k+3] +
0.0422f*A[t%2][i-1][j+3][k-3] +
0.0432f*A[t%2][i-1][j+3][k-2] +
0.0442f*A[t%2][i-1][j+3][k-1] +
0.0452f*A[t%2][i-1][j+3][k] +
0.0462f*A[t%2][i-1][j+3][k+1] +
0.0472f*A[t%2][i-1][j+3][k+2] +
0.0482f*A[t%2][i-1][j+3][k+3] +
-0.1904f*A[t%2][i][j][k] +
0.0013f*A[t%2][i][j-3][k-3] +
0.0023f*A[t%2][i][j-3][k-2] +
0.0033f*A[t%2][i][j-3][k-1] +
0.0043f*A[t%2][i][j-3][k] +
0.0053f*A[t%2][i][j-3][k+1] +
0.0063f*A[t%2][i][j-3][k+2] +
0.0073f*A[t%2][i][j-3][k+3] +
0.0083f*A[t%2][i][j-2][k-3] +
0.0093f*A[t%2][i][j-2][k-2] +
0.0103f*A[t%2][i][j-2][k-1] +
0.0113f*A[t%2][i][j-2][k] +
0.0123f*A[t%2][i][j-2][k+1] +
0.0133f*A[t%2][i][j-2][k+2] +
0.0143f*A[t%2][i][j-2][k+3] +
0.0153f*A[t%2][i][j-1][k-3] +
0.0163f*A[t%2][i][j-1][k-2] +
0.0173f*A[t%2][i][j-1][k-1] +
0.0183f*A[t%2][i][j-1][k] +
0.0193f*A[t%2][i][j-1][k+1] +
0.0203f*A[t%2][i][j-1][k+2] +
0.0213f*A[t%2][i][j-1][k+3] +
0.0223f*A[t%2][i][j][k-3] +
0.0233f*A[t%2][i][j][k-2] +
0.0243f*A[t%2][i][j][k-1] +
0.0253f*A[t%2][i][j][k+1] +
0.0263f*A[t%2][i][j][k+2] +
0.0273f*A[t%2][i][j][k+3] +
0.0283f*A[t%2][i][j+1][k-3] +
0.0293f*A[t%2][i][j+1][k-2] +
0.0303f*A[t%2][i][j+1][k-1] +
0.0313f*A[t%2][i][j+1][k] +
0.0323f*A[t%2][i][j+1][k+1] +
0.0333f*A[t%2][i][j+1][k+2] +
0.0343f*A[t%2][i][j+1][k+3] +
0.0353f*A[t%2][i][j+2][k-3] +
0.0363f*A[t%2][i][j+2][k-2] +
0.0373f*A[t%2][i][j+2][k-1] +
0.0383f*A[t%2][i][j+2][k] +
0.0393f*A[t%2][i][j+2][k+1] +
0.0403f*A[t%2][i][j+2][k+2] +
0.0413f*A[t%2][i][j+2][k+3] +
0.0423f*A[t%2][i][j+3][k-3] +
0.0433f*A[t%2][i][j+3][k-2] +
0.0443f*A[t%2][i][j+3][k-1] +
0.0453f*A[t%2][i][j+3][k] +
0.0463f*A[t%2][i][j+3][k+1] +
0.0473f*A[t%2][i][j+3][k+2] +
0.0483f*A[t%2][i][j+3][k+3] +
0.1952f*A[t%2][i+1][j][k] -
0.0014f*A[t%2][i+1][j-3][k-3] -
0.0024f*A[t%2][i+1][j-3][k-2] -
0.0034f*A[t%2][i+1][j-3][k-1] -
0.0044f*A[t%2][i+1][j-3][k] -
0.0054f*A[t%2][i+1][j-3][k+1] -
0.0064f*A[t%2][i+1][j-3][k+2] -
0.0074f*A[t%2][i+1][j-3][k+3] -
0.0084f*A[t%2][i+1][j-2][k-3] -
0.0094f*A[t%2][i+1][j-2][k-2] -
0.0104f*A[t%2][i+1][j-2][k-1] -
0.0114f*A[t%2][i+1][j-2][k] -
0.0124f*A[t%2][i+1][j-2][k+1] -
0.0134f*A[t%2][i+1][j-2][k+2] -
0.0144f*A[t%2][i+1][j-2][k+3] -
0.0154f*A[t%2][i+1][j-1][k-3] -
0.0164f*A[t%2][i+1][j-1][k-2] -
0.0174f*A[t%2][i+1][j-1][k-1] -
0.0184f*A[t%2][i+1][j-1][k] -
0.0194f*A[t%2][i+1][j-1][k+1] -
0.0204f*A[t%2][i+1][j-1][k+2] -
0.0214f*A[t%2][i+1][j-1][k+3] -
0.0224f*A[t%2][i+1][j][k-3] -
0.0234f*A[t%2][i+1][j][k-2] -
0.0244f*A[t%2][i+1][j][k-1] -
0.0254f*A[t%2][i+1][j][k+1] -
0.0264f*A[t%2][i+1][j][k+2] -
0.0274f*A[t%2][i+1][j][k+3] -
0.0284f*A[t%2][i+1][j+1][k-3] -
0.0294f*A[t%2][i+1][j+1][k-2] -
0.0304f*A[t%2][i+1][j+1][k-1] -
0.0314f*A[t%2][i+1][j+1][k] -
0.0324f*A[t%2][i+1][j+1][k+1] -
0.0334f*A[t%2][i+1][j+1][k+2] -
0.0344f*A[t%2][i+1][j+1][k+3] -
0.0354f*A[t%2][i+1][j+2][k-3] -
0.0364f*A[t%2][i+1][j+2][k-2] -
0.0374f*A[t%2][i+1][j+2][k-1] -
0.0384f*A[t%2][i+1][j+2][k] -
0.0394f*A[t%2][i+1][j+2][k+1] -
0.0404f*A[t%2][i+1][j+2][k+2] -
0.0414f*A[t%2][i+1][j+2][k+3] -
0.0424f*A[t%2][i+1][j+3][k-3] -
0.0434f*A[t%2][i+1][j+3][k-2] -
0.0444f*A[t%2][i+1][j+3][k-1] -
0.0454f*A[t%2][i+1][j+3][k] -
0.0464f*A[t%2][i+1][j+3][k+1] -
0.0474f*A[t%2][i+1][j+3][k+2] -
0.0484f*A[t%2][i+1][j+3][k+3] -
-0.300f*A[t%2][i+2][j][k] +
0.0015f*A[t%2][i+2][j-3][k-3] +
0.0025f*A[t%2][i+2][j-3][k-2] +
0.0035f*A[t%2][i+2][j-3][k-1] +
0.0045f*A[t%2][i+2][j-3][k] +
0.0055f*A[t%2][i+2][j-3][k+1] +
0.0065f*A[t%2][i+2][j-3][k+2] +
0.0075f*A[t%2][i+2][j-3][k+3] +
0.0085f*A[t%2][i+2][j-2][k-3] +
0.0095f*A[t%2][i+2][j-2][k-2] +
0.0105f*A[t%2][i+2][j-2][k-1] +
0.0115f*A[t%2][i+2][j-2][k] +
0.0125f*A[t%2][i+2][j-2][k+1] +
0.0135f*A[t%2][i+2][j-2][k+2] +
0.0145f*A[t%2][i+2][j-2][k+3] +
0.0155f*A[t%2][i+2][j-1][k-3] +
0.0165f*A[t%2][i+2][j-1][k-2] +
0.0175f*A[t%2][i+2][j-1][k-1] +
0.0185f*A[t%2][i+2][j-1][k] +
0.0195f*A[t%2][i+2][j-1][k+1] +
0.0205f*A[t%2][i+2][j-1][k+2] +
0.0215f*A[t%2][i+2][j-1][k+3] +
0.0225f*A[t%2][i+2][j][k-3] +
0.0235f*A[t%2][i+2][j][k-2] +
0.0245f*A[t%2][i+2][j][k-1] +
0.0255f*A[t%2][i+2][j][k+1] +
0.0265f*A[t%2][i+2][j][k+2] +
0.0275f*A[t%2][i+2][j][k+3] +
0.0285f*A[t%2][i+2][j+1][k-3] +
0.0295f*A[t%2][i+2][j+1][k-2] +
0.0305f*A[t%2][i+2][j+1][k-1] +
0.0315f*A[t%2][i+2][j+1][k] +
0.0325f*A[t%2][i+2][j+1][k+1] +
0.0335f*A[t%2][i+2][j+1][k+2] +
0.0345f*A[t%2][i+2][j+1][k+3] +
0.0355f*A[t%2][i+2][j+2][k-3] +
0.0365f*A[t%2][i+2][j+2][k-2] +
0.0375f*A[t%2][i+2][j+2][k-1] +
0.0385f*A[t%2][i+2][j+2][k] +
0.0395f*A[t%2][i+2][j+2][k+1] +
0.0405f*A[t%2][i+2][j+2][k+2] +
0.0415f*A[t%2][i+2][j+2][k+3] +
0.0425f*A[t%2][i+2][j+3][k-3] +
0.0435f*A[t%2][i+2][j+3][k-2] +
0.0445f*A[t%2][i+2][j+3][k-1] +
0.0455f*A[t%2][i+2][j+3][k] +
0.0465f*A[t%2][i+2][j+3][k+1] +
0.0475f*A[t%2][i+2][j+3][k+2] +
0.1485f*A[t%2][i+2][j+3][k+3] +
0.2048f*A[t%2][i+3][j][k] -
0.0016f*A[t%2][i+3][j-3][k-3] -
0.0026f*A[t%2][i+3][j-3][k-2] -
0.0036f*A[t%2][i+3][j-3][k-1] -
0.0046f*A[t%2][i+3][j-3][k] -
0.0056f*A[t%2][i+3][j-3][k+1] -
0.0066f*A[t%2][i+3][j-3][k+2] -
0.0076f*A[t%2][i+3][j-3][k+3] -
0.0086f*A[t%2][i+3][j-2][k-3] -
0.0096f*A[t%2][i+3][j-2][k-2] -
0.0106f*A[t%2][i+3][j-2][k-1] -
0.0116f*A[t%2][i+3][j-2][k] -
0.0126f*A[t%2][i+3][j-2][k+1] -
0.0136f*A[t%2][i+3][j-2][k+2] -
0.0146f*A[t%2][i+3][j-2][k+3] -
0.0156f*A[t%2][i+3][j-1][k-3] -
0.0166f*A[t%2][i+3][j-1][k-2] -
0.0176f*A[t%2][i+3][j-1][k-1] -
0.0186f*A[t%2][i+3][j-1][k] -
0.0196f*A[t%2][i+3][j-1][k+1] -
0.0206f*A[t%2][i+3][j-1][k+2] -
0.0216f*A[t%2][i+3][j-1][k+3] -
0.0226f*A[t%2][i+3][j][k-3] -
0.0236f*A[t%2][i+3][j][k-2] -
0.0246f*A[t%2][i+3][j][k-1] -
0.0256f*A[t%2][i+3][j][k+1] -
0.0266f*A[t%2][i+3][j][k+2] -
0.0276f*A[t%2][i+3][j][k+3] -
0.0286f*A[t%2][i+3][j+1][k-3] -
0.0296f*A[t%2][i+3][j+1][k-2] -
0.0306f*A[t%2][i+3][j+1][k-1] -
0.0316f*A[t%2][i+3][j+1][k] -
0.0326f*A[t%2][i+3][j+1][k+1] -
0.0336f*A[t%2][i+3][j+1][k+2] -
0.0346f*A[t%2][i+3][j+1][k+3] -
0.0356f*A[t%2][i+3][j+2][k-3] -
0.0366f*A[t%2][i+3][j+2][k-2] -
0.0376f*A[t%2][i+3][j+2][k-1] -
0.0386f*A[t%2][i+3][j+2][k] -
0.0396f*A[t%2][i+3][j+2][k+1] -
0.0406f*A[t%2][i+3][j+2][k+2] -
0.0416f*A[t%2][i+3][j+2][k+3] -
0.0426f*A[t%2][i+3][j+3][k-3] -
0.0436f*A[t%2][i+3][j+3][k-2] -
0.0446f*A[t%2][i+3][j+3][k-1] -
0.0456f*A[t%2][i+3][j+3][k] -
0.0466f*A[t%2][i+3][j+3][k+1] -
0.0476f*A[t%2][i+3][j+3][k+2] -
0.0486f*A[t%2][i+3][j+3][k+3];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 1e14912ebca30a13ca29192e1219b1062c84c67d.cu | #include <assert.h>
#include <stdio.h>
#include "box3d3r-32x32-1-256_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 685
#define BENCH_RAD 3
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 7 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 3 - 3);
const AN5D_TYPE __c3Pad = (3);
#define __c3 c3
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
const AN5D_TYPE __halo3 = 3;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
}
}
else if (__c0Len % __side0LenMax)
{
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
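    // Reference path: plain triple-nested interior sweep with OpenMP, ping-ponging
    // between the two time planes A[t%2] -> A[(t+1)%2].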
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
-0.176f*A[t%2][i-3][j][k] +
0.0010f*A[t%2][i-3][j-3][k-3] +
0.0020f*A[t%2][i-3][j-3][k-2] +
0.0030f*A[t%2][i-3][j-3][k-1] +
0.0040f*A[t%2][i-3][j-3][k] +
0.0050f*A[t%2][i-3][j-3][k+1] +
0.0060f*A[t%2][i-3][j-3][k+2] +
0.0070f*A[t%2][i-3][j-3][k+3] +
0.0080f*A[t%2][i-3][j-2][k-3] +
0.0090f*A[t%2][i-3][j-2][k-2] +
0.0100f*A[t%2][i-3][j-2][k-1] +
0.0110f*A[t%2][i-3][j-2][k] +
0.0120f*A[t%2][i-3][j-2][k+1] +
0.0130f*A[t%2][i-3][j-2][k+2] +
0.0140f*A[t%2][i-3][j-2][k+3] +
0.0150f*A[t%2][i-3][j-1][k-3] +
0.0160f*A[t%2][i-3][j-1][k-2] +
0.0170f*A[t%2][i-3][j-1][k-1] +
0.0180f*A[t%2][i-3][j-1][k] +
0.0190f*A[t%2][i-3][j-1][k+1] +
0.0200f*A[t%2][i-3][j-1][k+2] +
0.0210f*A[t%2][i-3][j-1][k+3] +
0.0220f*A[t%2][i-3][j][k-3] +
0.0230f*A[t%2][i-3][j][k-2] +
0.0240f*A[t%2][i-3][j][k-1] +
0.0250f*A[t%2][i-3][j][k+1] +
0.0260f*A[t%2][i-3][j][k+2] +
0.0270f*A[t%2][i-3][j][k+3] +
0.0280f*A[t%2][i-3][j+1][k-3] +
0.0290f*A[t%2][i-3][j+1][k-2] +
0.0300f*A[t%2][i-3][j+1][k-1] +
0.0310f*A[t%2][i-3][j+1][k] +
0.0320f*A[t%2][i-3][j+1][k+1] +
0.0330f*A[t%2][i-3][j+1][k+2] +
0.0340f*A[t%2][i-3][j+1][k+3] +
0.0350f*A[t%2][i-3][j+2][k-3] +
0.0360f*A[t%2][i-3][j+2][k-2] +
0.0370f*A[t%2][i-3][j+2][k-1] +
0.0380f*A[t%2][i-3][j+2][k] +
0.0390f*A[t%2][i-3][j+2][k+1] +
0.0400f*A[t%2][i-3][j+2][k+2] +
0.0410f*A[t%2][i-3][j+2][k+3] +
0.0420f*A[t%2][i-3][j+3][k-3] +
0.0430f*A[t%2][i-3][j+3][k-2] +
0.0440f*A[t%2][i-3][j+3][k-1] +
0.0450f*A[t%2][i-3][j+3][k] +
0.0460f*A[t%2][i-3][j+3][k+1] +
0.0470f*A[t%2][i-3][j+3][k+2] +
0.0480f*A[t%2][i-3][j+3][k+3] +
0.1808f*A[t%2][i-2][j][k] -
0.0011f*A[t%2][i-2][j-3][k-3] -
0.0021f*A[t%2][i-2][j-3][k-2] -
0.0031f*A[t%2][i-2][j-3][k-1] -
0.0041f*A[t%2][i-2][j-3][k] -
0.0051f*A[t%2][i-2][j-3][k+1] -
0.0061f*A[t%2][i-2][j-3][k+2] -
0.0071f*A[t%2][i-2][j-3][k+3] -
0.0081f*A[t%2][i-2][j-2][k-3] -
0.0091f*A[t%2][i-2][j-2][k-2] -
0.0101f*A[t%2][i-2][j-2][k-1] -
0.0111f*A[t%2][i-2][j-2][k] -
0.0121f*A[t%2][i-2][j-2][k+1] -
0.0131f*A[t%2][i-2][j-2][k+2] -
0.0141f*A[t%2][i-2][j-2][k+3] -
0.0151f*A[t%2][i-2][j-1][k-3] -
0.0161f*A[t%2][i-2][j-1][k-2] -
0.0171f*A[t%2][i-2][j-1][k-1] -
0.0181f*A[t%2][i-2][j-1][k] -
0.0191f*A[t%2][i-2][j-1][k+1] -
0.0201f*A[t%2][i-2][j-1][k+2] -
0.0211f*A[t%2][i-2][j-1][k+3] -
0.0221f*A[t%2][i-2][j][k-3] -
0.0231f*A[t%2][i-2][j][k-2] -
0.0241f*A[t%2][i-2][j][k-1] -
0.0251f*A[t%2][i-2][j][k+1] -
0.0261f*A[t%2][i-2][j][k+2] -
0.0271f*A[t%2][i-2][j][k+3] -
0.0281f*A[t%2][i-2][j+1][k-3] -
0.0291f*A[t%2][i-2][j+1][k-2] -
0.0301f*A[t%2][i-2][j+1][k-1] -
0.0311f*A[t%2][i-2][j+1][k] -
0.0321f*A[t%2][i-2][j+1][k+1] -
0.0331f*A[t%2][i-2][j+1][k+2] -
0.0341f*A[t%2][i-2][j+1][k+3] -
0.0351f*A[t%2][i-2][j+2][k-3] -
0.0361f*A[t%2][i-2][j+2][k-2] -
0.0371f*A[t%2][i-2][j+2][k-1] -
0.0381f*A[t%2][i-2][j+2][k] -
0.0391f*A[t%2][i-2][j+2][k+1] -
0.0401f*A[t%2][i-2][j+2][k+2] -
0.0411f*A[t%2][i-2][j+2][k+3] -
0.0421f*A[t%2][i-2][j+3][k-3] -
0.0431f*A[t%2][i-2][j+3][k-2] -
0.0441f*A[t%2][i-2][j+3][k-1] -
0.0451f*A[t%2][i-2][j+3][k] -
0.0461f*A[t%2][i-2][j+3][k+1] -
0.0471f*A[t%2][i-2][j+3][k+2] -
0.0481f*A[t%2][i-2][j+3][k+3] +
-0.1856f*A[t%2][i-1][j][k] +
0.0012f*A[t%2][i-1][j-3][k-3] +
0.0022f*A[t%2][i-1][j-3][k-2] +
0.0032f*A[t%2][i-1][j-3][k-1] +
0.0042f*A[t%2][i-1][j-3][k] +
0.0052f*A[t%2][i-1][j-3][k+1] +
0.0062f*A[t%2][i-1][j-3][k+2] +
0.0072f*A[t%2][i-1][j-3][k+3] +
0.0082f*A[t%2][i-1][j-2][k-3] +
0.0092f*A[t%2][i-1][j-2][k-2] +
0.0102f*A[t%2][i-1][j-2][k-1] +
0.0112f*A[t%2][i-1][j-2][k] +
0.0122f*A[t%2][i-1][j-2][k+1] +
0.0132f*A[t%2][i-1][j-2][k+2] +
0.0142f*A[t%2][i-1][j-2][k+3] +
0.0152f*A[t%2][i-1][j-1][k-3] +
0.0162f*A[t%2][i-1][j-1][k-2] +
0.0172f*A[t%2][i-1][j-1][k-1] +
0.0182f*A[t%2][i-1][j-1][k] +
0.0192f*A[t%2][i-1][j-1][k+1] +
0.0202f*A[t%2][i-1][j-1][k+2] +
0.0212f*A[t%2][i-1][j-1][k+3] +
0.0222f*A[t%2][i-1][j][k-3] +
0.0232f*A[t%2][i-1][j][k-2] +
0.0242f*A[t%2][i-1][j][k-1] +
0.0252f*A[t%2][i-1][j][k+1] +
0.0262f*A[t%2][i-1][j][k+2] +
0.0272f*A[t%2][i-1][j][k+3] +
0.0282f*A[t%2][i-1][j+1][k-3] +
0.0292f*A[t%2][i-1][j+1][k-2] +
0.0302f*A[t%2][i-1][j+1][k-1] +
0.0312f*A[t%2][i-1][j+1][k] +
0.0322f*A[t%2][i-1][j+1][k+1] +
0.0332f*A[t%2][i-1][j+1][k+2] +
0.0342f*A[t%2][i-1][j+1][k+3] +
0.0352f*A[t%2][i-1][j+2][k-3] +
0.0362f*A[t%2][i-1][j+2][k-2] +
0.0372f*A[t%2][i-1][j+2][k-1] +
0.0382f*A[t%2][i-1][j+2][k] +
0.0392f*A[t%2][i-1][j+2][k+1] +
0.0402f*A[t%2][i-1][j+2][k+2] +
0.0412f*A[t%2][i-1][j+2][k+3] +
0.0422f*A[t%2][i-1][j+3][k-3] +
0.0432f*A[t%2][i-1][j+3][k-2] +
0.0442f*A[t%2][i-1][j+3][k-1] +
0.0452f*A[t%2][i-1][j+3][k] +
0.0462f*A[t%2][i-1][j+3][k+1] +
0.0472f*A[t%2][i-1][j+3][k+2] +
0.0482f*A[t%2][i-1][j+3][k+3] +
-0.1904f*A[t%2][i][j][k] +
0.0013f*A[t%2][i][j-3][k-3] +
0.0023f*A[t%2][i][j-3][k-2] +
0.0033f*A[t%2][i][j-3][k-1] +
0.0043f*A[t%2][i][j-3][k] +
0.0053f*A[t%2][i][j-3][k+1] +
0.0063f*A[t%2][i][j-3][k+2] +
0.0073f*A[t%2][i][j-3][k+3] +
0.0083f*A[t%2][i][j-2][k-3] +
0.0093f*A[t%2][i][j-2][k-2] +
0.0103f*A[t%2][i][j-2][k-1] +
0.0113f*A[t%2][i][j-2][k] +
0.0123f*A[t%2][i][j-2][k+1] +
0.0133f*A[t%2][i][j-2][k+2] +
0.0143f*A[t%2][i][j-2][k+3] +
0.0153f*A[t%2][i][j-1][k-3] +
0.0163f*A[t%2][i][j-1][k-2] +
0.0173f*A[t%2][i][j-1][k-1] +
0.0183f*A[t%2][i][j-1][k] +
0.0193f*A[t%2][i][j-1][k+1] +
0.0203f*A[t%2][i][j-1][k+2] +
0.0213f*A[t%2][i][j-1][k+3] +
0.0223f*A[t%2][i][j][k-3] +
0.0233f*A[t%2][i][j][k-2] +
0.0243f*A[t%2][i][j][k-1] +
0.0253f*A[t%2][i][j][k+1] +
0.0263f*A[t%2][i][j][k+2] +
0.0273f*A[t%2][i][j][k+3] +
0.0283f*A[t%2][i][j+1][k-3] +
0.0293f*A[t%2][i][j+1][k-2] +
0.0303f*A[t%2][i][j+1][k-1] +
0.0313f*A[t%2][i][j+1][k] +
0.0323f*A[t%2][i][j+1][k+1] +
0.0333f*A[t%2][i][j+1][k+2] +
0.0343f*A[t%2][i][j+1][k+3] +
0.0353f*A[t%2][i][j+2][k-3] +
0.0363f*A[t%2][i][j+2][k-2] +
0.0373f*A[t%2][i][j+2][k-1] +
0.0383f*A[t%2][i][j+2][k] +
0.0393f*A[t%2][i][j+2][k+1] +
0.0403f*A[t%2][i][j+2][k+2] +
0.0413f*A[t%2][i][j+2][k+3] +
0.0423f*A[t%2][i][j+3][k-3] +
0.0433f*A[t%2][i][j+3][k-2] +
0.0443f*A[t%2][i][j+3][k-1] +
0.0453f*A[t%2][i][j+3][k] +
0.0463f*A[t%2][i][j+3][k+1] +
0.0473f*A[t%2][i][j+3][k+2] +
0.0483f*A[t%2][i][j+3][k+3] +
0.1952f*A[t%2][i+1][j][k] -
0.0014f*A[t%2][i+1][j-3][k-3] -
0.0024f*A[t%2][i+1][j-3][k-2] -
0.0034f*A[t%2][i+1][j-3][k-1] -
0.0044f*A[t%2][i+1][j-3][k] -
0.0054f*A[t%2][i+1][j-3][k+1] -
0.0064f*A[t%2][i+1][j-3][k+2] -
0.0074f*A[t%2][i+1][j-3][k+3] -
0.0084f*A[t%2][i+1][j-2][k-3] -
0.0094f*A[t%2][i+1][j-2][k-2] -
0.0104f*A[t%2][i+1][j-2][k-1] -
0.0114f*A[t%2][i+1][j-2][k] -
0.0124f*A[t%2][i+1][j-2][k+1] -
0.0134f*A[t%2][i+1][j-2][k+2] -
0.0144f*A[t%2][i+1][j-2][k+3] -
0.0154f*A[t%2][i+1][j-1][k-3] -
0.0164f*A[t%2][i+1][j-1][k-2] -
0.0174f*A[t%2][i+1][j-1][k-1] -
0.0184f*A[t%2][i+1][j-1][k] -
0.0194f*A[t%2][i+1][j-1][k+1] -
0.0204f*A[t%2][i+1][j-1][k+2] -
0.0214f*A[t%2][i+1][j-1][k+3] -
0.0224f*A[t%2][i+1][j][k-3] -
0.0234f*A[t%2][i+1][j][k-2] -
0.0244f*A[t%2][i+1][j][k-1] -
0.0254f*A[t%2][i+1][j][k+1] -
0.0264f*A[t%2][i+1][j][k+2] -
0.0274f*A[t%2][i+1][j][k+3] -
0.0284f*A[t%2][i+1][j+1][k-3] -
0.0294f*A[t%2][i+1][j+1][k-2] -
0.0304f*A[t%2][i+1][j+1][k-1] -
0.0314f*A[t%2][i+1][j+1][k] -
0.0324f*A[t%2][i+1][j+1][k+1] -
0.0334f*A[t%2][i+1][j+1][k+2] -
0.0344f*A[t%2][i+1][j+1][k+3] -
0.0354f*A[t%2][i+1][j+2][k-3] -
0.0364f*A[t%2][i+1][j+2][k-2] -
0.0374f*A[t%2][i+1][j+2][k-1] -
0.0384f*A[t%2][i+1][j+2][k] -
0.0394f*A[t%2][i+1][j+2][k+1] -
0.0404f*A[t%2][i+1][j+2][k+2] -
0.0414f*A[t%2][i+1][j+2][k+3] -
0.0424f*A[t%2][i+1][j+3][k-3] -
0.0434f*A[t%2][i+1][j+3][k-2] -
0.0444f*A[t%2][i+1][j+3][k-1] -
0.0454f*A[t%2][i+1][j+3][k] -
0.0464f*A[t%2][i+1][j+3][k+1] -
0.0474f*A[t%2][i+1][j+3][k+2] -
0.0484f*A[t%2][i+1][j+3][k+3] -
-0.300f*A[t%2][i+2][j][k] +
0.0015f*A[t%2][i+2][j-3][k-3] +
0.0025f*A[t%2][i+2][j-3][k-2] +
0.0035f*A[t%2][i+2][j-3][k-1] +
0.0045f*A[t%2][i+2][j-3][k] +
0.0055f*A[t%2][i+2][j-3][k+1] +
0.0065f*A[t%2][i+2][j-3][k+2] +
0.0075f*A[t%2][i+2][j-3][k+3] +
0.0085f*A[t%2][i+2][j-2][k-3] +
0.0095f*A[t%2][i+2][j-2][k-2] +
0.0105f*A[t%2][i+2][j-2][k-1] +
0.0115f*A[t%2][i+2][j-2][k] +
0.0125f*A[t%2][i+2][j-2][k+1] +
0.0135f*A[t%2][i+2][j-2][k+2] +
0.0145f*A[t%2][i+2][j-2][k+3] +
0.0155f*A[t%2][i+2][j-1][k-3] +
0.0165f*A[t%2][i+2][j-1][k-2] +
0.0175f*A[t%2][i+2][j-1][k-1] +
0.0185f*A[t%2][i+2][j-1][k] +
0.0195f*A[t%2][i+2][j-1][k+1] +
0.0205f*A[t%2][i+2][j-1][k+2] +
0.0215f*A[t%2][i+2][j-1][k+3] +
0.0225f*A[t%2][i+2][j][k-3] +
0.0235f*A[t%2][i+2][j][k-2] +
0.0245f*A[t%2][i+2][j][k-1] +
0.0255f*A[t%2][i+2][j][k+1] +
0.0265f*A[t%2][i+2][j][k+2] +
0.0275f*A[t%2][i+2][j][k+3] +
0.0285f*A[t%2][i+2][j+1][k-3] +
0.0295f*A[t%2][i+2][j+1][k-2] +
0.0305f*A[t%2][i+2][j+1][k-1] +
0.0315f*A[t%2][i+2][j+1][k] +
0.0325f*A[t%2][i+2][j+1][k+1] +
0.0335f*A[t%2][i+2][j+1][k+2] +
0.0345f*A[t%2][i+2][j+1][k+3] +
0.0355f*A[t%2][i+2][j+2][k-3] +
0.0365f*A[t%2][i+2][j+2][k-2] +
0.0375f*A[t%2][i+2][j+2][k-1] +
0.0385f*A[t%2][i+2][j+2][k] +
0.0395f*A[t%2][i+2][j+2][k+1] +
0.0405f*A[t%2][i+2][j+2][k+2] +
0.0415f*A[t%2][i+2][j+2][k+3] +
0.0425f*A[t%2][i+2][j+3][k-3] +
0.0435f*A[t%2][i+2][j+3][k-2] +
0.0445f*A[t%2][i+2][j+3][k-1] +
0.0455f*A[t%2][i+2][j+3][k] +
0.0465f*A[t%2][i+2][j+3][k+1] +
0.0475f*A[t%2][i+2][j+3][k+2] +
0.1485f*A[t%2][i+2][j+3][k+3] +
0.2048f*A[t%2][i+3][j][k] -
0.0016f*A[t%2][i+3][j-3][k-3] -
0.0026f*A[t%2][i+3][j-3][k-2] -
0.0036f*A[t%2][i+3][j-3][k-1] -
0.0046f*A[t%2][i+3][j-3][k] -
0.0056f*A[t%2][i+3][j-3][k+1] -
0.0066f*A[t%2][i+3][j-3][k+2] -
0.0076f*A[t%2][i+3][j-3][k+3] -
0.0086f*A[t%2][i+3][j-2][k-3] -
0.0096f*A[t%2][i+3][j-2][k-2] -
0.0106f*A[t%2][i+3][j-2][k-1] -
0.0116f*A[t%2][i+3][j-2][k] -
0.0126f*A[t%2][i+3][j-2][k+1] -
0.0136f*A[t%2][i+3][j-2][k+2] -
0.0146f*A[t%2][i+3][j-2][k+3] -
0.0156f*A[t%2][i+3][j-1][k-3] -
0.0166f*A[t%2][i+3][j-1][k-2] -
0.0176f*A[t%2][i+3][j-1][k-1] -
0.0186f*A[t%2][i+3][j-1][k] -
0.0196f*A[t%2][i+3][j-1][k+1] -
0.0206f*A[t%2][i+3][j-1][k+2] -
0.0216f*A[t%2][i+3][j-1][k+3] -
0.0226f*A[t%2][i+3][j][k-3] -
0.0236f*A[t%2][i+3][j][k-2] -
0.0246f*A[t%2][i+3][j][k-1] -
0.0256f*A[t%2][i+3][j][k+1] -
0.0266f*A[t%2][i+3][j][k+2] -
0.0276f*A[t%2][i+3][j][k+3] -
0.0286f*A[t%2][i+3][j+1][k-3] -
0.0296f*A[t%2][i+3][j+1][k-2] -
0.0306f*A[t%2][i+3][j+1][k-1] -
0.0316f*A[t%2][i+3][j+1][k] -
0.0326f*A[t%2][i+3][j+1][k+1] -
0.0336f*A[t%2][i+3][j+1][k+2] -
0.0346f*A[t%2][i+3][j+1][k+3] -
0.0356f*A[t%2][i+3][j+2][k-3] -
0.0366f*A[t%2][i+3][j+2][k-2] -
0.0376f*A[t%2][i+3][j+2][k-1] -
0.0386f*A[t%2][i+3][j+2][k] -
0.0396f*A[t%2][i+3][j+2][k+1] -
0.0406f*A[t%2][i+3][j+2][k+2] -
0.0416f*A[t%2][i+3][j+2][k+3] -
0.0426f*A[t%2][i+3][j+3][k-3] -
0.0436f*A[t%2][i+3][j+3][k-2] -
0.0446f*A[t%2][i+3][j+3][k-1] -
0.0456f*A[t%2][i+3][j+3][k] -
0.0466f*A[t%2][i+3][j+3][k+1] -
0.0476f*A[t%2][i+3][j+3][k+2] -
0.0486f*A[t%2][i+3][j+3][k+3];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
bf09315c2b2d1ad48510b32319c37549d8a93520.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
__global__ void sparse_im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * (2*kernel_w+2*kernel_h-4);
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
int i=0, j=0;
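    // Walk the kernel window border clockwise (stage 1: right along the top
    // row, stage 2: down the right column, stage 3: left along the bottom row,
    // stage 4: up the left column) and finish at the centre element, so only
    // 2*kernel_w + 2*kernel_h - 3 of the kernel_h*kernel_w taps are copied
    // per input channel.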
for (int id=0; id<=2*kernel_w+2*kernel_h-4; id++) {
if (id>=0 && id<kernel_w-1) { //stage 1
j++;
} else if (id>=kernel_w-1 && id<kernel_w+kernel_h-2) { //stage 2
i++;
} else if (id>=kernel_w+kernel_h-2 && id<2*kernel_w+kernel_h-3) { //stage 3
j--;
} else if (id>=2*kernel_w+kernel_h-3 && id<2*kernel_w+2*kernel_h-4) { //stage 4
i--;
} else {
i=kernel_h/2;
j=kernel_w/2;
}
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void sparse_im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sparse_im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
double* data_col);
template void sparse_im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* data_col);
template void sparse_im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize
+ (w - w_col * stride_w);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
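    // offset plus the two coefficients reproduce the data_col index of the
    // commented-out reference loop above without recomputing c_col in the
    // inner loop.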
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, Dtype* data_im) {
int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, double* data_im);
} // namespace caffe
| bf09315c2b2d1ad48510b32319c37549d8a93520.cu | #include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
__global__ void sparse_im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * (2*kernel_w+2*kernel_h-4);
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
int i=0, j=0;
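    // Walk the kernel window border clockwise (stage 1: right along the top
    // row, stage 2: down the right column, stage 3: left along the bottom row,
    // stage 4: up the left column) and finish at the centre element, so only
    // 2*kernel_w + 2*kernel_h - 3 of the kernel_h*kernel_w taps are copied
    // per input channel.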
for (int id=0; id<=2*kernel_w+2*kernel_h-4; id++) {
if (id>=0 && id<kernel_w-1) { //stage 1
j++;
} else if (id>=kernel_w-1 && id<kernel_w+kernel_h-2) { //stage 2
i++;
} else if (id>=kernel_w+kernel_h-2 && id<2*kernel_w+kernel_h-3) { //stage 3
j--;
} else if (id>=2*kernel_w+kernel_h-3 && id<2*kernel_w+2*kernel_h-4) { //stage 4
i--;
} else {
i=kernel_h/2;
j=kernel_w/2;
}
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void sparse_im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
sparse_im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
double* data_col);
template void sparse_im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* data_col);
template void sparse_im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize
+ (w - w_col * stride_w);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
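    // offset plus the two coefficients reproduce the data_col index of the
    // commented-out reference loop above without recomputing c_col in the
    // inner loop.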
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, Dtype* data_im) {
int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, double* data_im);
} // namespace caffe
|
b328bfe75d152253caf70c2f156d47410b6e1c59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sobelEdgeDetectionSharedMemUnrollCoalsed(int *input, int *output, int width, int height, int thresh) {
__shared__ int shMem[4 * _TILESIZE_2 * _TILESIZE_2 ];
int num = _UNROLL_;
int size = num * _TILESIZE_2;
int i = blockIdx.x * (num * _TILESIZE_) + threadIdx.x;
int j = blockIdx.y * (num * _TILESIZE_) + threadIdx.y;
int xind = threadIdx.x;
int yind = threadIdx.y;
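  // Stage _UNROLL_ x _UNROLL_ sub-tiles of the input image into shared memory,
  // each sub-tile offset by _TILESIZE_ in x and y; shMem uses a row stride of
  // _UNROLL_ * _TILESIZE_2 so the same (xind, yind) indices can be reused for
  // every sub-tile in the compute phase below.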
for(int x = 0; x < num; x++)
{
for(int y = 0; y < num; y++)
{
int xOffset = x * (_TILESIZE_), yOffset = y * (_TILESIZE_);
shMem[ size * (yind + yOffset) + (xind + xOffset)] = input[(j + yOffset) * width + (i + xOffset)];
}
}
__syncthreads();
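  // Interior threads apply the two 3x3 Sobel operators (x-gradient sum1,
  // y-gradient sum2) to each sub-tile in shared memory and threshold the
  // squared gradient magnitude to produce a binary edge map.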
if (i < width - _TILESIZE_ && j < height - _TILESIZE_ && xind > 0 && yind > 0 && xind < (_TILESIZE_2 - 1) && yind < (_TILESIZE_2 - 1))
{
for(int x = 0; x < num; x++)
{
for(int y = 0; y < num; y++)
{
int xOffset = x * _TILESIZE_, yOffset = y * _TILESIZE_;
int sum1 = shMem[(xind + 1 + xOffset) + size * (yind - 1 + yOffset)] - shMem[(xind - 1 + xOffset) + size * (yind - 1 + yOffset)]
+ 2 * shMem[(xind + 1 + xOffset) + size * (yind + yOffset)] - 2 * shMem[(xind - 1 + xOffset) + size * (yind + yOffset)]
+ shMem[(xind + 1 + xOffset) + size * (yind + 1 + yOffset)] - shMem[(xind - 1 + xOffset) + size * (yind + 1 + yOffset)];
int sum2 = shMem[(xind - 1 + xOffset) + size * (yind - 1 + yOffset)] + 2 * shMem[(xind + xOffset) + size * (yind - 1 + yOffset)] + shMem[(xind + 1 + xOffset) + size * (yind - 1 + yOffset)]
- shMem[(xind - 1 + xOffset) + size * (yind + 1 + yOffset)] - 2 * shMem[(xind + xOffset) + size * (yind + 1 + yOffset)] - shMem[(xind + 1 + xOffset) + size * (yind + 1 + yOffset)];
int magnitude = sum1 * sum1 + sum2 * sum2;
int index = (j + yOffset) * width + (i + xOffset);
if(magnitude > thresh)
output[index] = 255;
else
output[index] = 0;
}
}
}
} | b328bfe75d152253caf70c2f156d47410b6e1c59.cu | #include "includes.h"
__global__ void sobelEdgeDetectionSharedMemUnrollCoalsed(int *input, int *output, int width, int height, int thresh) {
__shared__ int shMem[4 * _TILESIZE_2 * _TILESIZE_2 ];
int num = _UNROLL_;
int size = num * _TILESIZE_2;
int i = blockIdx.x * (num * _TILESIZE_) + threadIdx.x;
int j = blockIdx.y * (num * _TILESIZE_) + threadIdx.y;
int xind = threadIdx.x;
int yind = threadIdx.y;
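  // Stage _UNROLL_ x _UNROLL_ sub-tiles of the input image into shared memory,
  // each sub-tile offset by _TILESIZE_ in x and y; shMem uses a row stride of
  // _UNROLL_ * _TILESIZE_2 so the same (xind, yind) indices can be reused for
  // every sub-tile in the compute phase below.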
for(int x = 0; x < num; x++)
{
for(int y = 0; y < num; y++)
{
int xOffset = x * (_TILESIZE_), yOffset = y * (_TILESIZE_);
shMem[ size * (yind + yOffset) + (xind + xOffset)] = input[(j + yOffset) * width + (i + xOffset)];
}
}
__syncthreads();
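  // Interior threads apply the two 3x3 Sobel operators (x-gradient sum1,
  // y-gradient sum2) to each sub-tile in shared memory and threshold the
  // squared gradient magnitude to produce a binary edge map.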
if (i < width - _TILESIZE_ && j < height - _TILESIZE_ && xind > 0 && yind > 0 && xind < (_TILESIZE_2 - 1) && yind < (_TILESIZE_2 - 1))
{
for(int x = 0; x < num; x++)
{
for(int y = 0; y < num; y++)
{
int xOffset = x * _TILESIZE_, yOffset = y * _TILESIZE_;
int sum1 = shMem[(xind + 1 + xOffset) + size * (yind - 1 + yOffset)] - shMem[(xind - 1 + xOffset) + size * (yind - 1 + yOffset)]
+ 2 * shMem[(xind + 1 + xOffset) + size * (yind + yOffset)] - 2 * shMem[(xind - 1 + xOffset) + size * (yind + yOffset)]
+ shMem[(xind + 1 + xOffset) + size * (yind + 1 + yOffset)] - shMem[(xind - 1 + xOffset) + size * (yind + 1 + yOffset)];
int sum2 = shMem[(xind - 1 + xOffset) + size * (yind - 1 + yOffset)] + 2 * shMem[(xind + xOffset) + size * (yind - 1 + yOffset)] + shMem[(xind + 1 + xOffset) + size * (yind - 1 + yOffset)]
- shMem[(xind - 1 + xOffset) + size * (yind + 1 + yOffset)] - 2 * shMem[(xind + xOffset) + size * (yind + 1 + yOffset)] - shMem[(xind + 1 + xOffset) + size * (yind + 1 + yOffset)];
int magnitude = sum1 * sum1 + sum2 * sum2;
int index = (j + yOffset) * width + (i + xOffset);
if(magnitude > thresh)
output[index] = 255;
else
output[index] = 0;
}
}
}
} |
99a12b78e233e8fbd52844ac91bb32be418edf58.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <iostream>
#include <vector>
#include <math.h>
#include <fstream>
void checkCUDAError(const char *msg);
#include <hip/hip_runtime.h>
using namespace std;
// --------------------INPUT DATA---------------------
const int Nx = 24, Ny = 120, Nz = 20; // Number of mass points
float maxtime = 60; // End time [sec]
const int Nstep = 1200; // Number of time steps
__device__ __constant__ float dt = 0.05; // maxtime / Nstep; // Time step size [sec]
float dtcpu=0.05;
const int xlength = (4 + 2 * Nx)*(2 + Ny)*(2 + Nz); // Solution array in x-direction
const int ylength = (2 + Nx)*(4 + 2 * Ny)*(2 + Nz); // Solution array in y-direction
const int zlength = (2 + Nx)*(2 + Ny)*(4 + 2 * Nz); // Solution array in z-direction
const int masslength = Nx * Ny * Nz;
const int kxlength = Nz * Ny * (Nx + 1);
const int kylength = Nz * Nx * (Ny + 1);
const int kzlength = Ny * Nx * (Nz + 1);
const int bxlength = kxlength;
const int bylength = kylength;
const int bzlength = kzlength;
//------------------------DEVICE FUNCTIONS----------------------------//
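// The accessors below map 1-based grid indices (i, j, k) into the flattened
// arrays: displacement and velocity components are interleaved (odd/even
// slots) inside xold/yold/zold, while the mass, stiffness, damping and force
// arrays are plain row-major 3D grids.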
//x-displacement
__device__ float fxx(int n, int i, int j, int k, float*xold)
{
return xold[ (Ny + 2)*(4 + 2*Nx) + (k - 1)*(Ny + 2)*(4 + 2*Nx) + 4 +
2*Nx + (i - 1)*(4 + 2*Nx) + 2 + (2*j - 1)-1];
}
//x-velocity
__device__ float fvx(int n, int i, int j, int k, float*xold)
{
return xold[ (Ny + 2)*(4 + 2*Nx) + (k - 1)*(Ny + 2)*(4 + 2*Nx) + 4 +
2*Nx + (i - 1)*(4 + 2*Nx) + 2 + (2*j)-1];
}
//y-displacement
__device__ float fyy(int n, int i, int j, int k, float*yold)
{
return yold[ (Nx + 2)*(4 + 2*Ny) + (k - 1)*(Nx + 2)*(4 + 2*Ny) + 4 +
2*Ny + (j - 1)*(4 + 2*Ny) + 2 + (2*i - 1)-1];
}
//y-velocity
__device__ float fvy(int n, int i, int j, int k, float*yold)
{
return yold[ (Nx + 2)*(4 + 2 * Ny) + (k - 1)*(Nx + 2)*(4 + 2 * Ny) + 4 +
2 * Ny + (j - 1)*(4 + 2 * Ny) + 2 + (2 * i)-1];
}
//z-displacement
__device__ float fzz(int n, int i, int j, int k, float*zold)
{
return zold[ (Nx + 2)*(4 + 2*Nz) + (i - 1)*(Nx + 2)*(4 + 2*Nz) + 4 +
2*Nz + (j - 1)*(4 + 2*Nz) + 2 + (2*k - 1)-1];
}
//z-velocity
__device__ float fvz(int n, int i, int j, int k, float*zold)
{
return zold[ (Nx + 2)*(4 + 2 * Nz) + (i - 1)*(Nx + 2)*(4 + 2 * Nz) + 4 +
2 * Nz + (j - 1)*(4 + 2 * Nz) + 2 + (2 * k)-1];
}
//mass
__device__ float fm(int i, int j, int k, float*m)
{
return m[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//x-stiffness
__device__ float fkx(int i, int j, int k, float*kx)
{
return kx[(k - 1)*Ny*(Nx + 1) + (i - 1)*(Nx + 1) + j-1];
}
//y-stiffness
__device__ float fky(int i, int j, int k, float*ky)
{
return ky[(k - 1)*Nx*(Ny + 1) + (i - 1)*Nx + j-1];
}
//z-stiffness
__device__ float fkz(int i, int j, int k, float*kz)
{
return kz[(k - 1)*Nx*Ny + (i - 1)*Nx + j-1];
}
//x-damping
__device__ float fbx(int i, int j, int k, float*bx)
{
return bx[(k - 1)*Ny*(Nx + 1) + (i - 1)*(Nx + 1) + j-1];
}
//y-damping
__device__ float fby(int i, int j, int k, float*by)
{
return by[(k - 1)*Nx*(Ny + 1) + (i - 1)*Nx + j-1];
}
//z-damping
__device__ float fbz(int i, int j, int k, float*bz)
{
return bz[(k - 1)*Nx*Ny + (i - 1)*Nx + j-1];
}
//x-force
__device__ float fFx(int i, int j, int k, float*Fx)
{
return Fx[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//y-force
__device__ float fFy(int i, int j, int k, float*Fy)
{
return Fy[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//z-force
__device__ float fFz(int i, int j, int k, float*Fz)
{
return Fz[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//x-acceleration
__device__ float ax(int i, int j, int k, float*Fx, float*xold, float*kx, float*ky, float*kz, float*bx, float*by, float*bz, float*m)
{
return (fFx(i, j, k, Fx) - fby(i, j, k, by)*(-fvx(1, -1 + i, j, k, xold) + fvx(1, i, j, k, xold)) -
fbx(i, j, k, bx)*(-fvx(1, i, -1 + j, k, xold) + fvx(1, i, j, k, xold)) - fbz(i, j, k, bz)*(-fvx(1, i, j, -1 + k, xold) + fvx(1, i, j, k, xold)) +
fbz(i, j, 1 + k, bz)*(-fvx(1, i, j, k, xold) + fvx(1, i, j, 1 + k, xold)) +
fbx(i, 1 + j, k, bx)*(-fvx(1, i, j, k, xold) + fvx(1, i, 1 + j, k, xold)) +
fby(1 + i, j, k, by)*(-fvx(1, i, j, k, xold) + fvx(1, 1 + i, j, k, xold)) -
fky(i, j, k, ky)*(-fxx(1, -1 + i, j, k, xold) + fxx(1, i, j, k, xold)) - fkx(i, j, k, kx)*(-fxx(1, i, -1 + j, k, xold) + fxx(1, i, j, k, xold)) -
fkz(i, j, k, kz)*(-fxx(1, i, j, -1 + k, xold) + fxx(1, i, j, k, xold)) +
fkz(i, j, 1 + k, kz)*(-fxx(1, i, j, k, xold) + fxx(1, i, j, 1 + k, xold)) +
fkx(i, 1 + j, k, kx)*(-fxx(1, i, j, k, xold) + fxx(1, i, 1 + j, k, xold)) +
fky(1 + i, j, k, ky)*(-fxx(1, i, j, k, xold) + fxx(1, 1 + i, j, k, xold))) / fm(i, j, k, m);
}
//y-acceleration
__device__ float ay(int i, int j, int k, float*Fy, float*yold, float*kx, float*ky, float*kz, float*bx, float*by, float*bz, float*m)
{
return (fFy(i, j, k, Fy) - fby(i, j, k, by)*(-fvy(1, -1 + i, j, k, yold) + fvy(1, i, j, k, yold)) -
fbx(i, j, k, bx)*(-fvy(1, i, -1 + j, k, yold) + fvy(1, i, j, k, yold)) - fbz(i, j, k, bz)*(-fvy(1, i, j, -1 + k, yold) + fvy(1, i, j, k, yold)) +
fbz(i, j, 1 + k, bz)*(-fvy(1, i, j, k, yold) + fvy(1, i, j, 1 + k, yold)) +
fbx(i, 1 + j, k, bx)*(-fvy(1, i, j, k, yold) + fvy(1, i, 1 + j, k, yold)) +
fby(1 + i, j, k, by)*(-fvy(1, i, j, k, yold) + fvy(1, 1 + i, j, k, yold)) -
fky(i, j, k, ky)*(-fyy(1, -1 + i, j, k, yold) + fyy(1, i, j, k, yold)) - fkx(i, j, k, kx)*(-fyy(1, i, -1 + j, k, yold) + fyy(1, i, j, k, yold)) -
fkz(i, j, k, kz)*(-fyy(1, i, j, -1 + k, yold) + fyy(1, i, j, k, yold)) +
fkz(i, j, 1 + k, kz)*(-fyy(1, i, j, k, yold) + fyy(1, i, j, 1 + k, yold)) +
fkx(i, 1 + j, k, kx)*(-fyy(1, i, j, k, yold) + fyy(1, i, 1 + j, k, yold)) +
fky(1 + i, j, k, ky)*(-fyy(1, i, j, k, yold) + fyy(1, 1 + i, j, k, yold))) / fm(i, j, k, m);
}
//z-acceleration
__device__ float az(int i, int j, int k, float*Fz, float*zold, float*kx, float*ky, float*kz, float*bx, float*by, float*bz, float*m)
{
return (fFz(i, j, k, Fz) - fby(i, j, k, by)*(-fvz(1, -1 + i, j, k, zold) + fvz(1, i, j, k, zold)) -
fbx(i, j, k, bx)*(-fvz(1, i, -1 + j, k, zold) + fvz(1, i, j, k, zold)) - fbz(i, j, k, bz)*(-fvz(1, i, j, -1 + k, zold) + fvz(1, i, j, k, zold)) +
fbz(i, j, 1 + k, bz)*(-fvz(1, i, j, k, zold) + fvz(1, i, j, 1 + k, zold)) +
fbx(i, 1 + j, k, bx)*(-fvz(1, i, j, k, zold) + fvz(1, i, 1 + j, k, zold)) +
fby(1 + i, j, k, by)*(-fvz(1, i, j, k, zold) + fvz(1, 1 + i, j, k, zold)) -
fky(i, j, k, ky)*(-fzz(1, -1 + i, j, k, zold) + fzz(1, i, j, k, zold)) - fkx(i, j, k, kx)*(-fzz(1, i, -1 + j, k, zold) + fzz(1, i, j, k, zold)) -
fkz(i, j, k, kz)*(-fzz(1, i, j, -1 + k, zold) + fzz(1, i, j, k, zold)) +
fkz(i, j, 1 + k, kz)*(-fzz(1, i, j, k, zold) + fzz(1, i, j, 1 + k, zold)) +
fkx(i, 1 + j, k, kx)*(-fzz(1, i, j, k, zold) + fzz(1, i, 1 + j, k, zold)) +
fky(1 + i, j, k, ky)*(-fzz(1, i, j, k, zold) + fzz(1, 1 + i, j, k, zold))) / fm(i, j, k, m);
}
__global__ void SolveKernel(int dimBlockX,int dimBlockY,int dimBlockZ,float*xoldd,float*yoldd,float*zoldd,float*xnewd,float*ynewd,float*znewd,float*md,float*kxd,float*kyd,float*kzd,float*bxd,float*byd,float*bzd,float*Fxd,float*Fyd,float*Fzd)
{
// int tx=threadIdx.x;
// int ty=threadIdx.y;
int tx=blockIdx.x*dimBlockX+threadIdx.x;
int ty=blockIdx.y*dimBlockY+threadIdx.y;
int tz=blockIdx.z*dimBlockZ+threadIdx.z;
int i=ty+1;
int j=tx+1;
int k=tz+1;
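 // One explicit-Euler step per mass point: new displacement from the old
 // velocity, new velocity from the acceleration evaluated at the old state.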
xnewd[ (Ny + 2)*(4 + 2*Nx) + (k - 1)*(Ny + 2)*(4 + 2*Nx) + 4 +
2 * Nx + (i - 1)*(4 + 2 * Nx) + 2 + (2 * j - 1) - 1] = fxx(1, i, j, k, xoldd) + fvx(1, i, j, k, xoldd)*dt;
xnewd[ (Ny + 2)*(4 + 2 * Nx) + (k - 1)*(Ny + 2)*(4 + 2 * Nx) + 4 +
2 * Nx + (i - 1)*(4 + 2 * Nx) + 2 + (2 * j) - 1] = fvx(1, i, j, k, xoldd) + ax(i, j, k, Fxd, xoldd, kxd, kyd, kzd, bxd, byd, bzd, md)*dt;
ynewd[ (Nx + 2)*(4 + 2*Ny) + (k - 1)*(Nx + 2)*(4 + 2*Ny) + 4 +
2*Ny + (j - 1)*(4 + 2*Ny) + 2 + (2*i - 1)-1] = fyy(1, i, j, k, yoldd) + fvy(1, i, j, k, yoldd)*dt;
ynewd[ (Nx + 2)*(4 + 2*Ny) + (k - 1)*(Nx + 2)*(4 + 2*Ny) + 4 +
2*Ny + (j - 1)*(4 + 2*Ny) + 2 + (2*i)-1] = fvy(1, i, j, k, yoldd) + ay(i, j, k, Fyd, yoldd, kxd, kyd, kzd, bxd, byd, bzd, md)*dt;
znewd[ (Nx + 2)*(4 + 2*Nz) + (i - 1)*(Nx + 2)*(4 + 2*Nz) + 4 +
2*Nz + (j - 1)*(4 + 2*Nz) + 2 + (2*k - 1)-1] = fzz(1, i, j, k, zoldd) + fvz(1, i, j, k, zoldd)*dt;
znewd[ (Nx + 2)*(4 + 2*Nz) + (i - 1)*(Nx + 2)*(4 + 2*Nz) + 4 +
2*Nz + (j - 1)*(4 + 2*Nz) + 2 + (2*k)-1] = fvz(1, i, j, k, zoldd) + az(i, j, k, Fzd, zoldd, kxd, kyd, kzd, bxd, byd, bzd, md)*dt;
}
void Solve(float*xold,float*yold,float*zold,float*xnew,float*ynew,float*znew,float*m,float*kx,float*ky,float*kz,float*bx,float*by,float*bz,float*Fx,float*Fy,float*Fz)
{
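 // Host-side wrapper for one time step: allocate device buffers, copy the
 // current state and parameters in, launch one thread per mass point, and
 // copy the updated state back. Allocating and copying every step keeps the
 // code simple but is costly; the buffers could be reused across steps.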
float *xoldd,*yoldd,*zoldd,*xnewd,*ynewd,*znewd,*md,*kxd,*kyd,*kzd,*bxd,*byd,*bzd,*Fxd,*Fyd,*Fzd;
int sizexoldd=xlength*sizeof(float);
hipMalloc((void**)&xoldd,sizexoldd);
hipMemcpy(xoldd,xold,sizexoldd,hipMemcpyHostToDevice);
int sizeyoldd=ylength*sizeof(float);
hipMalloc((void**)&yoldd,sizeyoldd);
hipMemcpy(yoldd,yold,sizeyoldd,hipMemcpyHostToDevice);
int sizezoldd=zlength*sizeof(float);
hipMalloc((void**)&zoldd,sizezoldd);
hipMemcpy(zoldd,zold,sizezoldd,hipMemcpyHostToDevice);
int sizexnewd=xlength*sizeof(float);
hipMalloc((void**)&xnewd,sizexnewd);
hipMemcpy(xnewd,xnew,sizexnewd,hipMemcpyHostToDevice);
int sizeynewd=ylength*sizeof(float);
hipMalloc((void**)&ynewd,sizeynewd);
hipMemcpy(ynewd,ynew,sizeynewd,hipMemcpyHostToDevice);
int sizeznewd=zlength*sizeof(float);
hipMalloc((void**)&znewd,sizeznewd);
hipMemcpy(znewd,znew,sizeznewd,hipMemcpyHostToDevice);
int sizemd=masslength*sizeof(float);
hipMalloc((void**)&md,sizemd);
hipMemcpy(md,m,sizemd,hipMemcpyHostToDevice);
int sizekxd=kxlength*sizeof(float);
hipMalloc((void**)&kxd,sizekxd);
hipMemcpy(kxd,kx,sizekxd,hipMemcpyHostToDevice);
int sizekyd=kylength*sizeof(float);
hipMalloc((void**)&kyd,sizekyd);
hipMemcpy(kyd,ky,sizekyd,hipMemcpyHostToDevice);
int sizekzd=kzlength*sizeof(float);
hipMalloc((void**)&kzd,sizekzd);
hipMemcpy(kzd,kz,sizekzd,hipMemcpyHostToDevice);
int sizebxd=bxlength*sizeof(float);
hipMalloc((void**)&bxd,sizebxd);
hipMemcpy(bxd,bx,sizebxd,hipMemcpyHostToDevice);
int sizebyd=bylength*sizeof(float);
hipMalloc((void**)&byd,sizebyd);
hipMemcpy(byd,by,sizebyd,hipMemcpyHostToDevice);
int sizebzd=bzlength*sizeof(float);
hipMalloc((void**)&bzd,sizebzd);
hipMemcpy(bzd,bz,sizebzd,hipMemcpyHostToDevice);
int sizeFxd=masslength*sizeof(float);
hipMalloc((void**)&Fxd,sizeFxd);
hipMemcpy(Fxd,Fx,sizeFxd,hipMemcpyHostToDevice);
int sizeFyd=masslength*sizeof(float);
hipMalloc((void**)&Fyd,sizeFyd);
hipMemcpy(Fyd,Fy,sizeFyd,hipMemcpyHostToDevice);
int sizeFzd=masslength*sizeof(float);
hipMalloc((void**)&Fzd,sizeFzd);
hipMemcpy(Fzd,Fz,sizeFzd,hipMemcpyHostToDevice);
//Malloc result
//hipMalloc((void**)&Pd,size);
//Dimensions of the run
//int SubMtxWidth=SubWidth;
int NBlockX=4;
int NBlockY=3;
int NBlockZ=5;
int dimBlockX=Nx/NBlockX;
int dimBlockY=Ny/NBlockY;
int dimBlockZ=Nz/NBlockZ;
dim3 dimBlock(dimBlockX,dimBlockY,dimBlockZ);
dim3 dimGrid(NBlockX,NBlockY,NBlockZ);
//Running Kernel
hipLaunchKernelGGL(( SolveKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, dimBlockX,dimBlockY,dimBlockZ,xoldd,yoldd,zoldd,xnewd,ynewd,znewd,md,kxd,kyd,kzd,bxd,byd,bzd,Fxd,Fyd,Fzd);
hipDeviceSynchronize();
//Copy data back
hipMemcpy(xnew,xnewd,sizexnewd,hipMemcpyDeviceToHost);
hipMemcpy(ynew,ynewd,sizeynewd,hipMemcpyDeviceToHost);
hipMemcpy(znew,znewd,sizeznewd,hipMemcpyDeviceToHost);
checkCUDAError("memcpy");
//Free memory
//hipFree(Md);
//hipFree(Nd);
//hipFree(Pd);
//Free the device buffers allocated above
hipFree(xoldd);
hipFree(yoldd);
hipFree(zoldd);
hipFree(xnewd);
hipFree(ynewd);
hipFree(znewd);
hipFree(md);
hipFree(kxd);
hipFree(kyd);
hipFree(kzd);
hipFree(bxd);
hipFree(byd);
hipFree(bzd);
hipFree(Fxd);
hipFree(Fyd);
hipFree(Fzd);
}
int main(int argc,char* argv[])
{
float *xold,*yold,*zold,*xnew,*ynew,*znew,*m,*kx,*ky,*kz,*bx,*by,*bz,*Fx,*Fy,*Fz;
//----------------------------------INITIALIZATION START----------------------------------
// Solution vectors
xold=(float *)malloc(xlength*sizeof(float));
yold=(float *)malloc(ylength*sizeof(float));
zold=(float *)malloc(zlength*sizeof(float));
xnew=(float *)malloc(xlength*sizeof(float));
ynew=(float *)malloc(ylength*sizeof(float));
znew=(float *)malloc(zlength*sizeof(float));
// Mass vector
m=(float *)malloc(masslength*sizeof(float));
// Stiffness vectors
kx=(float *)malloc(kxlength*sizeof(float));
ky=(float *)malloc(kylength*sizeof(float));
kz=(float *)malloc(kzlength*sizeof(float));
// Damping vectors
bx=(float *)malloc(bxlength*sizeof(float));
by=(float *)malloc(bylength*sizeof(float));
bz=(float *)malloc(bzlength*sizeof(float));
// Force vectors
Fx=(float *)malloc(masslength*sizeof(float));
Fy=(float *)malloc(masslength*sizeof(float));
Fz=(float *)malloc(masslength*sizeof(float));
// Initial conditions
for (int i = 0; i < xlength ; i++)
{
xold[i]=0.0f;
xnew[i]=0.0f;
}
for (int i = 0; i < ylength ; i++)
{
yold[i]=0.0f;
ynew[i]=0.0f;
}
for (int i = 0; i < zlength ; i++)
{
zold[i]=0.0f;
znew[i]=0.0f;
}
// Mass [kg] and forces
for (int i = 0; i < masslength ; i++)
{
m[i]=1.0f;
Fx[i]=0.0f;
Fy[i]=0.0f;
Fz[i]=0.0f;
}
// Stiffness [N/m] and damping [N sec/m] in x-direction
for (int i = 0; i < kxlength ; i++)
{
kx[i]=0.2f;
bx[i]=0.05f;
}
// Stiffness [N/m] and damping [N sec/m] in y-direction
for (int i = 0; i < kylength ; i++)
{
ky[i]=0.2f;
by[i]=0.05f;
}
// Stiffness [N/m] and damping [N sec/m] in z-direction
for (int i = 0; i < kzlength ; i++)
{
kz[i]=0.2f;
bz[i]=0.05f;
}
//----------------------------------INITIALIZATION END--------------------------------------
//-------------------------------BOUNDARY CONDITIONS START----------------------------------
// No connections with Top wall B.C.'s
for (int i = 1; i <= Nx; i++)
{
for (int k = 1; k <= Nz; k++)
{
ky[i + Nx*Ny + (-1 + k)*Nx*(1 + Ny) - 1] = 0.0f;
by[i + Nx*Ny + (-1 + k)*Nx*(1 + Ny) - 1] = 0.0f;
}
}
//--------------------------------BOUNDARY CONDITIONS END-----------------------------------
//--------------------------------------SOLVER START-----------------------------------------
clock_t t;
t=clock();
for (int n = 1; n <= Nstep-1; n++)
{
// Excitation
Fx[(2 - 1)*Ny*Nx + (6 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu); // omega = 3 [rad/sec]
Fy[(2 - 1)*Ny*Nx + (6 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fz[(2 - 1)*Ny*Nx + (6 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fx[(2 - 1)*Ny*Nx + (7 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fy[(2 - 1)*Ny*Nx + (7 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fz[(2 - 1)*Ny*Nx + (7 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fx[(2 - 1)*Ny*Nx + (5 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fy[(2 - 1)*Ny*Nx + (5 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fz[(2 - 1)*Ny*Nx + (5 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Solve(xold,yold,zold,xnew,ynew,znew,m,kx,ky,kz,bx,by,bz,Fx,Fy,Fz);
hipDeviceSynchronize();
// OLD=NEW
for (int ix = 0; ix < xlength; ix++)
{
xold[ix] = xnew[ix];
}
for (int iy = 0; iy < ylength; iy++)
{
yold[iy] = ynew[iy];
}
for (int iz = 0; iz < zlength; iz++)
{
zold[iz] = znew[iz];
}
}
ofstream fout("test.txt");
if (fout.is_open())
{
//file opened successfully so we are here
cout << "File Opened successfully!!!. Writing data from array to file" << endl;
for (int j = 0; j < zlength; j++)
{
fout << znew[j] << ' '; //write the j-th element of the array to the file
}
fout << '\n';
cout << "Array data successfully saved into the file test.txt" << endl;
}
else //file could not be opened
{
cout << "File could not be opened." << endl;
}
t=clock()-t;
printf("%f seconds\n",((float)t)/CLOCKS_PER_SEC);
printf("%f,%f,%f\n",xold[60],yold[60],zold[60]);
free(xold);
free(yold);
free(zold);
free(xnew);
free(ynew);
free(znew);
free(m);
free(kx);
free(ky);
free(kz);
free(bx);
free(by);
free(bz);
free(Fx);
free(Fy);
free(Fz);
return 0;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if(hipSuccess!= err)
{
fprintf(stderr,"Cuda error: %s: %s.\n",msg,hipGetErrorString(err));
exit(EXIT_FAILURE);
}
} | 99a12b78e233e8fbd52844ac91bb32be418edf58.cu | #include <stdio.h>
#include <time.h>
#include <iostream>
#include <vector>
#include <math.h>
#include <fstream>
void checkCUDAError(const char *msg);
#include <cuda_runtime.h>
using namespace std;
// --------------------INPUT DATA---------------------
const int Nx = 24, Ny = 120, Nz = 20; // Number of mass points
float maxtime = 60; // End time [sec]
const int Nstep = 1200; // Number of time steps
__device__ __constant__ float dt = 0.05; // maxtime / Nstep; // Time step size [sec]
float dtcpu=0.05;
const int xlength = (4 + 2 * Nx)*(2 + Ny)*(2 + Nz); // Solution array in x-direction
const int ylength = (2 + Nx)*(4 + 2 * Ny)*(2 + Nz); // Solution array in y-direction
const int zlength = (2 + Nx)*(2 + Ny)*(4 + 2 * Nz); // Solution array in z-direction
const int masslength = Nx * Ny * Nz;
const int kxlength = Nz * Ny * (Nx + 1);
const int kylength = Nz * Nx * (Ny + 1);
const int kzlength = Ny * Nx * (Nz + 1);
const int bxlength = kxlength;
const int bylength = kylength;
const int bzlength = kzlength;
//------------------------DEVICE FUNCTIONS----------------------------//
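// The accessors below map 1-based grid indices (i, j, k) into the flattened
// arrays: displacement and velocity components are interleaved (odd/even
// slots) inside xold/yold/zold, while the mass, stiffness, damping and force
// arrays are plain row-major 3D grids.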
//x-displacement
__device__ float fxx(int n, int i, int j, int k, float*xold)
{
return xold[ (Ny + 2)*(4 + 2*Nx) + (k - 1)*(Ny + 2)*(4 + 2*Nx) + 4 +
2*Nx + (i - 1)*(4 + 2*Nx) + 2 + (2*j - 1)-1];
}
//x-velocity
__device__ float fvx(int n, int i, int j, int k, float*xold)
{
return xold[ (Ny + 2)*(4 + 2*Nx) + (k - 1)*(Ny + 2)*(4 + 2*Nx) + 4 +
2*Nx + (i - 1)*(4 + 2*Nx) + 2 + (2*j)-1];
}
//y-displacement
__device__ float fyy(int n, int i, int j, int k, float*yold)
{
return yold[ (Nx + 2)*(4 + 2*Ny) + (k - 1)*(Nx + 2)*(4 + 2*Ny) + 4 +
2*Ny + (j - 1)*(4 + 2*Ny) + 2 + (2*i - 1)-1];
}
//y-velocity
__device__ float fvy(int n, int i, int j, int k, float*yold)
{
return yold[ (Nx + 2)*(4 + 2 * Ny) + (k - 1)*(Nx + 2)*(4 + 2 * Ny) + 4 +
2 * Ny + (j - 1)*(4 + 2 * Ny) + 2 + (2 * i)-1];
}
//z-displacement
__device__ float fzz(int n, int i, int j, int k, float*zold)
{
return zold[ (Nx + 2)*(4 + 2*Nz) + (i - 1)*(Nx + 2)*(4 + 2*Nz) + 4 +
2*Nz + (j - 1)*(4 + 2*Nz) + 2 + (2*k - 1)-1];
}
//z-velocity
__device__ float fvz(int n, int i, int j, int k, float*zold)
{
return zold[ (Nx + 2)*(4 + 2 * Nz) + (i - 1)*(Nx + 2)*(4 + 2 * Nz) + 4 +
2 * Nz + (j - 1)*(4 + 2 * Nz) + 2 + (2 * k)-1];
}
//mass
__device__ float fm(int i, int j, int k, float*m)
{
return m[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//x-stiffness
__device__ float fkx(int i, int j, int k, float*kx)
{
return kx[(k - 1)*Ny*(Nx + 1) + (i - 1)*(Nx + 1) + j-1];
}
//y-stiffness
__device__ float fky(int i, int j, int k, float*ky)
{
return ky[(k - 1)*Nx*(Ny + 1) + (i - 1)*Nx + j-1];
}
//z-stiffness
__device__ float fkz(int i, int j, int k, float*kz)
{
return kz[(k - 1)*Nx*Ny + (i - 1)*Nx + j-1];
}
//x-damping
__device__ float fbx(int i, int j, int k, float*bx)
{
return bx[(k - 1)*Ny*(Nx + 1) + (i - 1)*(Nx + 1) + j-1];
}
//y-damping
__device__ float fby(int i, int j, int k, float*by)
{
return by[(k - 1)*Nx*(Ny + 1) + (i - 1)*Nx + j-1];
}
//z-damping
__device__ float fbz(int i, int j, int k, float*bz)
{
return bz[(k - 1)*Nx*Ny + (i - 1)*Nx + j-1];
}
//x-force
__device__ float fFx(int i, int j, int k, float*Fx)
{
return Fx[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//y-force
__device__ float fFy(int i, int j, int k, float*Fy)
{
return Fy[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//z-force
__device__ float fFz(int i, int j, int k, float*Fz)
{
return Fz[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//x-acceleration
__device__ float ax(int i, int j, int k, float*Fx, float*xold, float*kx, float*ky, float*kz, float*bx, float*by, float*bz, float*m)
{
return (fFx(i, j, k, Fx) - fby(i, j, k, by)*(-fvx(1, -1 + i, j, k, xold) + fvx(1, i, j, k, xold)) -
fbx(i, j, k, bx)*(-fvx(1, i, -1 + j, k, xold) + fvx(1, i, j, k, xold)) - fbz(i, j, k, bz)*(-fvx(1, i, j, -1 + k, xold) + fvx(1, i, j, k, xold)) +
fbz(i, j, 1 + k, bz)*(-fvx(1, i, j, k, xold) + fvx(1, i, j, 1 + k, xold)) +
fbx(i, 1 + j, k, bx)*(-fvx(1, i, j, k, xold) + fvx(1, i, 1 + j, k, xold)) +
fby(1 + i, j, k, by)*(-fvx(1, i, j, k, xold) + fvx(1, 1 + i, j, k, xold)) -
fky(i, j, k, ky)*(-fxx(1, -1 + i, j, k, xold) + fxx(1, i, j, k, xold)) - fkx(i, j, k, kx)*(-fxx(1, i, -1 + j, k, xold) + fxx(1, i, j, k, xold)) -
fkz(i, j, k, kz)*(-fxx(1, i, j, -1 + k, xold) + fxx(1, i, j, k, xold)) +
fkz(i, j, 1 + k, kz)*(-fxx(1, i, j, k, xold) + fxx(1, i, j, 1 + k, xold)) +
fkx(i, 1 + j, k, kx)*(-fxx(1, i, j, k, xold) + fxx(1, i, 1 + j, k, xold)) +
fky(1 + i, j, k, ky)*(-fxx(1, i, j, k, xold) + fxx(1, 1 + i, j, k, xold))) / fm(i, j, k, m);
}
//y-acceleration
__device__ float ay(int i, int j, int k, float*Fy, float*yold, float*kx, float*ky, float*kz, float*bx, float*by, float*bz, float*m)
{
return (fFy(i, j, k, Fy) - fby(i, j, k, by)*(-fvy(1, -1 + i, j, k, yold) + fvy(1, i, j, k, yold)) -
fbx(i, j, k, bx)*(-fvy(1, i, -1 + j, k, yold) + fvy(1, i, j, k, yold)) - fbz(i, j, k, bz)*(-fvy(1, i, j, -1 + k, yold) + fvy(1, i, j, k, yold)) +
fbz(i, j, 1 + k, bz)*(-fvy(1, i, j, k, yold) + fvy(1, i, j, 1 + k, yold)) +
fbx(i, 1 + j, k, bx)*(-fvy(1, i, j, k, yold) + fvy(1, i, 1 + j, k, yold)) +
fby(1 + i, j, k, by)*(-fvy(1, i, j, k, yold) + fvy(1, 1 + i, j, k, yold)) -
fky(i, j, k, ky)*(-fyy(1, -1 + i, j, k, yold) + fyy(1, i, j, k, yold)) - fkx(i, j, k, kx)*(-fyy(1, i, -1 + j, k, yold) + fyy(1, i, j, k, yold)) -
fkz(i, j, k, kz)*(-fyy(1, i, j, -1 + k, yold) + fyy(1, i, j, k, yold)) +
fkz(i, j, 1 + k, kz)*(-fyy(1, i, j, k, yold) + fyy(1, i, j, 1 + k, yold)) +
fkx(i, 1 + j, k, kx)*(-fyy(1, i, j, k, yold) + fyy(1, i, 1 + j, k, yold)) +
fky(1 + i, j, k, ky)*(-fyy(1, i, j, k, yold) + fyy(1, 1 + i, j, k, yold))) / fm(i, j, k, m);
}
//z-acceleration
__device__ float az(int i, int j, int k, float*Fz, float*zold, float*kx, float*ky, float*kz, float*bx, float*by, float*bz, float*m)
{
return (fFz(i, j, k, Fz) - fby(i, j, k, by)*(-fvz(1, -1 + i, j, k, zold) + fvz(1, i, j, k, zold)) -
fbx(i, j, k, bx)*(-fvz(1, i, -1 + j, k, zold) + fvz(1, i, j, k, zold)) - fbz(i, j, k, bz)*(-fvz(1, i, j, -1 + k, zold) + fvz(1, i, j, k, zold)) +
fbz(i, j, 1 + k, bz)*(-fvz(1, i, j, k, zold) + fvz(1, i, j, 1 + k, zold)) +
fbx(i, 1 + j, k, bx)*(-fvz(1, i, j, k, zold) + fvz(1, i, 1 + j, k, zold)) +
fby(1 + i, j, k, by)*(-fvz(1, i, j, k, zold) + fvz(1, 1 + i, j, k, zold)) -
fky(i, j, k, ky)*(-fzz(1, -1 + i, j, k, zold) + fzz(1, i, j, k, zold)) - fkx(i, j, k, kx)*(-fzz(1, i, -1 + j, k, zold) + fzz(1, i, j, k, zold)) -
fkz(i, j, k, kz)*(-fzz(1, i, j, -1 + k, zold) + fzz(1, i, j, k, zold)) +
fkz(i, j, 1 + k, kz)*(-fzz(1, i, j, k, zold) + fzz(1, i, j, 1 + k, zold)) +
fkx(i, 1 + j, k, kx)*(-fzz(1, i, j, k, zold) + fzz(1, i, 1 + j, k, zold)) +
fky(1 + i, j, k, ky)*(-fzz(1, i, j, k, zold) + fzz(1, 1 + i, j, k, zold))) / fm(i, j, k, m);
}
__global__ void SolveKernel(int dimBlockX,int dimBlockY,int dimBlockZ,float*xoldd,float*yoldd,float*zoldd,float*xnewd,float*ynewd,float*znewd,float*md,float*kxd,float*kyd,float*kzd,float*bxd,float*byd,float*bzd,float*Fxd,float*Fyd,float*Fzd)
{
// int tx=threadIdx.x;
// int ty=threadIdx.y;
int tx=blockIdx.x*dimBlockX+threadIdx.x;
int ty=blockIdx.y*dimBlockY+threadIdx.y;
int tz=blockIdx.z*dimBlockZ+threadIdx.z;
int i=ty+1;
int j=tx+1;
int k=tz+1;
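 // One explicit-Euler step per mass point: new displacement from the old
 // velocity, new velocity from the acceleration evaluated at the old state.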
xnewd[ (Ny + 2)*(4 + 2*Nx) + (k - 1)*(Ny + 2)*(4 + 2*Nx) + 4 +
2 * Nx + (i - 1)*(4 + 2 * Nx) + 2 + (2 * j - 1) - 1] = fxx(1, i, j, k, xoldd) + fvx(1, i, j, k, xoldd)*dt;
xnewd[ (Ny + 2)*(4 + 2 * Nx) + (k - 1)*(Ny + 2)*(4 + 2 * Nx) + 4 +
2 * Nx + (i - 1)*(4 + 2 * Nx) + 2 + (2 * j) - 1] = fvx(1, i, j, k, xoldd) + ax(i, j, k, Fxd, xoldd, kxd, kyd, kzd, bxd, byd, bzd, md)*dt;
ynewd[ (Nx + 2)*(4 + 2*Ny) + (k - 1)*(Nx + 2)*(4 + 2*Ny) + 4 +
2*Ny + (j - 1)*(4 + 2*Ny) + 2 + (2*i - 1)-1] = fyy(1, i, j, k, yoldd) + fvy(1, i, j, k, yoldd)*dt;
ynewd[ (Nx + 2)*(4 + 2*Ny) + (k - 1)*(Nx + 2)*(4 + 2*Ny) + 4 +
2*Ny + (j - 1)*(4 + 2*Ny) + 2 + (2*i)-1] = fvy(1, i, j, k, yoldd) + ay(i, j, k, Fyd, yoldd, kxd, kyd, kzd, bxd, byd, bzd, md)*dt;
znewd[ (Nx + 2)*(4 + 2*Nz) + (i - 1)*(Nx + 2)*(4 + 2*Nz) + 4 +
2*Nz + (j - 1)*(4 + 2*Nz) + 2 + (2*k - 1)-1] = fzz(1, i, j, k, zoldd) + fvz(1, i, j, k, zoldd)*dt;
znewd[ (Nx + 2)*(4 + 2*Nz) + (i - 1)*(Nx + 2)*(4 + 2*Nz) + 4 +
2*Nz + (j - 1)*(4 + 2*Nz) + 2 + (2*k)-1] = fvz(1, i, j, k, zoldd) + az(i, j, k, Fzd, zoldd, kxd, kyd, kzd, bxd, byd, bzd, md)*dt;
}
void Solve(float*xold,float*yold,float*zold,float*xnew,float*ynew,float*znew,float*m,float*kx,float*ky,float*kz,float*bx,float*by,float*bz,float*Fx,float*Fy,float*Fz)
{
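 // Host-side wrapper for one time step: allocate device buffers, copy the
 // current state and parameters in, launch one thread per mass point, and
 // copy the updated state back. Allocating and copying every step keeps the
 // code simple but is costly; the buffers could be reused across steps.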
float *xoldd,*yoldd,*zoldd,*xnewd,*ynewd,*znewd,*md,*kxd,*kyd,*kzd,*bxd,*byd,*bzd,*Fxd,*Fyd,*Fzd;
int sizexoldd=xlength*sizeof(float);
cudaMalloc((void**)&xoldd,sizexoldd);
cudaMemcpy(xoldd,xold,sizexoldd,cudaMemcpyHostToDevice);
int sizeyoldd=ylength*sizeof(float);
cudaMalloc((void**)&yoldd,sizeyoldd);
cudaMemcpy(yoldd,yold,sizeyoldd,cudaMemcpyHostToDevice);
int sizezoldd=zlength*sizeof(float);
cudaMalloc((void**)&zoldd,sizezoldd);
cudaMemcpy(zoldd,zold,sizezoldd,cudaMemcpyHostToDevice);
int sizexnewd=xlength*sizeof(float);
cudaMalloc((void**)&xnewd,sizexnewd);
cudaMemcpy(xnewd,xnew,sizexnewd,cudaMemcpyHostToDevice);
int sizeynewd=ylength*sizeof(float);
cudaMalloc((void**)&ynewd,sizeynewd);
cudaMemcpy(ynewd,ynew,sizeynewd,cudaMemcpyHostToDevice);
int sizeznewd=zlength*sizeof(float);
cudaMalloc((void**)&znewd,sizeznewd);
cudaMemcpy(znewd,znew,sizeznewd,cudaMemcpyHostToDevice);
int sizemd=masslength*sizeof(float);
cudaMalloc((void**)&md,sizemd);
cudaMemcpy(md,m,sizemd,cudaMemcpyHostToDevice);
int sizekxd=kxlength*sizeof(float);
cudaMalloc((void**)&kxd,sizekxd);
cudaMemcpy(kxd,kx,sizekxd,cudaMemcpyHostToDevice);
int sizekyd=kylength*sizeof(float);
cudaMalloc((void**)&kyd,sizekyd);
cudaMemcpy(kyd,ky,sizekyd,cudaMemcpyHostToDevice);
int sizekzd=kzlength*sizeof(float);
cudaMalloc((void**)&kzd,sizekzd);
cudaMemcpy(kzd,kz,sizekzd,cudaMemcpyHostToDevice);
int sizebxd=bxlength*sizeof(float);
cudaMalloc((void**)&bxd,sizebxd);
cudaMemcpy(bxd,bx,sizebxd,cudaMemcpyHostToDevice);
int sizebyd=bylength*sizeof(float);
cudaMalloc((void**)&byd,sizebyd);
cudaMemcpy(byd,by,sizebyd,cudaMemcpyHostToDevice);
int sizebzd=bzlength*sizeof(float);
cudaMalloc((void**)&bzd,sizebzd);
cudaMemcpy(bzd,bz,sizebzd,cudaMemcpyHostToDevice);
int sizeFxd=masslength*sizeof(float);
cudaMalloc((void**)&Fxd,sizeFxd);
cudaMemcpy(Fxd,Fx,sizeFxd,cudaMemcpyHostToDevice);
int sizeFyd=masslength*sizeof(float);
cudaMalloc((void**)&Fyd,sizeFyd);
cudaMemcpy(Fyd,Fy,sizeFyd,cudaMemcpyHostToDevice);
int sizeFzd=masslength*sizeof(float);
cudaMalloc((void**)&Fzd,sizeFzd);
cudaMemcpy(Fzd,Fz,sizeFzd,cudaMemcpyHostToDevice);
//Malloc result
//cudaMalloc((void**)&Pd,size);
//Dimensions of the run
//int SubMtxWidth=SubWidth;
int NBlockX=4;
int NBlockY=3;
int NBlockZ=5;
int dimBlockX=Nx/NBlockX;
int dimBlockY=Ny/NBlockY;
int dimBlockZ=Nz/NBlockZ;
dim3 dimBlock(dimBlockX,dimBlockY,dimBlockZ);
dim3 dimGrid(NBlockX,NBlockY,NBlockZ);
//Running Kernel
SolveKernel<<<dimGrid,dimBlock>>>(dimBlockX,dimBlockY,dimBlockZ,xoldd,yoldd,zoldd,xnewd,ynewd,znewd,md,kxd,kyd,kzd,bxd,byd,bzd,Fxd,Fyd,Fzd);
cudaThreadSynchronize();
//Copy data back
cudaMemcpy(xnew,xnewd,sizexnewd,cudaMemcpyDeviceToHost);
cudaMemcpy(ynew,ynewd,sizeynewd,cudaMemcpyDeviceToHost);
cudaMemcpy(znew,znewd,sizeznewd,cudaMemcpyDeviceToHost);
checkCUDAError("memcpy");
//Free memory
//cudaFree(Md);
//cudaFree(Nd);
//cudaFree(Pd);
//Free the device buffers allocated above
cudaFree(xoldd);
cudaFree(yoldd);
cudaFree(zoldd);
cudaFree(xnewd);
cudaFree(ynewd);
cudaFree(znewd);
cudaFree(md);
cudaFree(kxd);
cudaFree(kyd);
cudaFree(kzd);
cudaFree(bxd);
cudaFree(byd);
cudaFree(bzd);
cudaFree(Fxd);
cudaFree(Fyd);
cudaFree(Fzd);
}
int main(int argc,char* argv[])
{
float *xold,*yold,*zold,*xnew,*ynew,*znew,*m,*kx,*ky,*kz,*bx,*by,*bz,*Fx,*Fy,*Fz;
//----------------------------------INITIALIZATION START----------------------------------
// Solution vectors
xold=(float *)malloc(xlength*sizeof(float));
yold=(float *)malloc(ylength*sizeof(float));
zold=(float *)malloc(zlength*sizeof(float));
xnew=(float *)malloc(xlength*sizeof(float));
ynew=(float *)malloc(ylength*sizeof(float));
znew=(float *)malloc(zlength*sizeof(float));
// Mass vector
m=(float *)malloc(masslength*sizeof(float));
// Stiffness vectors
kx=(float *)malloc(kxlength*sizeof(float));
ky=(float *)malloc(kylength*sizeof(float));
kz=(float *)malloc(kzlength*sizeof(float));
// Damping vectors
bx=(float *)malloc(bxlength*sizeof(float));
by=(float *)malloc(bylength*sizeof(float));
bz=(float *)malloc(bzlength*sizeof(float));
// Force vectors
Fx=(float *)malloc(masslength*sizeof(float));
Fy=(float *)malloc(masslength*sizeof(float));
Fz=(float *)malloc(masslength*sizeof(float));
// Initial conditions
for (int i = 0; i < xlength ; i++)
{
xold[i]=0.0f;
xnew[i]=0.0f;
}
for (int i = 0; i < ylength ; i++)
{
yold[i]=0.0f;
ynew[i]=0.0f;
}
for (int i = 0; i < zlength ; i++)
{
zold[i]=0.0f;
znew[i]=0.0f;
}
// Mass [kg] and forces
for (int i = 0; i < masslength ; i++)
{
m[i]=1.0f;
Fx[i]=0.0f;
Fy[i]=0.0f;
Fz[i]=0.0f;
}
// Stiffness [N/m] and damping [N sec/m] in x-direction
for (int i = 0; i < kxlength ; i++)
{
kx[i]=0.2f;
bx[i]=0.05f;
}
// Stiffness [N/m] and damping [N sec/m] in y-direction
for (int i = 0; i < kylength ; i++)
{
ky[i]=0.2f;
by[i]=0.05f;
}
// Stiffness [N/m] and damping [N sec/m] in z-direction
for (int i = 0; i < kzlength ; i++)
{
kz[i]=0.2f;
bz[i]=0.05f;
}
//----------------------------------INITIALIZATION END--------------------------------------
//-------------------------------BOUNDARY CONDITIONS START----------------------------------
// No connections with Top wall B.C.'s
for (int i = 1; i <= Nx; i++)
{
for (int k = 1; k <= Nz; k++)
{
ky[i + Nx*Ny + (-1 + k)*Nx*(1 + Ny) - 1] = 0.0f;
by[i + Nx*Ny + (-1 + k)*Nx*(1 + Ny) - 1] = 0.0f;
}
}
//--------------------------------BOUNDARY CONDITIONS END-----------------------------------
//--------------------------------------SOLVER START-----------------------------------------
clock_t t;
t=clock();
for (int n = 1; n <= Nstep-1; n++)
{
// Excitation
Fx[(2 - 1)*Ny*Nx + (6 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu); // omega = 3 [rad/sec]
Fy[(2 - 1)*Ny*Nx + (6 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fz[(2 - 1)*Ny*Nx + (6 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fx[(2 - 1)*Ny*Nx + (7 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fy[(2 - 1)*Ny*Nx + (7 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fz[(2 - 1)*Ny*Nx + (7 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fx[(2 - 1)*Ny*Nx + (5 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fy[(2 - 1)*Ny*Nx + (5 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fz[(2 - 1)*Ny*Nx + (5 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Solve(xold,yold,zold,xnew,ynew,znew,m,kx,ky,kz,bx,by,bz,Fx,Fy,Fz);
cudaThreadSynchronize();
// OLD=NEW
for (int ix = 0; ix < xlength; ix++)
{
xold[ix] = xnew[ix];
}
for (int iy = 0; iy < ylength; iy++)
{
yold[iy] = ynew[iy];
}
for (int iz = 0; iz < zlength; iz++)
{
zold[iz] = znew[iz];
}
}
ofstream fout("test.txt");
if (fout.is_open())
{
//file opened successfully so we are here
cout << "File Opened successfully!!!. Writing data from array to file" << endl;
for (int j = 0; j < zlength; j++)
{
fout << znew[j] << ' '; //write the j-th element of the array to the file
}
fout << '\n';
cout << "Array data successfully saved into the file test.txt" << endl;
}
else //file could not be opened
{
cout << "File could not be opened." << endl;
}
t=clock()-t;
printf("%f seconds\n",((float)t)/CLOCKS_PER_SEC);
printf("%f,%f,%f\n",xold[60],yold[60],zold[60]);
free(xold);
free(yold);
free(zold);
free(xnew);
free(ynew);
free(znew);
free(m);
free(kx);
free(ky);
free(kz);
free(bx);
free(by);
free(bz);
free(Fx);
free(Fy);
free(Fz);
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if(cudaSuccess!= err)
{
fprintf(stderr,"Cuda error: %s: %s.\n",msg,cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
} |
5fc22ec6624d31dac3cdd01e43840b77af0a1468.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/groupby.hpp>
#include <cudf/detail/groupby.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/aggregation.hpp>
#include <cudf/detail/aggregation.hpp>
#include <memory>
#include <utility>
namespace cudf {
namespace experimental {
namespace groupby {
namespace detail {
namespace hash {
namespace {
/**
* @brief List of aggregation operations that can be computed with a hash-based
* implementation.
*/
static constexpr std::array<aggregation::Kind, 5> hash_aggregations{
aggregation::SUM, aggregation::MIN, aggregation::MAX, aggregation::COUNT,
aggregation::MEAN};
template <class T, size_t N>
constexpr bool array_contains(std::array<T, N> const& haystack, T needle) {
for (auto i = 0u; i < N; ++i) {
if (haystack[i] == needle) return true;
}
return false;
}
/**
* @brief Indicates whether the specified aggregation operation can be computed
* with a hash-based implementation.
*
* @param t The aggregation operation to verify
* @return true `t` is valid for a hash based groupby
* @return false `t` is invalid for a hash based groupby
*/
constexpr bool is_hash_aggregation(aggregation::Kind t) {
return array_contains(hash_aggregations, t);
}
} // namespace
/**
* @brief Indicates if a set of aggregation requests can be satisfied with a
* hash-based groupby implementation.
*
* @param keys The table of keys
* @param requests The set of columns to aggregate and the aggregations to
* perform
* @return true A hash-based groupby should be used
* @return false A hash-based groupby should not be used
*/
bool can_use_hash_groupby(table_view const& keys,
std::vector<aggregation_request> const& requests) {
return std::all_of(
requests.begin(), requests.end(), [](aggregation_request const& r) {
return std::all_of(
r.aggregations.begin(), r.aggregations.end(),
[](auto const& a) { return is_hash_aggregation(a->kind); });
});
}
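// Illustrative usage sketch (not part of the original source): a dispatcher would
// typically branch on this predicate, e.g.
// if (can_use_hash_groupby(keys, requests)) { /* hash-based path */ }
// else { /* sort-based fallback */ }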
// Hash-based groupby
std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> groupby(
table_view const& keys, std::vector<aggregation_request> const& requests,
bool ignore_null_keys, hipStream_t stream,
rmm::mr::device_memory_resource* mr) {
// stub
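// (a full implementation would build a hash table keyed on the key rows and
// accumulate each requested aggregation into per-group slots before materializing
// the output table; only the interface is provided in this snapshot)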
return std::make_pair(std::make_unique<table>(),
std::vector<aggregation_result>{});
}
} // namespace hash
} // namespace detail
} // namespace groupby
} // namespace experimental
} // namespace cudf
| 5fc22ec6624d31dac3cdd01e43840b77af0a1468.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/groupby.hpp>
#include <cudf/detail/groupby.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/aggregation.hpp>
#include <cudf/detail/aggregation.hpp>
#include <memory>
#include <utility>
namespace cudf {
namespace experimental {
namespace groupby {
namespace detail {
namespace hash {
namespace {
/**
* @brief List of aggregation operations that can be computed with a hash-based
* implementation.
*/
static constexpr std::array<aggregation::Kind, 5> hash_aggregations{
aggregation::SUM, aggregation::MIN, aggregation::MAX, aggregation::COUNT,
aggregation::MEAN};
template <class T, size_t N>
constexpr bool array_contains(std::array<T, N> const& haystack, T needle) {
for (auto i = 0u; i < N; ++i) {
if (haystack[i] == needle) return true;
}
return false;
}
/**
* @brief Indicates whether the specified aggregation operation can be computed
* with a hash-based implementation.
*
* @param t The aggregation operation to verify
* @return true `t` is valid for a hash based groupby
* @return false `t` is invalid for a hash based groupby
*/
constexpr bool is_hash_aggregation(aggregation::Kind t) {
return array_contains(hash_aggregations, t);
}
} // namespace
/**
* @brief Indicates if a set of aggregation requests can be satisfied with a
* hash-based groupby implementation.
*
* @param keys The table of keys
* @param requests The set of columns to aggregate and the aggregations to
* perform
* @return true A hash-based groupby should be used
* @return false A hash-based groupby should not be used
*/
bool can_use_hash_groupby(table_view const& keys,
std::vector<aggregation_request> const& requests) {
return std::all_of(
requests.begin(), requests.end(), [](aggregation_request const& r) {
return std::all_of(
r.aggregations.begin(), r.aggregations.end(),
[](auto const& a) { return is_hash_aggregation(a->kind); });
});
}
// Hash-based groupby
std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> groupby(
table_view const& keys, std::vector<aggregation_request> const& requests,
bool ignore_null_keys, cudaStream_t stream,
rmm::mr::device_memory_resource* mr) {
// stub
return std::make_pair(std::make_unique<table>(),
std::vector<aggregation_result>{});
}
} // namespace hash
} // namespace detail
} // namespace groupby
} // namespace experimental
} // namespace cudf
|
c5ef1e899e3efbcec37588181e5bf22905b632f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
__global__ void add(int *a, int *b, int *c){
*c=*a+*b;
}
int main(){
int i=5,j=10,res;
int *dev_i,*dev_j,*dev_res;
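// Classic single-value offload pattern: allocate device copies of the operands and
// the result, copy the inputs host-to-device, launch a single thread to add them,
// and copy the result back to the host.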
hipMalloc((void **)&dev_i,sizeof(int));
hipMalloc((void **)&dev_j,sizeof(int));
hipMalloc((void **)&dev_res,sizeof(int));
hipMemcpy(dev_i,&i,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_j,&j,sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, dev_i,dev_j,dev_res);
hipMemcpy(&res,dev_res,sizeof(int),hipMemcpyDeviceToHost);
printf("The result from CPU is %d\n\tFrom GPU is %d",(i+j),res);
return 0;
} | c5ef1e899e3efbcec37588181e5bf22905b632f1.cu | #include<stdio.h>
__global__ void add(int *a, int *b, int *c){
*c=*a+*b;
}
int main(){
int i=5,j=10,res;
int *dev_i,*dev_j,*dev_res;
cudaMalloc((void **)&dev_i,sizeof(int));
cudaMalloc((void **)&dev_j,sizeof(int));
cudaMalloc((void **)&dev_res,sizeof(int));
cudaMemcpy(dev_i,&i,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_j,&j,sizeof(int),cudaMemcpyHostToDevice);
add<<<1,1>>>(dev_i,dev_j,dev_res);
cudaMemcpy(&res,dev_res,sizeof(int),cudaMemcpyDeviceToHost);
printf("The result from CPU is %d\n\tFrom GPU is %d",(i+j),res);
return 0;
} |
bb4589e613fd38a32d33ff5889dd3c5433e53280.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define LINE 100000
void readfile(int num[LINE]){
int temp;
int i;
FILE *fp;
fp = fopen("number.txt", "r");
i = 0;
if(fp == NULL){
printf("Error loading file!!\n");
exit(1);
}else{
while(i < LINE && fscanf(fp, "%d", &temp) == 1){ // check fscanf's return value; feof()-driven loops read the last value twice
num[i] = temp;
i++;
}
}
fclose(fp);
}
void printfile(int num[LINE]){
int i;
FILE *fp = fopen("update.txt", "w");
for (i = 0; i < LINE; i++)
fprintf(fp, "%d ", num[i]);
fclose(fp);
}
void copyData(int num[LINE], int num1[LINE]){
int i;
for(i = 0; i < LINE; i++)
num1[i] = num[i];
}
__global__ void even(int *dnum, int n){
int k = threadIdx.x + blockIdx.x * blockDim.x;
int temp;
k = k * 2;
if(k <= n - 2){
if(dnum[k] > dnum[k + 1]){
temp = dnum[k];
dnum[k] = dnum[k + 1];
dnum[k + 1] = temp;
}
}
}
__global__ void odd(int *dnum, int n){
int k = threadIdx.x + blockIdx.x * blockDim.x;
int temp;
k = k * 2 + 1;
if(k <= n - 2){
if(dnum[k] > dnum[k + 1]){
temp = dnum[k];
dnum[k] = dnum[k + 1];
dnum[k + 1] = temp;
}
}
}
void docuda(int *dnum, int line){
int i;
for(i = 0; i < line; i++){
hipLaunchKernelGGL(( even), dim3(LINE), dim3(256), 0, 0, dnum, line);
hipLaunchKernelGGL(( odd), dim3(LINE), dim3(256), 0, 0, dnum, line);
}
}
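// docuda() runs odd-even transposition sort: each iteration does one compare-exchange
// pass over even-indexed pairs and one over odd-indexed pairs, and `line` iterations
// of that pass pair are more than the at-most-`line` total passes needed to sort the
// first `line` elements. Launching LINE blocks of 256 threads spawns far more threads
// than pairs; the surplus threads simply fail the bounds check (k <= n - 2) and return.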
void cuda(int num[LINE], int num1[LINE]){
int line, i;
int *dnum;
struct timeval tv;
struct timezone tz;
double start, end, time, time1, time2, average;
start = 0;
end = 0;
time = 0;
time1 = 0;
time2 = 0;
line = 10000;
average = 0;
printf("Time execution for parallel bubble sort using CUDA using 100k block and 256 threads\n");
printf("================================================================================\n");
printf(" Number of data 1st time 2nd time 3rd time average \n");
printf("================================================================================\n");
while (line <= LINE){
for (i = 0; i < 3; i++){
copyData(num, num1);
hipMalloc(&dnum, LINE*sizeof(int));
hipMemcpy(dnum, num, LINE*sizeof(int), hipMemcpyHostToDevice);
gettimeofday(&tv, &tz);
start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
docuda(dnum, line);
gettimeofday(&tv, &tz);
end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
hipMemcpy(num, dnum, LINE*sizeof(int), hipMemcpyDeviceToHost);
if (i == 0)
time = end - start;
else if (i == 1)
time1 = end - start;
else if (i == 2)
time2 = end - start;
}
average = (time + time1 + time2) / 3;
printf(" %i %fs %fs %fs %fs\n", line, time, time1, time2, average);
line += 10000;
}
}
int main(){
int num[LINE];
int num1[LINE];
printf("Getting data...\n");
readfile(num);
printf("Sorting data...\n\n");
cuda(num, num1);
printfile(num);
printf("\nParallel bubble sort in CUDA sucessfully.\n");
return 0;
} | bb4589e613fd38a32d33ff5889dd3c5433e53280.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#define LINE 100000
void readfile(int num[LINE]){
int temp;
int i;
FILE *fp;
fp = fopen("number.txt", "r");
i = 0;
if(fp == NULL){
printf("Error loading file!!\n");
exit(1);
}else{
while(i < LINE && fscanf(fp, "%d", &temp) == 1){ // check fscanf's return value; feof()-driven loops read the last value twice
num[i] = temp;
i++;
}
}
fclose(fp);
}
void printfile(int num[LINE]){
int i;
FILE *fp = fopen("update.txt", "w");
for (i = 0; i < LINE; i++)
fprintf(fp, "%d ", num[i]);
fclose(fp);
}
void copyData(int num[LINE], int num1[LINE]){
int i;
for(i = 0; i < LINE; i++)
num1[i] = num[i];
}
__global__ void even(int *dnum, int n){
int k = threadIdx.x + blockIdx.x * blockDim.x;
int temp;
k = k * 2;
if(k <= n - 2){
if(dnum[k] > dnum[k + 1]){
temp = dnum[k];
dnum[k] = dnum[k + 1];
dnum[k + 1] = temp;
}
}
}
__global__ void odd(int *dnum, int n){
int k = threadIdx.x + blockIdx.x * blockDim.x;
int temp;
k = k * 2 + 1;
if(k <= n - 2){
if(dnum[k] > dnum[k + 1]){
temp = dnum[k];
dnum[k] = dnum[k + 1];
dnum[k + 1] = temp;
}
}
}
void docuda(int *dnum, int line){
int i;
for(i = 0; i < line; i++){
even<<<LINE, 256>>>(dnum, line);
odd<<<LINE, 256>>>(dnum, line);
}
}
void cuda(int num[LINE], int num1[LINE]){
int line, i;
int *dnum;
struct timeval tv;
struct timezone tz;
double start, end, time, time1, time2, average;
start = 0;
end = 0;
time = 0;
time1 = 0;
time2 = 0;
line = 10000;
average = 0;
printf("Time execution for parallel bubble sort using CUDA using 100k block and 256 threads\n");
printf("================================================================================\n");
printf(" Number of data 1st time 2nd time 3rd time average \n");
printf("================================================================================\n");
while (line <= LINE){
for (i = 0; i < 3; i++){
copyData(num, num1);
cudaMalloc(&dnum, LINE*sizeof(int));
cudaMemcpy(dnum, num, LINE*sizeof(int), cudaMemcpyHostToDevice);
gettimeofday(&tv, &tz);
start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
docuda(dnum, line);
gettimeofday(&tv, &tz);
end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
cudaMemcpy(num, dnum, LINE*sizeof(int), cudaMemcpyDeviceToHost);
if (i == 0)
time = end - start;
else if (i == 1)
time1 = end - start;
else if (i == 2)
time2 = end - start;
}
average = (time + time1 + time2) / 3;
printf(" %i %fs %fs %fs %fs\n", line, time, time1, time2, average);
line += 10000;
}
}
int main(){
int num[LINE];
int num1[LINE];
printf("Getting data...\n");
readfile(num);
printf("Sorting data...\n\n");
cuda(num, num1);
printfile(num);
printf("\nParallel bubble sort in CUDA sucessfully.\n");
return 0;
} |
d00553dfac74403890ea9bb7c0fb91e0f81899b0.hip | // !!! This is a file automatically generated by hipify!!!
#include <gputk.h>
#define gpuTKCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
gpuTKLog(ERROR, "Failed to run stmt ", #stmt); \
gpuTKLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
#define BLUR_SIZE 5
//@@ INSERT CODE HERE
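// Illustrative sketch of the missing blur kernel (an assumption, not part of the
// original skeleton): a simple box blur in which each thread averages the pixels in a
// (2*BLUR_SIZE+1) x (2*BLUR_SIZE+1) window around its output pixel, clamping the
// window at the image borders.
__global__ void blurKernel(float *out, const float *in, int width, int height) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= width || row >= height) return;
  float sum = 0.0f;
  int count = 0;
  for (int dy = -BLUR_SIZE; dy <= BLUR_SIZE; ++dy) {
    for (int dx = -BLUR_SIZE; dx <= BLUR_SIZE; ++dx) {
      int r = row + dy;
      int c = col + dx;
      if (r >= 0 && r < height && c >= 0 && c < width) {
        sum += in[r * width + c];
        ++count; // count only in-bounds pixels so border pixels stay correctly normalized
      }
    }
  }
  out[row * width + col] = sum / count;
}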
int main(int argc, char *argv[]) {
gpuTKArg_t args;
int imageWidth;
int imageHeight;
char *inputImageFile;
gpuTKImage_t inputImage;
gpuTKImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *deviceInputImageData;
float *deviceOutputImageData;
args = gpuTKArg_read(argc, argv); /* parse the input arguments */
inputImageFile = gpuTKArg_getInputFile(args, 0);
inputImage = gpuTKImport(inputImageFile);
// The input image is in grayscale, so the number of channels
// is 1
imageWidth = gpuTKImage_getWidth(inputImage);
imageHeight = gpuTKImage_getHeight(inputImage);
// Since the image is monochromatic, it contains only one channel
outputImage = gpuTKImage_new(imageWidth, imageHeight, 1);
hostInputImageData = gpuTKImage_getData(inputImage);
hostOutputImageData = gpuTKImage_getData(outputImage);
gpuTKTime_start(GPU, "Doing GPU Computation (memory + compute)");
gpuTKTime_start(GPU, "Doing GPU memory allocation");
hipMalloc((void **)&deviceInputImageData,
imageWidth * imageHeight * sizeof(float));
hipMalloc((void **)&deviceOutputImageData,
imageWidth * imageHeight * sizeof(float));
gpuTKTime_stop(GPU, "Doing GPU memory allocation");
gpuTKTime_start(Copy, "Copying data to the GPU");
hipMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * sizeof(float),
hipMemcpyHostToDevice);
gpuTKTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
gpuTKTime_start(Compute, "Doing the computation on the GPU");
gpuTKTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
gpuTKTime_start(Copy, "Copying data from the GPU");
hipMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * sizeof(float),
hipMemcpyDeviceToHost);
gpuTKTime_stop(Copy, "Copying data from the GPU");
gpuTKTime_stop(GPU, "Doing GPU Computation (memory + compute)");
gpuTKSolution(args, outputImage);
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
gpuTKImage_delete(outputImage);
gpuTKImage_delete(inputImage);
return 0;
}
| d00553dfac74403890ea9bb7c0fb91e0f81899b0.cu | #include <gputk.h>
#define gpuTKCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
gpuTKLog(ERROR, "Failed to run stmt ", #stmt); \
gpuTKLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
#define BLUR_SIZE 5
//@@ INSERT CODE HERE
int main(int argc, char *argv[]) {
gpuTKArg_t args;
int imageWidth;
int imageHeight;
char *inputImageFile;
gpuTKImage_t inputImage;
gpuTKImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *deviceInputImageData;
float *deviceOutputImageData;
args = gpuTKArg_read(argc, argv); /* parse the input arguments */
inputImageFile = gpuTKArg_getInputFile(args, 0);
inputImage = gpuTKImport(inputImageFile);
// The input image is in grayscale, so the number of channels
// is 1
imageWidth = gpuTKImage_getWidth(inputImage);
imageHeight = gpuTKImage_getHeight(inputImage);
// Since the image is monochromatic, it contains only one channel
outputImage = gpuTKImage_new(imageWidth, imageHeight, 1);
hostInputImageData = gpuTKImage_getData(inputImage);
hostOutputImageData = gpuTKImage_getData(outputImage);
gpuTKTime_start(GPU, "Doing GPU Computation (memory + compute)");
gpuTKTime_start(GPU, "Doing GPU memory allocation");
cudaMalloc((void **)&deviceInputImageData,
imageWidth * imageHeight * sizeof(float));
cudaMalloc((void **)&deviceOutputImageData,
imageWidth * imageHeight * sizeof(float));
gpuTKTime_stop(GPU, "Doing GPU memory allocation");
gpuTKTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * sizeof(float),
cudaMemcpyHostToDevice);
gpuTKTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
gpuTKTime_start(Compute, "Doing the computation on the GPU");
gpuTKTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
gpuTKTime_start(Copy, "Copying data from the GPU");
cudaMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * sizeof(float),
cudaMemcpyDeviceToHost);
gpuTKTime_stop(Copy, "Copying data from the GPU");
gpuTKTime_stop(GPU, "Doing GPU Computation (memory + compute)");
gpuTKSolution(args, outputImage);
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
gpuTKImage_delete(outputImage);
gpuTKImage_delete(inputImage);
return 0;
}
|
598cea3c9f13a7334b8aed07be98adc92b592465.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <helper_cuda.h>
#include "convolutionSeparable_common.h"
////////////////////////////////////////////////////////////////////////////////
// Convolution kernel storage
////////////////////////////////////////////////////////////////////////////////
__constant__ float c_Kernel[KERNEL_LENGTH];
extern "C" void setConvolutionKernel(float *h_Kernel)
{
hipMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float));
}
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 4
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 1
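// With these settings each 16x4 thread block computes ROWS_RESULT_STEPS = 8 tiles of
// ROWS_BLOCKDIM_X = 16 pixels per row (a 128x4 output patch) and stages one 16-pixel
// halo step on each side in shared memory, which is why convolutionRowsGPU() asserts
// KERNEL_RADIUS <= ROWS_BLOCKDIM_X * ROWS_HALO_STEPS = 16.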
__global__ void convolutionRowsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Load main data
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X];
}
//Load left halo
#pragma unroll
for (int i = 0; i < ROWS_HALO_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Load right halo
#pragma unroll
for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
}
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
extern "C" void convolutionRowsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH
)
{
assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS);
assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0);
assert(imageH % ROWS_BLOCKDIM_Y == 0);
dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y);
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
hipLaunchKernelGGL(( convolutionRowsKernel), dim3(blocks), dim3(threads), 0, 0,
d_Dst,
d_Src,
imageW,
imageH,
imageW
);
getLastCudaError("convolutionRowsKernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 8
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 1
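// Analogous tiling for the column pass: each 16x8 block computes 8 result steps of
// 8 rows (a 16x64 output patch) with one 8-row halo step above and below, so
// KERNEL_RADIUS must not exceed COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS = 8.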
__global__ void convolutionColumnsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Main data
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
}
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
extern "C" void convolutionColumnsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH
)
{
assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS);
assert(imageW % COLUMNS_BLOCKDIM_X == 0);
assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0);
dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
hipLaunchKernelGGL(( convolutionColumnsKernel), dim3(blocks), dim3(threads), 0, 0,
d_Dst,
d_Src,
imageW,
imageH,
imageW
);
getLastCudaError("convolutionColumnsKernel() execution failed\n");
}
| 598cea3c9f13a7334b8aed07be98adc92b592465.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <helper_cuda.h>
#include "convolutionSeparable_common.h"
////////////////////////////////////////////////////////////////////////////////
// Convolution kernel storage
////////////////////////////////////////////////////////////////////////////////
__constant__ float c_Kernel[KERNEL_LENGTH];
extern "C" void setConvolutionKernel(float *h_Kernel)
{
cudaMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float));
}
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 4
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 1
__global__ void convolutionRowsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Load main data
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X];
}
//Load left halo
#pragma unroll
for (int i = 0; i < ROWS_HALO_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Load right halo
#pragma unroll
for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
}
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
extern "C" void convolutionRowsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH
)
{
assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS);
assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0);
assert(imageH % ROWS_BLOCKDIM_Y == 0);
dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y);
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
convolutionRowsKernel<<<blocks, threads>>>(
d_Dst,
d_Src,
imageW,
imageH,
imageW
);
getLastCudaError("convolutionRowsKernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 8
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 1
__global__ void convolutionColumnsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Main data
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
}
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
extern "C" void convolutionColumnsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH
)
{
assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS);
assert(imageW % COLUMNS_BLOCKDIM_X == 0);
assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0);
dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
convolutionColumnsKernel<<<blocks, threads>>>(
d_Dst,
d_Src,
imageW,
imageH,
imageW
);
getLastCudaError("convolutionColumnsKernel() execution failed\n");
}
|
6397822154c63aeb3e28bfd90ef65d9841dc2741.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layers/reorg_layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_ROCM
template <typename Dtype>
__global__ void reorg_kernel(const Dtype *x, int_tp w, int_tp h, int_tp c, int_tp batch, int_tp stride, int_tp forward, Dtype *out)
{
int_tp size = batch*c*h*w;
int_tp i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= size) return;
int_tp in_index = i;
int_tp in_w = i%w;
i = i/w;
int_tp in_h = i%h;
i = i/h;
int_tp in_c = i%c;
i = i/c;
int_tp b = i%batch;
int_tp out_c = c/(stride*stride);
int_tp c2 = in_c % out_c;
int_tp offset = in_c / out_c;
int_tp w2 = in_w*stride + offset % stride;
int_tp h2 = in_h*stride + offset / stride;
int_tp out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
if(forward)
{
out[out_index] = x[in_index];
}
else
{
out[in_index] = x[out_index];
}
}
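// The kernel above implements the reorg mapping used by YOLO-style networks: a tensor
// laid out as (batch, c, h, w) is rearranged so that each group of stride*stride
// channels is traded for a stride-times-larger spatial extent, i.e.
// (b, c, h, w) <-> (b, c/(stride*stride), h*stride, w*stride); the `forward` flag
// selects the direction of the copy.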
#endif // USE_ROCM
template<typename Dtype>
void ReorgLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
const Dtype *bottom_data = bottom[0]->gpu_data();
int_tp count = bottom[0]->count();
Dtype *top_data = top[0]->mutable_gpu_data();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
reorg_kernel<Dtype>
CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(bottom_data, width_, height_,
channels_, batch_num_, stride_, reverse_, top_data);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->template program<Dtype>();
// Execute kernel
viennacl::ocl::kernel &oclk_reorg_forward = program.get_kernel(
CL_KERNEL_SELECT("reorg"));
viennacl::ocl::enqueue(
oclk_reorg_forward(count, WrapHandle((cl_mem)bottom_data, &ctx),
width_, height_, channels_, batch_num_, stride_, reverse_,
WrapHandle((cl_mem)top_data, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void ReorgLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
if (!propagate_down[0]) {
return;
}
int_tp count = diff_.count();
const Dtype *top_diff = diff_.mutable_gpu_diff();
Dtype *bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
reorg_kernel<Dtype>
CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(top_diff, width_, height_,
channels_, batch_num_, stride_, !reverse_, bottom_diff);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->template program<Dtype>();
viennacl::ocl::kernel &oclk_reorg_backward = program.get_kernel(
CL_KERNEL_SELECT("reorg"));
viennacl::ocl::enqueue(
oclk_reorg_backward(count, WrapHandle((cl_mem)top_diff, &ctx),
width_, height_, channels_, batch_num_, stride_, int(!reverse_),
WrapHandle((cl_mem)bottom_diff, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReorgLayer);
} // namespace caffe
| 6397822154c63aeb3e28bfd90ef65d9841dc2741.cu | #include "caffe/layers/reorg_layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_CUDA
template <typename Dtype>
__global__ void reorg_kernel(const Dtype *x, int_tp w, int_tp h, int_tp c, int_tp batch, int_tp stride, int_tp forward, Dtype *out)
{
int_tp size = batch*c*h*w;
int_tp i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= size) return;
int_tp in_index = i;
int_tp in_w = i%w;
i = i/w;
int_tp in_h = i%h;
i = i/h;
int_tp in_c = i%c;
i = i/c;
int_tp b = i%batch;
int_tp out_c = c/(stride*stride);
int_tp c2 = in_c % out_c;
int_tp offset = in_c / out_c;
int_tp w2 = in_w*stride + offset % stride;
int_tp h2 = in_h*stride + offset / stride;
int_tp out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
if(forward)
{
out[out_index] = x[in_index];
}
else
{
out[in_index] = x[out_index];
}
}
#endif // USE_CUDA
template<typename Dtype>
void ReorgLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
const Dtype *bottom_data = bottom[0]->gpu_data();
int_tp count = bottom[0]->count();
Dtype *top_data = top[0]->mutable_gpu_data();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
reorg_kernel<Dtype>
CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(bottom_data, width_, height_,
channels_, batch_num_, stride_, reverse_, top_data);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->template program<Dtype>();
// Execute kernel
viennacl::ocl::kernel &oclk_reorg_forward = program.get_kernel(
CL_KERNEL_SELECT("reorg"));
viennacl::ocl::enqueue(
oclk_reorg_forward(count, WrapHandle((cl_mem)bottom_data, &ctx),
width_, height_, channels_, batch_num_, stride_, reverse_,
WrapHandle((cl_mem)top_data, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void ReorgLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
if (!propagate_down[0]) {
return;
}
int_tp count = diff_.count();
const Dtype *top_diff = diff_.mutable_gpu_diff();
Dtype *bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
reorg_kernel<Dtype>
CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(top_diff, width_, height_,
channels_, batch_num_, stride_, !reverse_, bottom_diff);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->template program<Dtype>();
viennacl::ocl::kernel &oclk_reorg_backward = program.get_kernel(
CL_KERNEL_SELECT("reorg"));
viennacl::ocl::enqueue(
oclk_reorg_backward(count, WrapHandle((cl_mem)top_diff, &ctx),
width_, height_, channels_, batch_num_, stride_, int(!reverse_),
WrapHandle((cl_mem)bottom_diff, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReorgLayer);
} // namespace caffe
|
5d8f36a3efea9d59fd79204920988fdd5e836df3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/ops/ksum_grad_op.h"
#include "xdl/core/framework/op_registry.h"
#include "xdl/core/lib/common_defines.h"
#include "xdl/core/lib/atomic.h"
#include "xdl/core/lib/binary_search.h"
#include "xdl/core/framework/gpu/gpu_device.h"
#include <hip/hip_runtime_api.h>
namespace xdl {
namespace {
template <typename T, typename I>
__global__ void KSumGradKernel(const T* pgrad, const I* pidx, const T* pval,
const I* pgrp, size_t grp_size, size_t eb_dim,
bool average, T* pout) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= pgrp[grp_size - 1]) return;
const I* p = LowerBound(pgrp, pgrp + grp_size, idx + 1);
size_t grp_idx = p - pgrp;
size_t grp_width = (grp_idx == 0) ? p[0]
: (p[0] - p[-1]);
if (grp_width == 0) return;
const T* src = pgrad + grp_idx * eb_dim;
T* dst = pout + pidx[idx] * eb_dim;
for (size_t k = 0; k < eb_dim; ++k) {
T val = (pval != nullptr) ? pval[idx] * src[k] : src[k];
if (average) val /= grp_width;
common::gpu_atomic_add<T>(val, dst + k);
}
}
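// Each thread above handles one (index, value) entry: it locates the group the entry
// belongs to via a lower-bound search on the group offsets, scales the incoming
// gradient row by the entry's value (if values are present) and by 1/group_width when
// averaging, and atomically accumulates it into row pidx[idx] of the embedding gradient.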
} // namespace
template <typename T, typename I>
class KSumGradGpuOp : public GpuOpKernel {
public:
Status Init(OpKernelConstruction* ctx) override;
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override;
private:
bool average_;
};
template <typename T, typename I>
Status KSumGradGpuOp<T, I>::Init(OpKernelConstruction* ctx) {
XDL_CHECK_STATUS(ctx->GetAttr("average", &average_));
return Status::Ok();
}
template <typename T, typename I>
Status KSumGradGpuOp<T, I>::LaunchKernel(OpKernelContext* ctx, CudaStream* stream) {
Tensor embed, index, value, segment, group, grad, out_grad;
XDL_CHECK_STATUS(ctx->GetInput(0, &embed));
XDL_CHECK_COND(1 == embed.Shape().Size(),
Status::ArgumentError("embed input dim must be 1"));
XDL_CHECK_STATUS(ctx->GetInput(1, &index));
XDL_CHECK_COND(1 == index.Shape().Size(),
Status::ArgumentError("index input dim must be 1"));
XDL_CHECK_STATUS(ctx->GetInput(2, &value));
XDL_CHECK_COND(value.Shape().NumElements() == index.Shape().NumElements() ||
value.Shape().NumElements() == 0,
Status::ArgumentError("value input size must match index"));
XDL_CHECK_STATUS(ctx->GetInput(3, &segment));
XDL_CHECK_COND(1 == segment.Shape().Size(),
Status::ArgumentError("segment input dim must be 1"));
XDL_CHECK_STATUS(ctx->GetInput(4, &group));
XDL_CHECK_COND(1 == group.Shape().Size(),
Status::ArgumentError("group input dim must be 1"));
XDL_CHECK_STATUS(ctx->GetInput(7, &grad));
XDL_CHECK_COND(2 == grad.Shape().Size(),
Status::ArgumentError("grad input dim must be 2"));
T* pgrad = grad.Raw<T>();
I* pidx = index.Raw<I>();
T* pval = value.Raw<T>();
I* pseg = segment.Raw<I>();
I* pgrp = group.Raw<I>();
if (value.Shape().NumElements() == 0) {
pval = nullptr;
}
//std::vector<int64_t> dims;
//dims.resize(embed.Shape().NumElements());
int64_t* pe = embed.Raw<int64_t>();
//CUDA_CHECK(hipMemcpy((void*)dims.data(), (void*)pe, 8 * embed.Shape().NumElements(), hipMemcpyDeviceToHost));
std::vector<size_t> sdims;
for (size_t i = 0; i < embed.Shape().NumElements(); ++i) {
sdims.push_back(pe[i]);
}
//for (auto item: dims) {
// sdims.push_back(item);
//}
TensorShape embed_shape(sdims);
size_t eb_dim = embed_shape[1];
size_t seg_size = segment.Shape().NumElements();
size_t id_size = index.Shape().NumElements();
size_t grp_size = seg_size;
XDL_CHECK(seg_size == grad.Shape()[0]) << "grad dim 0 is not equal to batch size";
if (group.Shape().NumElements() == 0) {
pgrp = pseg;
} else {
grp_size = group.Shape().NumElements();
XDL_CHECK(grp_size % seg_size == 0) << "group must be divided by segment";
}
XDL_CHECK_STATUS(ctx->AllocateOutput(0, embed_shape, &out_grad));
T* pout = out_grad.Raw<T>();
size_t bytes = sizeof(T) * embed_shape.NumElements();
CUDA_CHECK(hipMemsetAsync(pout, 0, bytes, stream->GetInternal()));
if (id_size == 0) return Status::Ok();
size_t blocks = CUDA_GET_BLOCKS(id_size);
hipLaunchKernelGGL(( KSumGradKernel<T, I>),
dim3(blocks),
dim3(CUDA_GET_THREADS(id_size, blocks)),
0,
stream->GetInternal(), pgrad, pidx, pval, pgrp, grp_size,
eb_dim, average_, pout);
return Status::Ok();
}
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(KSumGrad, KSumGradGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype")
REGISTER_GPU_KERNEL(float, int32_t);
REGISTER_GPU_KERNEL(float, int64_t);
REGISTER_GPU_KERNEL(double, int32_t);
REGISTER_GPU_KERNEL(double, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
| 5d8f36a3efea9d59fd79204920988fdd5e836df3.cu | /*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/ops/ksum_grad_op.h"
#include "xdl/core/framework/op_registry.h"
#include "xdl/core/lib/common_defines.h"
#include "xdl/core/lib/atomic.h"
#include "xdl/core/lib/binary_search.h"
#include "xdl/core/framework/gpu/gpu_device.h"
#include <cuda_runtime_api.h>
namespace xdl {
namespace {
template <typename T, typename I>
__global__ void KSumGradKernel(const T* pgrad, const I* pidx, const T* pval,
const I* pgrp, size_t grp_size, size_t eb_dim,
bool average, T* pout) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= pgrp[grp_size - 1]) return;
const I* p = LowerBound(pgrp, pgrp + grp_size, idx + 1);
size_t grp_idx = p - pgrp;
size_t grp_width = (grp_idx == 0) ? p[0]
: (p[0] - p[-1]);
if (grp_width == 0) return;
const T* src = pgrad + grp_idx * eb_dim;
T* dst = pout + pidx[idx] * eb_dim;
for (size_t k = 0; k < eb_dim; ++k) {
T val = (pval != nullptr) ? pval[idx] * src[k] : src[k];
if (average) val /= grp_width;
common::gpu_atomic_add<T>(val, dst + k);
}
}
} // namespace
template <typename T, typename I>
class KSumGradGpuOp : public GpuOpKernel {
public:
Status Init(OpKernelConstruction* ctx) override;
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override;
private:
bool average_;
};
template <typename T, typename I>
Status KSumGradGpuOp<T, I>::Init(OpKernelConstruction* ctx) {
XDL_CHECK_STATUS(ctx->GetAttr("average", &average_));
return Status::Ok();
}
template <typename T, typename I>
Status KSumGradGpuOp<T, I>::LaunchKernel(OpKernelContext* ctx, CudaStream* stream) {
Tensor embed, index, value, segment, group, grad, out_grad;
XDL_CHECK_STATUS(ctx->GetInput(0, &embed));
XDL_CHECK_COND(1 == embed.Shape().Size(),
Status::ArgumentError("embed input dim must be 1"));
XDL_CHECK_STATUS(ctx->GetInput(1, &index));
XDL_CHECK_COND(1 == index.Shape().Size(),
Status::ArgumentError("index input dim must be 1"));
XDL_CHECK_STATUS(ctx->GetInput(2, &value));
XDL_CHECK_COND(value.Shape().NumElements() == index.Shape().NumElements() ||
value.Shape().NumElements() == 0,
Status::ArgumentError("value input size must match index"));
XDL_CHECK_STATUS(ctx->GetInput(3, &segment));
XDL_CHECK_COND(1 == segment.Shape().Size(),
Status::ArgumentError("segment input dim must be 1"));
XDL_CHECK_STATUS(ctx->GetInput(4, &group));
XDL_CHECK_COND(1 == group.Shape().Size(),
Status::ArgumentError("group input dim must be 1"));
XDL_CHECK_STATUS(ctx->GetInput(7, &grad));
XDL_CHECK_COND(2 == grad.Shape().Size(),
Status::ArgumentError("grad input dim must be 2"));
T* pgrad = grad.Raw<T>();
I* pidx = index.Raw<I>();
T* pval = value.Raw<T>();
I* pseg = segment.Raw<I>();
I* pgrp = group.Raw<I>();
if (value.Shape().NumElements() == 0) {
pval = nullptr;
}
//std::vector<int64_t> dims;
//dims.resize(embed.Shape().NumElements());
int64_t* pe = embed.Raw<int64_t>();
//CUDA_CHECK(cudaMemcpy((void*)dims.data(), (void*)pe, 8 * embed.Shape().NumElements(), cudaMemcpyDeviceToHost));
std::vector<size_t> sdims;
for (size_t i = 0; i < embed.Shape().NumElements(); ++i) {
sdims.push_back(pe[i]);
}
//for (auto item: dims) {
// sdims.push_back(item);
//}
TensorShape embed_shape(sdims);
size_t eb_dim = embed_shape[1];
size_t seg_size = segment.Shape().NumElements();
size_t id_size = index.Shape().NumElements();
size_t grp_size = seg_size;
XDL_CHECK(seg_size == grad.Shape()[0]) << "grad dim 0 is not equal to batch size";
if (group.Shape().NumElements() == 0) {
pgrp = pseg;
} else {
grp_size = group.Shape().NumElements();
XDL_CHECK(grp_size % seg_size == 0) << "group must be divided by segment";
}
XDL_CHECK_STATUS(ctx->AllocateOutput(0, embed_shape, &out_grad));
T* pout = out_grad.Raw<T>();
size_t bytes = sizeof(T) * embed_shape.NumElements();
CUDA_CHECK(cudaMemsetAsync(pout, 0, bytes, stream->GetInternal()));
if (id_size == 0) return Status::Ok();
size_t blocks = CUDA_GET_BLOCKS(id_size);
KSumGradKernel<T, I><<<
blocks,
CUDA_GET_THREADS(id_size, blocks),
0,
stream->GetInternal()>>>(pgrad, pidx, pval, pgrp, grp_size,
eb_dim, average_, pout);
return Status::Ok();
}
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(KSumGrad, KSumGradGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype")
REGISTER_GPU_KERNEL(float, int32_t);
REGISTER_GPU_KERNEL(float, int64_t);
REGISTER_GPU_KERNEL(double, int32_t);
REGISTER_GPU_KERNEL(double, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
|
506075af03ca28d170ad667c16ac21c5806be81e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Simulation of flow inside a 2D square cavity
using the lattice Boltzmann method (LBM)
Written by: Abhijit Joshi ([email protected])
Last modified on: Thursday, July 18 2013 @12:08 pm
Build instructions: make (uses Makefile present in this folder)
Run instructions: optirun ./gpu_lbm
*/
#include<iostream>
#include<stdio.h>
#include<ctime> // clock_t, clock(), CLOCKS_PER_SEC
// problem parameters
const int N = 128; // number of node points along X and Y (cavity length in lattice units)
const int TIME_STEPS = 1000000; // number of time steps for which the simulation is run
const double REYNOLDS_NUMBER = 1E6; // REYNOLDS_NUMBER = LID_VELOCITY * N / kinematicViscosity
// don't change these unless you know what you are doing
const int Q = 9; // number of discrete velocity directions used
const double DENSITY = 2.7; // fluid density in lattice units
const double LID_VELOCITY = 0.05; // lid velocity in lattice units
// D2Q9 parameters
__constant__ double ex[Q];
__constant__ double ey[Q];
__constant__ int oppos[Q];
__constant__ double wt[Q];
// populate D3Q19 parameters and copy them to __constant__ memory on the GPU
void D3Q9(double *ex_h, double *ey_h, int *oppos_h, double *wt_h)
{
// D2Q9 model base velocities and weights
ex_h[0] = 0.0; ey_h[0] = 0.0; wt_h[0] = 4.0 / 9.0;
ex_h[1] = 1.0; ey_h[1] = 0.0; wt_h[1] = 1.0 / 9.0;
ex_h[2] = 0.0; ey_h[2] = 1.0; wt_h[2] = 1.0 / 9.0;
ex_h[3] = -1.0; ey_h[3] = 0.0; wt_h[3] = 1.0 / 9.0;
ex_h[4] = 0.0; ey_h[4] = -1.0; wt_h[4] = 1.0 / 9.0;
ex_h[5] = 1.0; ey_h[5] = 1.0; wt_h[5] = 1.0 / 36.0;
ex_h[6] = -1.0; ey_h[6] = 1.0; wt_h[6] = 1.0 / 36.0;
ex_h[7] = -1.0; ey_h[7] = -1.0; wt_h[7] = 1.0 / 36.0;
ex_h[8] = 1.0; ey_h[8] = -1.0; wt_h[8] = 1.0 / 36.0;
// define opposite (anti) directions (useful for implementing bounce back)
oppos_h[0] = 0; // 6 2 5
oppos_h[1] = 3; // ^
oppos_h[2] = 4; // |
oppos_h[3] = 1; // |
oppos_h[4] = 2; // 3 <----- 0 -----> 1
oppos_h[5] = 7; // |
oppos_h[6] = 8; // |
oppos_h[7] = 5; // v
oppos_h[8] = 6; // 7 4 8
// copy to constant (read-only) memory
hipMemcpyToSymbol(ex, ex_h, Q * sizeof(double)); // x-component of velocity direction
hipMemcpyToSymbol(ey, ey_h, Q * sizeof(double)); // y-component of velocity direction
hipMemcpyToSymbol(oppos, oppos_h, Q * sizeof(int)); // opposite direction for each velocity direction
hipMemcpyToSymbol(wt, wt_h, Q * sizeof(double)); // weight factor for velocity direction
}
// initialize values for direction vectors, density, velocity and distribution functions on the GPU
__global__ void initialize(const int N, const int Q, const double DENSITY, const double LID_VELOCITY,
double *rho, double *ux, double *uy, double* sigma,
double *f, double *feq, double *f_new)
{
// compute the global "i" and "j" location handled by this thread
const int i = blockIdx.x * blockDim.x + threadIdx.x ;
const int j = blockIdx.y * blockDim.y + threadIdx.y ;
// bound checking
if( (i > (N-1)) || (j > (N-1)) ) return;
// natural index for location (i,j)
const int index = i*N+j; // column-ordering
// initialize density and velocity fields inside the cavity
rho[index] = DENSITY; // density
ux[index] = 0.0; // x-component of velocity
uy[index] = 0.0; // x-component of velocity
sigma[index] = 0.0; // rate-of-strain field
// specify boundary condition for the moving lid
if(j==0) ux[index] = LID_VELOCITY;
// assign initial values for distribution functions
// along various directions using equilibrium functions
#pragma unroll
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
double edotu = ex[a]*ux[index] + ey[a]*uy[index];
double udotu = ux[index]*ux[index] + uy[index]*uy[index];
feq[index_f] = rho[index] * wt[a] * (1.0 + 3.0*edotu + 4.5*edotu*edotu - 1.5*udotu);
f[index_f] = feq[index_f];
f_new[index_f] = feq[index_f];
}
}
// this function updates the values of the distribution functions at all points along all directions
// carries out one lattice time-step (streaming + collision) in the algorithm
__global__ void collideAndStream(// READ-ONLY parameters (used by this function but not changed)
const int N, const int Q, const double DENSITY, const double LID_VELOCITY, const double REYNOLDS_NUMBER,
// READ + WRITE parameters (get updated in this function)
double *rho, // density
double *ux, // X-velocity
double *uy, // Y-velocity
double *sigma, // rate-of-strain
double *f, // distribution function
double *feq, // equilibrium distribution function
double *f_new) // new distribution function
{
// compute the global "i" and "j" location handled by this thread
const int i = blockIdx.x * blockDim.x + threadIdx.x ;
const int j = blockIdx.y * blockDim.y + threadIdx.y ;
// bound checking
if( (i < 1) || (i > (N-2)) || (j < 1) || (j > (N-2)) ) return;
// natural index
const int index = i*N + j; // column-major ordering
// calculate fluid viscosity based on the Reynolds number
double kinematicViscosity = LID_VELOCITY * (double) N / REYNOLDS_NUMBER;
// calculate relaxation time tau
double tau = 0.5 + 3.0 * kinematicViscosity;
// collision
#pragma unroll
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
double edotu = ex[a]*ux[index] + ey[a]*uy[index];
double udotu = ux[index]*ux[index] + uy[index]*uy[index];
feq[index_f] = rho[index] * wt[a] * (1 + 3*edotu + 4.5*edotu*edotu - 1.5*udotu);
}
// streaming from interior node points
#pragma unroll
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
int index_nbr = (i+ex[a])*N + (j+ey[a]);
int index_nbr_f = a + index_nbr * Q;
int indexoppos = oppos[a] + index*Q;
double tau_eff, tau_t, C_Smagorinsky; // turbulence model parameters
C_Smagorinsky = 0.16;
// tau_t = additional contribution to the relaxation time
// because of the "eddy viscosity" model
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// REFERENCE: Krafczyk M., Tolke J. and Luo L.-S. (2003)
// Large-Eddy Simulations with a Multiple-Relaxation-Time LBE Model
// International Journal of Modern Physics B, Vol.17, 33-39
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
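// In equation form: tau_t = 0.5 * ( sqrt(tau^2 + 18 * C_Smagorinsky^2 * |S|) - tau )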
tau_t = 0.5*(pow(pow(tau,2) + 18.0*pow(C_Smagorinsky,2)*sigma[index],0.5) - tau);
// the effective relaxation time accounts for the additional "eddy viscosity"
// effects. Note that tau_eff now varies from point to point in the domain, and is
// larger for large strain rates. If the strain rate is zero, tau_eff = 0 and we
// revert back to the original (laminar) LBM scheme where tau_eff = tau.
tau_eff = tau + tau_t;
// post-collision distribution at (i,j) along "a"
double f_plus = f[index_f] - (f[index_f] - feq[index_f])/tau_eff;
int iS = i + ex[a]; int jS = j + ey[a];
if((iS==0) || (iS==N-1) || (jS==0) || (jS==N-1)) {
// bounce back
double ubdote = ux[index_nbr]*ex[a] + uy[index_nbr]*ey[a];
f_new[indexoppos] = f_plus - 6.0 * DENSITY * wt[a] * ubdote;
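// half-way bounce-back with a moving-wall correction: the -6*DENSITY*wt[a]*(u_wall . e_a)
// term injects the wall (lid) velocity into the reflected population; for the
// stationary walls u_wall = 0 and this reduces to plain bounce-back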
}
else {
// stream to neighbor
f_new[index_nbr_f] = f_plus;
}
}
}
__global__ void everythingElse( // READ-ONLY parameters (used by this function but not changed)
const int N, const int Q, const double DENSITY, const double LID_VELOCITY, const double REYNOLDS_NUMBER,
// READ + WRITE parameters (get updated in this function)
double *rho, // density
double *ux, // X-velocity
double *uy, // Y-velocity
double *sigma, // rate-of-strain
double *f, // distribution function
double *feq, // equilibrium distribution function
double *f_new) // new distribution function
{
// compute the global "i" and "j" location of this thread
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
// bound checking
if( (i < 1) || (i > (N-2)) || (j < 1) || (j > (N-2)) ) return;
// natural index
const int index = i*N + j; // column-major ordering
// push f_new into f
#pragma unroll
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
f[index_f] = f_new[index_f];
}
// update density at interior nodes
rho[index]=0.0;
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
rho[index] += f_new[index_f];
}
// update velocity at interior nodes
double velx=0.0;
double vely=0.0;
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
velx += f_new[index_f]*ex[a];
vely += f_new[index_f]*ey[a];
}
ux[index] = velx/rho[index];
uy[index] = vely/rho[index];
// update the rate-of-strain field
double sum_xx = 0.0, sum_xy = 0.0, sum_xz = 0.0;
double sum_yx = 0.0, sum_yy = 0.0, sum_yz = 0.0;
double sum_zx = 0.0, sum_zy = 0.0, sum_zz = 0.0;
for(int a=1; a<Q; a++)
{
int index_f = a + index*Q;
sum_xx = sum_xx + (f_new[index_f] - feq[index_f])*ex[a]*ex[a];
sum_xy = sum_xy + (f_new[index_f] - feq[index_f])*ex[a]*ey[a];
sum_xz = 0.0;
sum_yx = sum_xy;
sum_yy = sum_yy + (f_new[index_f] - feq[index_f])*ey[a]*ey[a];
sum_yz = 0.0;
sum_zx = 0.0;
sum_zy = 0.0;
sum_zz = 0.0;
}
// evaluate |S| (magnitude of the strain-rate)
sigma[index] = pow(sum_xx,2) + pow(sum_xy,2) + pow(sum_xz,2)
+ pow(sum_yx,2) + pow(sum_yy,2) + pow(sum_yz,2)
+ pow(sum_zx,2) + pow(sum_zy,2) + pow(sum_zz,2);
sigma[index] = pow(sigma[index],0.5);
}
int main(int argc, char* argv[])
{
// allocate memory on the GPU
// distribution functions
double *f, *feq, *f_new;
hipMalloc((void **)&f,N*N*Q*sizeof(double));
hipMalloc((void **)&feq,N*N*Q*sizeof(double));
hipMalloc((void **)&f_new,N*N*Q*sizeof(double));
// density and velocity
double *rho, *ux, *uy;
hipMalloc((void **)&rho,N*N*sizeof(double));
hipMalloc((void **)&ux,N*N*sizeof(double));
hipMalloc((void **)&uy,N*N*sizeof(double));
// rate-of-strain
double *sigma;
hipMalloc((void **)&sigma,N*N*sizeof(double));
// allocate space for D3Q9 parameters on the host
double *ex_h = new double[Q];
double *ey_h = new double[Q];
int *oppos_h = new int[Q];
double *wt_h = new double[Q];
// fill D3Q9 parameters in constant memory on the GPU
D3Q9(ex_h, ey_h, oppos_h, wt_h);
// assign a 2D distribution of CUDA "threads" within each CUDA "block"
int threadsAlongX=16, threadsAlongY=16;
dim3 dimBlock(threadsAlongX, threadsAlongY, 1);
// calculate number of blocks along X and Y in a 2D CUDA "grid"
dim3 dimGrid( ceil(float(N)/float(dimBlock.x)), ceil(float(N)/float(dimBlock.y)), 1 );
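// For the defaults above (N = 128 with 16x16 threads per block) this gives an 8x8
// grid, i.e. one thread per lattice node.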
// launch GPU kernel to initialize all fields
hipLaunchKernelGGL(( initialize), dim3(dimGrid),dim3(dimBlock), 0, 0, N, Q, DENSITY, LID_VELOCITY,
rho, ux, uy, sigma,
f, feq, f_new);
// time integration
int time=0;
clock_t t0, tN;
t0 = clock();
while(time<TIME_STEPS) {
time++;
hipLaunchKernelGGL(( collideAndStream), dim3(dimGrid),dim3(dimBlock) , 0, 0, N, Q, DENSITY, LID_VELOCITY, REYNOLDS_NUMBER,
rho, ux, uy, sigma,
f, feq, f_new);
// collideAndStream and everythingElse were originally one kernel
// they were separated out to make all threads synchronize globally
// before moving on to the next set of calculations
hipLaunchKernelGGL(( everythingElse), dim3(dimGrid),dim3(dimBlock) , 0, 0, N, Q, DENSITY, LID_VELOCITY, REYNOLDS_NUMBER,
rho, ux, uy, sigma,
f, feq, f_new);
tN = clock() - t0;
std::cout << "Lattice time " << time
<< " clock ticks " << tN
<< " wall clock time " << tN/CLOCKS_PER_SEC
<< " lattice time steps per second = " << (float) CLOCKS_PER_SEC * time / (float) tN
<< std::endl;
}
// added this in for flushing nvprof output to screen
hipDeviceReset();
return 0;
}
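// A minimal error-checking sketch, assuming only the standard HIP runtime calls
// hipGetLastError() and hipGetErrorString(): each hipLaunchKernelGGL call above
// could be followed by
//
//     hipError_t err = hipGetLastError();
//     if (err != hipSuccess) {
//         printf("kernel launch failed: %s\n", hipGetErrorString(err));
//         return 1;
//     }
//
// so that launch failures are reported instead of silently producing stale fields.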
| 506075af03ca28d170ad667c16ac21c5806be81e.cu | /*
Simulation of flow inside a 2D square cavity
using the lattice Boltzmann method (LBM)
Written by: Abhijit Joshi ([email protected])
Last modified on: Thursday, July 18 2013 @12:08 pm
Build instructions: make (uses Makefile present in this folder)
Run instructions: optirun ./gpu_lbm
*/
#include<iostream>
#include<stdio.h>
#include<ctime> // clock_t, clock(), CLOCKS_PER_SEC
// problem parameters
const int N = 128; // number of node points along X and Y (cavity length in lattice units)
const int TIME_STEPS = 1000000; // number of time steps for which the simulation is run
const double REYNOLDS_NUMBER = 1E6; // REYNOLDS_NUMBER = LID_VELOCITY * N / kinematicViscosity
// don't change these unless you know what you are doing
const int Q = 9;                    // number of discrete velocity directions used
const double DENSITY = 2.7; // fluid density in lattice units
const double LID_VELOCITY = 0.05; // lid velocity in lattice units
// D2Q9 parameters
__constant__ double ex[Q];
__constant__ double ey[Q];
__constant__ int oppos[Q];
__constant__ double wt[Q];
// populate D2Q9 parameters and copy them to __constant__ memory on the GPU
void D3Q9(double *ex_h, double *ey_h, int *oppos_h, double *wt_h)
{
// D2Q9 model base velocities and weights
ex_h[0] = 0.0; ey_h[0] = 0.0; wt_h[0] = 4.0 / 9.0;
ex_h[1] = 1.0; ey_h[1] = 0.0; wt_h[1] = 1.0 / 9.0;
ex_h[2] = 0.0; ey_h[2] = 1.0; wt_h[2] = 1.0 / 9.0;
ex_h[3] = -1.0; ey_h[3] = 0.0; wt_h[3] = 1.0 / 9.0;
ex_h[4] = 0.0; ey_h[4] = -1.0; wt_h[4] = 1.0 / 9.0;
ex_h[5] = 1.0; ey_h[5] = 1.0; wt_h[5] = 1.0 / 36.0;
ex_h[6] = -1.0; ey_h[6] = 1.0; wt_h[6] = 1.0 / 36.0;
ex_h[7] = -1.0; ey_h[7] = -1.0; wt_h[7] = 1.0 / 36.0;
ex_h[8] = 1.0; ey_h[8] = -1.0; wt_h[8] = 1.0 / 36.0;
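    // sanity check: the nine weights sum to 4/9 + 4*(1/9) + 4*(1/36) = 1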
    // define opposite (anti) directions (useful for implementing bounce back)
oppos_h[0] = 0; // 6 2 5
oppos_h[1] = 3; // ^
oppos_h[2] = 4; // |
oppos_h[3] = 1; // |
oppos_h[4] = 2; // 3 <----- 0 -----> 1
oppos_h[5] = 7; // |
oppos_h[6] = 8; // |
oppos_h[7] = 5; // v
oppos_h[8] = 6; // 7 4 8
// copy to constant (read-only) memory
cudaMemcpyToSymbol(ex, ex_h, Q * sizeof(double)); // x-component of velocity direction
cudaMemcpyToSymbol(ey, ey_h, Q * sizeof(double)); // y-component of velocity direction
cudaMemcpyToSymbol(oppos, oppos_h, Q * sizeof(int)); // opposite direction for each velocity direction
cudaMemcpyToSymbol(wt, wt_h, Q * sizeof(double)); // weight factor for velocity direction
}
// initialize values for direction vectors, density, velocity and distribution functions on the GPU
__global__ void initialize(const int N, const int Q, const double DENSITY, const double LID_VELOCITY,
double *rho, double *ux, double *uy, double* sigma,
double *f, double *feq, double *f_new)
{
// compute the global "i" and "j" location handled by this thread
const int i = blockIdx.x * blockDim.x + threadIdx.x ;
const int j = blockIdx.y * blockDim.y + threadIdx.y ;
// bound checking
if( (i > (N-1)) || (j > (N-1)) ) return;
// natural index for location (i,j)
const int index = i*N+j; // column-ordering
// initialize density and velocity fields inside the cavity
rho[index] = DENSITY; // density
ux[index] = 0.0; // x-component of velocity
uy[index] = 0.0; // x-component of velocity
sigma[index] = 0.0; // rate-of-strain field
// specify boundary condition for the moving lid
if(j==0) ux[index] = LID_VELOCITY;
// assign initial values for distribution functions
    // along various directions using equilibrium functions
#pragma unroll
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
double edotu = ex[a]*ux[index] + ey[a]*uy[index];
double udotu = ux[index]*ux[index] + uy[index]*uy[index];
feq[index_f] = rho[index] * wt[a] * (1.0 + 3.0*edotu + 4.5*edotu*edotu - 1.5*udotu);
f[index_f] = feq[index_f];
f_new[index_f] = feq[index_f];
}
}
// this function updates the values of the distribution functions at all points along all directions
// carries out one lattice time-step (streaming + collision) in the algorithm
__global__ void collideAndStream(// READ-ONLY parameters (used by this function but not changed)
const int N, const int Q, const double DENSITY, const double LID_VELOCITY, const double REYNOLDS_NUMBER,
// READ + WRITE parameters (get updated in this function)
double *rho, // density
double *ux, // X-velocity
double *uy, // Y-velocity
double *sigma, // rate-of-strain
double *f, // distribution function
double *feq, // equilibrium distribution function
double *f_new) // new distribution function
{
// compute the global "i" and "j" location handled by this thread
const int i = blockIdx.x * blockDim.x + threadIdx.x ;
const int j = blockIdx.y * blockDim.y + threadIdx.y ;
// bound checking
if( (i < 1) || (i > (N-2)) || (j < 1) || (j > (N-2)) ) return;
// natural index
const int index = i*N + j; // column-major ordering
// calculate fluid viscosity based on the Reynolds number
double kinematicViscosity = LID_VELOCITY * (double) N / REYNOLDS_NUMBER;
// calculate relaxation time tau
double tau = 0.5 + 3.0 * kinematicViscosity;
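    // with the constants above (LID_VELOCITY = 0.05, N = 128, Re = 1e6) this gives
    // kinematicViscosity = 0.05*128/1e6 = 6.4e-6 and tau = 0.5 + 1.92e-5, i.e. tau
    // sits barely above 0.5, the zero-viscosity limit implied by tau = 0.5 + 3*nu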
// collision
#pragma unroll
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
double edotu = ex[a]*ux[index] + ey[a]*uy[index];
double udotu = ux[index]*ux[index] + uy[index]*uy[index];
feq[index_f] = rho[index] * wt[a] * (1 + 3*edotu + 4.5*edotu*edotu - 1.5*udotu);
}
// streaming from interior node points
#pragma unroll
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
int index_nbr = (i+ex[a])*N + (j+ey[a]);
int index_nbr_f = a + index_nbr * Q;
int indexoppos = oppos[a] + index*Q;
double tau_eff, tau_t, C_Smagorinsky; // turbulence model parameters
C_Smagorinsky = 0.16;
// tau_t = additional contribution to the relaxation time
// because of the "eddy viscosity" model
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// REFERENCE: Krafczyk M., Tolke J. and Luo L.-S. (2003)
// Large-Eddy Simulations with a Multiple-Relaxation-Time LBE Model
// International Journal of Modern Physics B, Vol.17, 33-39
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
tau_t = 0.5*(pow(pow(tau,2) + 18.0*pow(C_Smagorinsky,2)*sigma[index],0.5) - tau);
// the effective relaxation time accounts for the additional "eddy viscosity"
// effects. Note that tau_eff now varies from point to point in the domain, and is
// larger for large strain rates. If the strain rate is zero, tau_eff = 0 and we
// revert back to the original (laminar) LBM scheme where tau_eff = tau.
tau_eff = tau + tau_t;
// post-collision distribution at (i,j) along "a"
double f_plus = f[index_f] - (f[index_f] - feq[index_f])/tau_eff;
int iS = i + ex[a]; int jS = j + ey[a];
if((iS==0) || (iS==N-1) || (jS==0) || (jS==N-1)) {
// bounce back
double ubdote = ux[index_nbr]*ex[a] + uy[index_nbr]*ey[a];
f_new[indexoppos] = f_plus - 6.0 * DENSITY * wt[a] * ubdote;
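            // bounce-back writes into the opposite direction oppos[a] at this node;
            // the -6*DENSITY*wt[a]*(u_wall . e_a) term injects the momentum of the
            // moving wall, and is nonzero only when bouncing off the lid (j = 0),
            // where ux = LID_VELOCITY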
}
else {
// stream to neighbor
f_new[index_nbr_f] = f_plus;
}
}
}
__global__ void everythingElse( // READ-ONLY parameters (used by this function but not changed)
const int N, const int Q, const double DENSITY, const double LID_VELOCITY, const double REYNOLDS_NUMBER,
// READ + WRITE parameters (get updated in this function)
double *rho, // density
double *ux, // X-velocity
double *uy, // Y-velocity
double *sigma, // rate-of-strain
double *f, // distribution function
double *feq, // equilibrium distribution function
double *f_new) // new distribution function
{
// compute the global "i" and "j" location of this thread
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
// bound checking
if( (i < 1) || (i > (N-2)) || (j < 1) || (j > (N-2)) ) return;
// natural index
const int index = i*N + j; // column-major ordering
// push f_new into f
#pragma unroll
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
f[index_f] = f_new[index_f];
}
// update density at interior nodes
rho[index]=0.0;
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
rho[index] += f_new[index_f];
}
// update velocity at interior nodes
double velx=0.0;
double vely=0.0;
for(int a=0;a<Q;a++) {
int index_f = a + index*Q;
velx += f_new[index_f]*ex[a];
vely += f_new[index_f]*ey[a];
}
ux[index] = velx/rho[index];
uy[index] = vely/rho[index];
// update the rate-of-strain field
double sum_xx = 0.0, sum_xy = 0.0, sum_xz = 0.0;
double sum_yx = 0.0, sum_yy = 0.0, sum_yz = 0.0;
double sum_zx = 0.0, sum_zy = 0.0, sum_zz = 0.0;
for(int a=1; a<Q; a++)
{
int index_f = a + index*Q;
sum_xx = sum_xx + (f_new[index_f] - feq[index_f])*ex[a]*ex[a];
sum_xy = sum_xy + (f_new[index_f] - feq[index_f])*ex[a]*ey[a];
sum_xz = 0.0;
sum_yx = sum_xy;
sum_yy = sum_yy + (f_new[index_f] - feq[index_f])*ey[a]*ey[a];
sum_yz = 0.0;
sum_zx = 0.0;
sum_zy = 0.0;
sum_zz = 0.0;
}
// evaluate |S| (magnitude of the strain-rate)
sigma[index] = pow(sum_xx,2) + pow(sum_xy,2) + pow(sum_xz,2)
+ pow(sum_yx,2) + pow(sum_yy,2) + pow(sum_yz,2)
+ pow(sum_zx,2) + pow(sum_zy,2) + pow(sum_zz,2);
sigma[index] = pow(sigma[index],0.5);
}
int main(int argc, char* argv[])
{
// allocate memory on the GPU
// distribution functions
double *f, *feq, *f_new;
cudaMalloc((void **)&f,N*N*Q*sizeof(double));
cudaMalloc((void **)&feq,N*N*Q*sizeof(double));
cudaMalloc((void **)&f_new,N*N*Q*sizeof(double));
// density and velocity
double *rho, *ux, *uy;
cudaMalloc((void **)&rho,N*N*sizeof(double));
cudaMalloc((void **)&ux,N*N*sizeof(double));
cudaMalloc((void **)&uy,N*N*sizeof(double));
// rate-of-strain
double *sigma;
cudaMalloc((void **)&sigma,N*N*sizeof(double));
// allocate space for D3Q9 parameters on the host
double *ex_h = new double[Q];
double *ey_h = new double[Q];
int *oppos_h = new int[Q];
double *wt_h = new double[Q];
// fill D3Q9 parameters in constant memory on the GPU
D3Q9(ex_h, ey_h, oppos_h, wt_h);
// assign a 2D distribution of CUDA "threads" within each CUDA "block"
int threadsAlongX=16, threadsAlongY=16;
dim3 dimBlock(threadsAlongX, threadsAlongY, 1);
// calculate number of blocks along X and Y in a 2D CUDA "grid"
dim3 dimGrid( ceil(float(N)/float(dimBlock.x)), ceil(float(N)/float(dimBlock.y)), 1 );
// launch GPU kernel to initialize all fields
initialize<<<dimGrid,dimBlock>>>(N, Q, DENSITY, LID_VELOCITY,
rho, ux, uy, sigma,
f, feq, f_new);
// time integration
int time=0;
clock_t t0, tN;
t0 = clock();
while(time<TIME_STEPS) {
time++;
collideAndStream<<<dimGrid,dimBlock >>>(N, Q, DENSITY, LID_VELOCITY, REYNOLDS_NUMBER,
rho, ux, uy, sigma,
f, feq, f_new);
// collideAndStream and everythingElse were originally one kernel
// they were separated out to make all threads synchronize globally
// before moving on to the next set of calculations
everythingElse<<<dimGrid,dimBlock >>>(N, Q, DENSITY, LID_VELOCITY, REYNOLDS_NUMBER,
rho, ux, uy, sigma,
f, feq, f_new);
tN = clock() - t0;
std::cout << "Lattice time " << time
<< " clock ticks " << tN
<< " wall clock time " << tN/CLOCKS_PER_SEC
<< " lattice time steps per second = " << (float) CLOCKS_PER_SEC * time / (float) tN
<< std::endl;
}
// added this in for flushing nvprof output to screen
cudaDeviceReset();
return 0;
}
|
06fa3779f2f0d40d36b0c6082dbbd39e7f370709.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void sgemm_nn_vec_128x64(
float* param_C,
const float* param_A,
const float* param_B,
float param_alpha,
float param_beta,
int param_flags,
int param_lda,
int param_ldb,
int param_ldc,
int param_m,
int param_n,
int param_k,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
) {
__shared__ float share[128*8*2 + 64*8*2 + 4];
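        // the tile size works out to 128*8*2 + 64*8*2 + 4 = 3076 floats = 12304 bytes,
        // presumably two double-buffered 8-deep tiles of A (128 wide) and B (64 wide)
        // plus padding; the body below is only a placeholder that touches the buffer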
*param_C = share[0];
}
| 06fa3779f2f0d40d36b0c6082dbbd39e7f370709.cu |
extern "C" __global__ void sgemm_nn_vec_128x64(
float* param_C,
const float* param_A,
const float* param_B,
float param_alpha,
float param_beta,
int param_flags,
int param_lda,
int param_ldb,
int param_ldc,
int param_m,
int param_n,
int param_k,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
) {
__shared__ float share[128*8*2 + 64*8*2 + 4];
*param_C = share[0];
}
|
34e57debd051f21f4a932c4fa8b987696c2baa17.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=64 --gridDim=64 --no-inline
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#define N 2//64
__device__ void bar(float x) {
assert(1);
}
__global__ void foo(int* A) {
bar(A[0]);
}
int main(){
int *b;
int *dev_b;
b = (int*)malloc(N*sizeof(int));
for (int i = 0; i < N; ++i){
b[i] = i+1;
//printf(" %d; ", b[i]);
}
hipMalloc((void**)&dev_b, N*sizeof(float));
hipMemcpy(dev_b, b, N*sizeof(float), hipMemcpyHostToDevice);
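    // note: dev_b holds ints but is sized and copied with sizeof(float); this only
    // works because int and float are both 4 bytes here, N*sizeof(int) would be clearer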
//foo<<<1,N>>>(dev_b);
ESBMC_verify_kernel(foo,1,N,dev_b);
free(b);
hipFree(dev_b);
}
| 34e57debd051f21f4a932c4fa8b987696c2baa17.cu | //pass
//--blockDim=64 --gridDim=64 --no-inline
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
#define N 2//64
__device__ void bar(float x) {
assert(1);
}
__global__ void foo(int* A) {
bar(A[0]);
}
int main(){
int *b;
int *dev_b;
b = (int*)malloc(N*sizeof(int));
for (int i = 0; i < N; ++i){
b[i] = i+1;
//printf(" %d; ", b[i]);
}
cudaMalloc((void**)&dev_b, N*sizeof(float));
cudaMemcpy(dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice);
//foo<<<1,N>>>(dev_b);
ESBMC_verify_kernel(foo,1,N,dev_b);
free(b);
cudaFree(dev_b);
}
|
1295933487162777c729d42bb098094ab1857c44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#define row1 2 /* Number of rows of first matrix */
#define col1 3 /* Number of columns of first matrix */
#define row2 3 /* Number of rows of second matrix */
#define col2 2 /* Number of columns of second matrix */
__global__ void matproductsharedmemory(int *l,int *m, int *n)
{
int x=blockIdx.x;
int y=blockIdx.y;
__shared__ int p[col1];
int i;
int k=threadIdx.x;
n[col2*y+x]=0;
p[k]=l[col1*y+k]*m[col2*k+x];
__syncthreads();
for(i=0;i<col1;i++)
n[col2*y+x]=n[col2*y+x]+p[i];
}
int main()
{
int a[row1][col1];
int b[row2][col2];
int c[row1][col2];
int *d,*e,*f;
int i,j;
printf("\n Enter elements of first matrix of size 2*3\n");
for(i=0;i<row1;i++)
{
for(j=0;j<col1;j++)
{
scanf("%d",&a[i][j]);
}
}
printf("\n Enter elements of second matrix of size 3*2\n");
for(i=0;i<row2;i++)
{
for(j=0;j<col2;j++)
{
scanf("%d",&b[i][j]);
}
}
hipMalloc((void **)&d,row1*col1*sizeof(int));
hipMalloc((void **)&e,row2*col2*sizeof(int));
hipMalloc((void **)&f,row1*col2*sizeof(int));
hipMemcpy(d,a,row1*col1*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(e,b,row2*col2*sizeof(int),hipMemcpyHostToDevice);
dim3 grid(col2,row1);
/* Here we are defining two dimensional Grid(collection of blocks) structure. Syntax is dim3 grid(no. of columns,no. of rows) */
    hipLaunchKernelGGL((matproductsharedmemory), dim3(grid),dim3(col1), 0, 0, d,e,f);
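    /* Each block of the col2 x row1 grid produces one output element n[col2*y+x]:
       its col1 threads each write one partial product l[col1*y+k]*m[col2*k+x] into
       the shared array p, and after __syncthreads() the loop sums p[0..col1-1]. */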
hipMemcpy(c,f,row1*col2*sizeof(int),hipMemcpyDeviceToHost);
printf("\n Product of two matrices:\n ");
for(i=0;i<row1;i++)
{
for(j=0;j<col2;j++)
{
printf("%d\t",c[i][j]);
}
printf("\n");
}
hipFree(d);
hipFree(e);
hipFree(f);
return 0;
}
| 1295933487162777c729d42bb098094ab1857c44.cu | #include<stdio.h>
#include<cuda.h>
#define row1 2 /* Number of rows of first matrix */
#define col1 3 /* Number of columns of first matrix */
#define row2 3 /* Number of rows of second matrix */
#define col2 2 /* Number of columns of second matrix */
__global__ void matproductsharedmemory(int *l,int *m, int *n)
{
int x=blockIdx.x;
int y=blockIdx.y;
__shared__ int p[col1];
int i;
int k=threadIdx.x;
n[col2*y+x]=0;
p[k]=l[col1*y+k]*m[col2*k+x];
__syncthreads();
for(i=0;i<col1;i++)
n[col2*y+x]=n[col2*y+x]+p[i];
}
int main()
{
int a[row1][col1];
int b[row2][col2];
int c[row1][col2];
int *d,*e,*f;
int i,j;
printf("\n Enter elements of first matrix of size 2*3\n");
for(i=0;i<row1;i++)
{
for(j=0;j<col1;j++)
{
scanf("%d",&a[i][j]);
}
}
printf("\n Enter elements of second matrix of size 3*2\n");
for(i=0;i<row2;i++)
{
for(j=0;j<col2;j++)
{
scanf("%d",&b[i][j]);
}
}
cudaMalloc((void **)&d,row1*col1*sizeof(int));
cudaMalloc((void **)&e,row2*col2*sizeof(int));
cudaMalloc((void **)&f,row1*col2*sizeof(int));
cudaMemcpy(d,a,row1*col1*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(e,b,row2*col2*sizeof(int),cudaMemcpyHostToDevice);
dim3 grid(col2,row1);
/* Here we are defining two dimensional Grid(collection of blocks) structure. Syntax is dim3 grid(no. of columns,no. of rows) */
matproductsharedmemory<<<grid,col1>>>(d,e,f);
cudaMemcpy(c,f,row1*col2*sizeof(int),cudaMemcpyDeviceToHost);
printf("\n Product of two matrices:\n ");
for(i=0;i<row1;i++)
{
for(j=0;j<col2;j++)
{
printf("%d\t",c[i][j]);
}
printf("\n");
}
cudaFree(d);
cudaFree(e);
cudaFree(f);
return 0;
}
|
8557e3834418b72af91b7493cfbe4ca7c60fe9e3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This sample implements 64-bin histogram calculation
* of arbitrary-sized 8-bit data array
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
//#include <cutil_inline.h>
#include <cutil_inline.h>
////////////////////////////////////////////////////////////////////////////////
// Reference CPU histogram
////////////////////////////////////////////////////////////////////////////////
extern "C" void histogram64CPU(
unsigned int *h_Result,
unsigned int *h_Data,
int dataN
);
////////////////////////////////////////////////////////////////////////////////
// GPU histogram
////////////////////////////////////////////////////////////////////////////////
#include "histogram64_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Input data size
const int DATA_N = 1000000;
const int DATA_SIZE = DATA_N * sizeof(unsigned char);
//Repeat computations multiple times to improve timing precision
const int NUM_ITERATIONS = 1;
int main(int argc, char **argv){
unsigned char
*h_Data;
unsigned int
*h_HistogramCPU, *h_HistogramGPU;
unsigned char
*d_Data;
unsigned int
*d_Histogram;
int i, sum, delta, iter;
double timerValue;
unsigned int hTimer;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
cutilCheckError(cutCreateTimer(&hTimer));
printf("Initializing data...\n");
printf("...allocating CPU memory.\n");
h_Data = (unsigned char *)malloc(DATA_SIZE);
h_HistogramCPU = (unsigned int *)malloc(HISTOGRAM_SIZE);
h_HistogramGPU = (unsigned int *)malloc(HISTOGRAM_SIZE);
printf("...generating input data\n");
srand(2007);
for(i = 0; i < DATA_N; i++)
h_Data[i] = rand() % 256;
printf("...allocating GPU memory and copying input data\n");
cutilSafeCall( hipMalloc((void **)&d_Data, DATA_SIZE ) );
cutilSafeCall( hipMalloc((void **)&d_Histogram, HISTOGRAM_SIZE ) );
cutilSafeCall( hipMemcpy(d_Data, h_Data, DATA_SIZE, hipMemcpyHostToDevice) );
printf("Running GPU histogram (%i iterations)...\n", NUM_ITERATIONS);
initHistogram64GPU();
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
    for(iter = 0; iter < NUM_ITERATIONS; iter++)
histogram64GPU(d_Histogram, (unsigned int *)d_Data, DATA_N);
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutStopTimer(hTimer));
timerValue = cutGetTimerValue(hTimer) / NUM_ITERATIONS;
closeHistogram64GPU();
printf("histogram64GPU() time (average) : %f msec //%f MB/sec\n", timerValue, DATA_SIZE / (1e+6 * timerValue * 0.001));
printf("Reading back GPU results...\n");
cutilSafeCall( hipMemcpy(h_HistogramGPU, d_Histogram, HISTOGRAM_SIZE, hipMemcpyDeviceToHost) );
printf("Comparing the results...\n");
printf("...histogramCPU()\n");
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
histogram64CPU(
h_HistogramCPU,
(unsigned int *)h_Data,
DATA_N / 4
);
cutilCheckError(cutStopTimer(hTimer));
timerValue = cutGetTimerValue(hTimer);
printf("histogram64CPU() time : %f msec //%f MB/sec\n", timerValue, DATA_SIZE / (1048576.0 * timerValue * 0.001));
sum = 0;
delta = 0;
for(i = 0; i < BIN_COUNT; i++){
sum += h_HistogramGPU[i];
delta += abs((int)h_HistogramGPU[i] - (int)h_HistogramCPU[i]);
}
printf("Total sum of histogram elements: %i\n", sum);
printf("Sum of absolute differences: %i\n", delta);
printf((delta == 0) ? "TEST PASSED\n" : "TEST FAILED\n");
printf("Shutting down...\n");
cutilCheckError(cutDeleteTimer(hTimer));
cutilSafeCall( hipFree(d_Histogram) );
cutilSafeCall( hipFree(d_Data) );
free(h_HistogramGPU);
free(h_HistogramCPU);
free(h_Data);
hipDeviceReset();
cutilExit(argc, argv);
}
| 8557e3834418b72af91b7493cfbe4ca7c60fe9e3.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This sample implements 64-bin histogram calculation
* of arbitrary-sized 8-bit data array
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
//#include <cutil_inline.h>
#include <cutil_inline.h>
////////////////////////////////////////////////////////////////////////////////
// Reference CPU histogram
////////////////////////////////////////////////////////////////////////////////
extern "C" void histogram64CPU(
unsigned int *h_Result,
unsigned int *h_Data,
int dataN
);
////////////////////////////////////////////////////////////////////////////////
// GPU histogram
////////////////////////////////////////////////////////////////////////////////
#include "histogram64_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Input data size
const int DATA_N = 1000000;
const int DATA_SIZE = DATA_N * sizeof(unsigned char);
//Repeat computations multiple times to improve timing precision
const int NUM_ITERATIONS = 1;
int main(int argc, char **argv){
unsigned char
*h_Data;
unsigned int
*h_HistogramCPU, *h_HistogramGPU;
unsigned char
*d_Data;
unsigned int
*d_Histogram;
int i, sum, delta, iter;
double timerValue;
unsigned int hTimer;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
cutilCheckError(cutCreateTimer(&hTimer));
printf("Initializing data...\n");
printf("...allocating CPU memory.\n");
h_Data = (unsigned char *)malloc(DATA_SIZE);
h_HistogramCPU = (unsigned int *)malloc(HISTOGRAM_SIZE);
h_HistogramGPU = (unsigned int *)malloc(HISTOGRAM_SIZE);
printf("...generating input data\n");
srand(2007);
for(i = 0; i < DATA_N; i++)
h_Data[i] = rand() % 256;
printf("...allocating GPU memory and copying input data\n");
cutilSafeCall( cudaMalloc((void **)&d_Data, DATA_SIZE ) );
cutilSafeCall( cudaMalloc((void **)&d_Histogram, HISTOGRAM_SIZE ) );
cutilSafeCall( cudaMemcpy(d_Data, h_Data, DATA_SIZE, cudaMemcpyHostToDevice) );
printf("Running GPU histogram (%i iterations)...\n", NUM_ITERATIONS);
initHistogram64GPU();
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
    for(iter = 0; iter < NUM_ITERATIONS; iter++)
histogram64GPU(d_Histogram, (unsigned int *)d_Data, DATA_N);
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutStopTimer(hTimer));
timerValue = cutGetTimerValue(hTimer) / NUM_ITERATIONS;
closeHistogram64GPU();
printf("histogram64GPU() time (average) : %f msec //%f MB/sec\n", timerValue, DATA_SIZE / (1e+6 * timerValue * 0.001));
printf("Reading back GPU results...\n");
cutilSafeCall( cudaMemcpy(h_HistogramGPU, d_Histogram, HISTOGRAM_SIZE, cudaMemcpyDeviceToHost) );
printf("Comparing the results...\n");
printf("...histogramCPU()\n");
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
histogram64CPU(
h_HistogramCPU,
(unsigned int *)h_Data,
DATA_N / 4
);
cutilCheckError(cutStopTimer(hTimer));
timerValue = cutGetTimerValue(hTimer);
printf("histogram64CPU() time : %f msec //%f MB/sec\n", timerValue, DATA_SIZE / (1048576.0 * timerValue * 0.001));
sum = 0;
delta = 0;
for(i = 0; i < BIN_COUNT; i++){
sum += h_HistogramGPU[i];
delta += abs((int)h_HistogramGPU[i] - (int)h_HistogramCPU[i]);
}
printf("Total sum of histogram elements: %i\n", sum);
printf("Sum of absolute differences: %i\n", delta);
printf((delta == 0) ? "TEST PASSED\n" : "TEST FAILED\n");
printf("Shutting down...\n");
cutilCheckError(cutDeleteTimer(hTimer));
cutilSafeCall( cudaFree(d_Histogram) );
cutilSafeCall( cudaFree(d_Data) );
free(h_HistogramGPU);
free(h_HistogramCPU);
free(h_Data);
cudaThreadExit();
cutilExit(argc, argv);
}
|
2fa0d8e821e9dd1fac3349d0fa572622f6035c7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
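    // for reference, (a;q)_inf = prod_{k>=0} (1 - a*q^k); since Q starts at q, the
    // loop below accumulates only the k = 1..79 factors, i.e. a truncation of
    // (a*q;q)_inf without the leading (1 - a) term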
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ hipComplex helva(hipComplex z)
{
hipComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex hilva(hipComplex z)
{
hipComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex hinva(hipComplex z)
{
hipComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex henga(hipComplex z)
{
hipComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ hipComplex holva(hipComplex z)
{
hipComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ hipComplex arago(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex irigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex urigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex ip(pi,0.0);
const float scale = 10;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
hipComplex effx(fx,0.0);
hipComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(0.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (4.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
cue = sins(cue*aon)-cosc(cue*uon);
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
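/* In effect distanceKernel performs phase-only domain coloring: each pixel maps to a
   point z in roughly [-scale, scale]^2, w = sins(z*aon) - cosc(z*uon) is evaluated
   (aon and uon are built from the mouse position), and the three color channels
   encode arg(w) through sin^2 curves offset by pi/3, so hue cycles with the phase. */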
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ | 2fa0d8e821e9dd1fac3349d0fa572622f6035c7b.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
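/* mitlef: truncated Mittag-Leffler series, sum over v of z^v / Gamma(c.r * v + c.i), 20 terms */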
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
cuComplex out(0.0,0.0);
cuComplex Z(1.0,0.0);
cuComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
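/* helva / hilva / hinva / henga / holva below apply real special functions
   (Bessel J0/J1, Y0/Y1, and acosh/asinh) componentwise to the real and imaginary parts */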
__device__ cuComplex helva(cuComplex z)
{
cuComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex hilva(cuComplex z)
{
cuComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex hinva(cuComplex z)
{
cuComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex henga(cuComplex z)
{
cuComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ cuComplex holva(cuComplex z)
{
cuComplex out(y0f(z.r),y1f(z.i));
return out;
}
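/* arago / irigo / urigo: thess4-style truncated theta products with the cosine factor
   swapped for the Bessel-based maps above (10 factors each) */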
__device__ cuComplex arago(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex irigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex urigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
cuComplex ip(pi,0.0);
const float scale = 10;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
cuComplex effx(fx,0.0);
cuComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(0.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (4.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex bor(-10.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
cue = sins(cue*aon)-cosc(cue*uon);
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ |
b7bff223e2d184d33a2fd719657936423fe540f3.hip | // !!! This is a file automatically generated by hipify!!!
/*
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include "dock.h"
#include "gpu.cuh"
*/
/*
#define expf(a) (a)
#define powf(a,b) (a+b)
#define logf(a) (a)
#define sqrtf(a) (a)
*/
__device__ void
CalcEnergy_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
// reduce all points on the X-Y plane
__shared__ float evdw[TperB]; // e[0]
__shared__ float eele[TperB]; // e[1]
__shared__ float epmf[TperB]; // e[2]
__shared__ float epsp[TperB]; // e[3]
__shared__ float ehdb[TperB]; // e[4]
// reduce through only x axis
__shared__ float a_val[BDy][BDx]; // reused by hpc, kde, lhm ???????
__shared__ float a_sz[BDy][BDx]; // ???????
__shared__ float ehpc[BDy]; // e[5]
__shared__ float ekde[BDy]; // e[6]
__shared__ float elhm[BDy]; // e[7]
evdw[bidx] = 0.0f;
eele[bidx] = 0.0f;
epmf[bidx] = 0.0f;
epsp[bidx] = 0.0f;
ehdb[bidx] = 0.0f;
if (bidx < BDy) {
ehpc[bidx] = 0.0f;
ekde[bidx] = 0.0f;
elhm[bidx] = 0.0f;
}
__syncthreads ();
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int j = 0; j < pnp_dc; j += blockDim.x) {
const int p = j + threadIdx.x;
if (p < pnp_dc) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst_pow2 = dx * dx + dy * dy + dz * dz;
const float dst_pow4 = dst_pow2 * dst_pow2;
const float dst = sqrtf (dst_pow2);
/* hydrophobic potential */
if (myprt->c0_and_d12_or_c2[p] == 1 && dst_pow2 <= 81.0f) {
a_val[threadIdx.y][threadIdx.x] += myprt->hpp[p] *
(1.0f - (3.5f / 81.0f * dst_pow2 -
4.5f / 81.0f / 81.0f * dst_pow4 +
2.5f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow2 -
0.5f / 81.0f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow4));
}
/* L-J potential */
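/* as written below, p1 falls off as 1/r^9 and p2 as 1/r^6, with p4 softening the
   potential at short range (a 9-6 Lennard-Jones-like form, inferred from the
   expressions; the parameters themselves live in enepara_dc) */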
const float p1 = enepara_dc->p1a[lig_t][prt_t] / (dst_pow4 * dst_pow4 * dst);
const float p2 = enepara_dc->p2a[lig_t][prt_t] / (dst_pow4 * dst_pow2);
const float p4 = p1 * enepara_lj0_dc * (1.0f + enepara_lj1_dc * dst_pow2) + 1.0f;
evdw[bidx] += (p1 - p2) / p4;
/* electrostatic potential */
const float s1 = enepara_el1_dc * dst;
float g1;
if (s1 < 1)
g1 = enepara_el0_dc + enepara_a1_dc * s1 * s1 + enepara_b1_dc * s1 * s1 * s1;
else
g1 = 1.0f / s1;
eele[bidx] += mylig->c[l] * myprt->ele[p] * g1;
/* contact potential */
const float dst_minus_pmf0 = dst - enepara_dc->pmf0[lig_t][prt_t];
epmf[bidx] +=
enepara_dc->pmf1[lig_t][prt_t] /
(1.0f + expf ((-0.5f * dst + 6.0f) * dst_minus_pmf0));
/* pocket-specific potential */
// the semantics do not match the original program:
// if (found psp[][])
// accumulate to epsp;
// else
// do nothing
if (myprt->c[p] == 2 && dst_minus_pmf0 <= 0) {
const int i1 = myprt->seq3r[p];
epsp[bidx] += psp_dc->psp[lig_t][i1]; // sparse matrix
}
/* hydrogen bond potential */
const float hdb0 = enepara_dc->hdb0[lig_t][prt_t];
if (hdb0 > 0.1f) {
const float hdb1 = enepara_dc->hdb1[lig_t][prt_t];
const float hdb3 = (dst - hdb0) * hdb1;
ehdb[bidx] += hdb1 * expf (-0.5f * hdb3 * hdb3);
}
} // if (p < pnp_dc)
} // prt loop
} // if (l < lna_dc)
/* hydrophobic restraints */
SumReduction2D_d (a_val);
// transpose may help improve the performance
if (threadIdx.x == 0 && l < lna_dc) {
const int lig_t = mylig->t[l];
const float hpc2 = (a_val[threadIdx.y][0] - enepara_dc->hpl0[lig_t]) / enepara_dc->hpl1[lig_t];
ehpc[threadIdx.y] += 0.5f * hpc2 * hpc2 - enepara_dc->hpl2[lig_t];
}
} // lig loop
SumReduction1D_5_d (bidx, evdw, eele, epmf, epsp, ehdb);
if (bidx == 0) {
float eehpc = 0.0f;
for (int i = 0; i < BDy; ++i)
eehpc += ehpc[i];
ehpc[0] = eehpc;
}
#if 1
/* kde potential */
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
// kde loop, ~400
for (int j = 0; j < pnk_dc; j += blockDim.x) {
const int k = j + threadIdx.x;
if (k < pnk_dc) {
if (mylig->t[l] == kde_dc->t[k]) {
const float dx = mylig->coord_new.x[l] - kde_dc->x[k];
const float dy = mylig->coord_new.y[l] - kde_dc->y[k];
const float dz = mylig->coord_new.z[l] - kde_dc->z[k];
const float kde_dst_pow2 = dx * dx + dy * dy + dz * dz;
a_val[threadIdx.y][threadIdx.x] += expf (enepara_kde2_dc * kde_dst_pow2);
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (k < pnk_dc)
} // kde loop
} // if (l < lna_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && l < lna_dc && a_sz[threadIdx.y][0] != 0.0f)
ekde[threadIdx.y] += (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
} // lig loop
__syncthreads ();
if (bidx == 0) {
float eekde = 0.0f;
for (int i = 0; i < BDy; ++i)
eekde += ekde[i];
eekde = eekde / enepara_kde3_dc;
ekde[0] = eekde;
}
__syncthreads ();
#endif
#if 1
/* position restraints */
// lhm loop, ~11
for (int i = 0; i < pos_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int m = i + threadIdx.y;
if (m < pos_dc) {
// lig loop, ~30
for (int j = 0; j < lna_dc; j += blockDim.x) {
const int l = j + threadIdx.x;
if (l < lna_dc) {
const int lig_n = mylig->n[l] + 1;
if (mcs_dc[m].x[lig_n] != MCS_INVALID_COORD) {
const float dx = mylig->coord_new.x[l] - mcs_dc[m].x[lig_n];
const float dy = mylig->coord_new.y[l] - mcs_dc[m].y[lig_n];
const float dz = mylig->coord_new.z[l] - mcs_dc[m].z[lig_n];
a_val[threadIdx.y][threadIdx.x] += dx * dx + dy * dy + dz * dz;
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (l < lna_dc)
} // lig loop
} // if (m < pos_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && m < pos_dc) {
elhm[threadIdx.y] +=
mcs_dc[m].tcc *
sqrtf (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
}
} // lhm loop
__syncthreads ();
if (bidx == 0) {
float eelhm = 0.0f;
for (int i = 0; i < BDy; ++i)
eelhm += elhm[i];
// dropped the protection (if pos_dc != 0)
eelhm = logf (eelhm / pos_dc);
elhm[0] = eelhm;
}
__syncthreads ();
#endif
// energy edst e[8]
__shared__ float edst;
if (bidx == 0) {
const float dx = mylig->coord_new.center[0] - myprt->pocket_center[0];
const float dy = mylig->coord_new.center[1] - myprt->pocket_center[1];
const float dz = mylig->coord_new.center[2] - myprt->pocket_center[2];
edst = sqrtf (dx * dx + dy * dy + dz * dz);
}
__syncthreads ();
if (bidx == 0) {
evdw[0] = evdw[0] / lna_dc;
eele[0] = eele[0] / lna_dc;
epmf[0] = epmf[0] / lna_dc;
epsp[0] = epsp[0] / lna_dc;
ehdb[0] = ehdb[0] / lna_dc / sqrtf (2.0f * PI) * -1.0f;
// ehdb[0] = ehdb[0] / lna_dc; // using hdb2 is faster
ehpc[0] = ehpc[0] / lna_dc;
ekde[0] = ekde[0] / lna_dc;
// calculate normalized energy
evdw[0] = enepara_dc->a_para[0] * evdw[0] + enepara_dc->b_para[0];
eele[0] = enepara_dc->a_para[1] * eele[0] + enepara_dc->b_para[1];
epmf[0] = enepara_dc->a_para[2] * epmf[0] + enepara_dc->b_para[2];
epsp[0] = enepara_dc->a_para[3] * epsp[0] + enepara_dc->b_para[3];
ehdb[0] = enepara_dc->a_para[4] * ehdb[0] + enepara_dc->b_para[4];
ehpc[0] = enepara_dc->a_para[5] * ehpc[0] + enepara_dc->b_para[5];
ekde[0] = enepara_dc->a_para[6] * ekde[0] + enepara_dc->b_para[6];
elhm[0] = enepara_dc->a_para[7] * elhm[0] + enepara_dc->b_para[7];
edst = enepara_dc->a_para[8] * edst + enepara_dc->b_para[8];
#if IS_BAYE == 1
// calculate conditional prob belonging to high decoy
const float evdw_h = NormPdf(evdw[0], VDW_NORM_HIGH_LOC, VDW_NORM_HIGH_SCALE);
const float evdw_l = NormPdf(evdw[0], VDW_NORM_LOW_LOC, VDW_NORM_LOW_SCALE);
const float eele_h = CauchyPdf(eele[0], ELE_CAUCHY_HIGH_LOC, ELE_CAUCHY_HIGH_SCALE);
const float eele_l = CauchyPdf(eele[0], ELE_CAUCHY_LOW_LOC, ELE_CAUCHY_LOW_SCALE);
const float epmf_h = LogisticPdf(epmf[0], PMF_LOGISTIC_HIGH_LOC, PMF_LOGISTIC_HIGH_SCALE);
const float epmf_l = LogisticPdf(epmf[0], PMF_LOGISTIC_LOW_LOC, PMF_LOGISTIC_LOW_SCALE);
const float ehpc_h = WaldPdf(ehpc[0], HPC_WALD_HIGH_LOC, HPC_WALD_HIGH_SCALE);
const float ehpc_l = WaldPdf(ehpc[0], HPC_WALD_LOW_LOC, HPC_WALD_LOW_SCALE);
const float ehdb_h = NormPdf(ehdb[0], HDB_NORM_HIGH_LOC, HDB_NORM_HIGH_SCALE);
const float ehdb_l = NormPdf(ehdb[0], HDB_LOGISTIC_LOW_LOC, HDB_LOGISTIC_LOW_SCALE);
const float edst_h = LogisticPdf(edst, DST_LOGISTIC_HIGH_LOC, DST_LOGISTIC_HIGH_SCALE);
const float edst_l = LogisticPdf(edst, DST_LOGISTIC_LOW_LOC, DST_LOGISTIC_LOW_SCALE);
const float epsp_h = LogisticPdf(epsp[0], PSP_LOGISTIC_HIGH_LOC, PSP_LOGISTIC_HIGH_SCALE);
const float epsp_l = LogisticPdf(epsp[0], PSP_LAPLACE_LOW_LOC, PSP_LAPLACE_LOW_SCALE);
const float ekde_h = WaldPdf(ekde[0], KDE_WALD_HIGH_LOC, KDE_WALD_HIGH_SCALE);
const float ekde_l = WaldPdf(ekde[0], KDE_WALD_LOW_LOC, KDE_WALD_LOW_SCALE);
const float elhm_h = LogisticPdf(elhm[0], LHM_LOGISTIC_HIGH_LOC, LHM_LOGISTIC_HIGH_SCALE);
const float elhm_l = LogisticPdf(elhm[0], LHM_LOGISTIC_LOW_LOC, LHM_LOGISTIC_LOW_SCALE);
// calculate conditional prob
const float prob_h = log10f(evdw_h) + log10f(eele_h) + log10f(epmf_h) + log10f(ehpc_h) + log10f(ehdb_h)
+ log10f(edst_h) + log10f(epsp_h) + log10f(ekde_h) + log10f(elhm_h);
const float prob_l = log10f(evdw_l) + log10f(eele_l) + log10f(epmf_l) + log10f(ehpc_l) + log10f(ehdb_l)
+ log10f(edst_l) + log10f(epsp_l) + log10f(ekde_l) + log10f(elhm_l);
const float etotal = prob_l - prob_h;
#elif IS_BAYE == 0
const float etotal =
enepara_dc->w[0] * evdw[0] +
enepara_dc->w[1] * eele[0] +
enepara_dc->w[2] * epmf[0] +
enepara_dc->w[3] * epsp[0] +
enepara_dc->w[4] * ehdb[0] +
enepara_dc->w[5] * ehpc[0] +
enepara_dc->w[6] * ekde[0] +
enepara_dc->w[7] * elhm[0] +
enepara_dc->w[8] * edst;
#endif
float * e = &mylig->energy_new.e[0];
e[0] = evdw[0];
e[1] = eele[0];
e[2] = epmf[0];
e[3] = epsp[0];
e[4] = ehdb[0];
e[5] = ehpc[0];
e[6] = ekde[0];
e[7] = elhm[0];
e[8] = edst;
e[9] = etotal;
// e[9] = edst;
}
}
| b7bff223e2d184d33a2fd719657936423fe540f3.cu | /*
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include "dock.h"
#include "gpu.cuh"
*/
/*
#define expf(a) (a)
#define powf(a,b) (a+b)
#define logf(a) (a)
#define sqrtf(a) (a)
*/
__device__ void
CalcEnergy_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
// reduce all points on the X-Y plane
__shared__ float evdw[TperB]; // e[0]
__shared__ float eele[TperB]; // e[1]
__shared__ float epmf[TperB]; // e[2]
__shared__ float epsp[TperB]; // e[3]
__shared__ float ehdb[TperB]; // e[4]
// reduce through only x axis
__shared__ float a_val[BDy][BDx]; // reused by hpc, kde, lhm ???????
__shared__ float a_sz[BDy][BDx]; // ???????
__shared__ float ehpc[BDy]; // e[5]
__shared__ float ekde[BDy]; // e[6]
__shared__ float elhm[BDy]; // e[7]
evdw[bidx] = 0.0f;
eele[bidx] = 0.0f;
epmf[bidx] = 0.0f;
epsp[bidx] = 0.0f;
ehdb[bidx] = 0.0f;
if (bidx < BDy) {
ehpc[bidx] = 0.0f;
ekde[bidx] = 0.0f;
elhm[bidx] = 0.0f;
}
__syncthreads ();
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int j = 0; j < pnp_dc; j += blockDim.x) {
const int p = j + threadIdx.x;
if (p < pnp_dc) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst_pow2 = dx * dx + dy * dy + dz * dz;
const float dst_pow4 = dst_pow2 * dst_pow2;
const float dst = sqrtf (dst_pow2);
/* hydrophobic potential */
if (myprt->c0_and_d12_or_c2[p] == 1 && dst_pow2 <= 81.0f) {
a_val[threadIdx.y][threadIdx.x] += myprt->hpp[p] *
(1.0f - (3.5f / 81.0f * dst_pow2 -
4.5f / 81.0f / 81.0f * dst_pow4 +
2.5f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow2 -
0.5f / 81.0f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow4));
}
/* L-J potential */
const float p1 = enepara_dc->p1a[lig_t][prt_t] / (dst_pow4 * dst_pow4 * dst);
const float p2 = enepara_dc->p2a[lig_t][prt_t] / (dst_pow4 * dst_pow2);
const float p4 = p1 * enepara_lj0_dc * (1.0f + enepara_lj1_dc * dst_pow2) + 1.0f;
evdw[bidx] += (p1 - p2) / p4;
/* electrostatic potential */
const float s1 = enepara_el1_dc * dst;
float g1;
if (s1 < 1)
g1 = enepara_el0_dc + enepara_a1_dc * s1 * s1 + enepara_b1_dc * s1 * s1 * s1;
else
g1 = 1.0f / s1;
eele[bidx] += mylig->c[l] * myprt->ele[p] * g1;
/* contact potential */
const float dst_minus_pmf0 = dst - enepara_dc->pmf0[lig_t][prt_t];
epmf[bidx] +=
enepara_dc->pmf1[lig_t][prt_t] /
(1.0f + expf ((-0.5f * dst + 6.0f) * dst_minus_pmf0));
/* pocket-specific potential */
// the semantics do not match the original program:
// if (found psp[][])
// accumulate to epsp;
// else
// do nothing
if (myprt->c[p] == 2 && dst_minus_pmf0 <= 0) {
const int i1 = myprt->seq3r[p];
epsp[bidx] += psp_dc->psp[lig_t][i1]; // sparse matrix
}
/* hydrogen bond potential */
const float hdb0 = enepara_dc->hdb0[lig_t][prt_t];
if (hdb0 > 0.1f) {
const float hdb1 = enepara_dc->hdb1[lig_t][prt_t];
const float hdb3 = (dst - hdb0) * hdb1;
ehdb[bidx] += hdb1 * expf (-0.5f * hdb3 * hdb3);
}
} // if (p < pnp_dc)
} // prt loop
} // if (l < lna_dc)
/* hydrophobic restraints */
SumReduction2D_d (a_val);
// transpose may help improve the performance
if (threadIdx.x == 0 && l < lna_dc) {
const int lig_t = mylig->t[l];
const float hpc2 = (a_val[threadIdx.y][0] - enepara_dc->hpl0[lig_t]) / enepara_dc->hpl1[lig_t];
ehpc[threadIdx.y] += 0.5f * hpc2 * hpc2 - enepara_dc->hpl2[lig_t];
}
} // lig loop
SumReduction1D_5_d (bidx, evdw, eele, epmf, epsp, ehdb);
if (bidx == 0) {
float eehpc = 0.0f;
for (int i = 0; i < BDy; ++i)
eehpc += ehpc[i];
ehpc[0] = eehpc;
}
#if 1
/* kde potential */
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
// kde loop, ~400
for (int j = 0; j < pnk_dc; j += blockDim.x) {
const int k = j + threadIdx.x;
if (k < pnk_dc) {
if (mylig->t[l] == kde_dc->t[k]) {
const float dx = mylig->coord_new.x[l] - kde_dc->x[k];
const float dy = mylig->coord_new.y[l] - kde_dc->y[k];
const float dz = mylig->coord_new.z[l] - kde_dc->z[k];
const float kde_dst_pow2 = dx * dx + dy * dy + dz * dz;
a_val[threadIdx.y][threadIdx.x] += expf (enepara_kde2_dc * kde_dst_pow2);
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (k < pnk_dc)
} // kde loop
} // if (l < lna_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && l < lna_dc && a_sz[threadIdx.y][0] != 0.0f)
ekde[threadIdx.y] += (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
} // lig loop
__syncthreads ();
if (bidx == 0) {
float eekde = 0.0f;
for (int i = 0; i < BDy; ++i)
eekde += ekde[i];
eekde = eekde / enepara_kde3_dc;
ekde[0] = eekde;
}
__syncthreads ();
#endif
#if 1
/* position restraints */
// lhm loop, ~11
for (int i = 0; i < pos_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int m = i + threadIdx.y;
if (m < pos_dc) {
// lig loop, ~30
for (int j = 0; j < lna_dc; j += blockDim.x) {
const int l = j + threadIdx.x;
if (l < lna_dc) {
const int lig_n = mylig->n[l] + 1;
if (mcs_dc[m].x[lig_n] != MCS_INVALID_COORD) {
const float dx = mylig->coord_new.x[l] - mcs_dc[m].x[lig_n];
const float dy = mylig->coord_new.y[l] - mcs_dc[m].y[lig_n];
const float dz = mylig->coord_new.z[l] - mcs_dc[m].z[lig_n];
a_val[threadIdx.y][threadIdx.x] += dx * dx + dy * dy + dz * dz;
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (l < lna_dc)
} // lig loop
} // if (m < pos_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && m < pos_dc) {
elhm[threadIdx.y] +=
mcs_dc[m].tcc *
sqrtf (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
}
} // lhm loop
__syncthreads ();
if (bidx == 0) {
float eelhm = 0.0f;
for (int i = 0; i < BDy; ++i)
eelhm += elhm[i];
// dropped the protection (if pos_dc != 0)
eelhm = logf (eelhm / pos_dc);
elhm[0] = eelhm;
}
__syncthreads ();
#endif
// energy edst e[8]
__shared__ float edst;
if (bidx == 0) {
const float dx = mylig->coord_new.center[0] - myprt->pocket_center[0];
const float dy = mylig->coord_new.center[1] - myprt->pocket_center[1];
const float dz = mylig->coord_new.center[2] - myprt->pocket_center[2];
edst = sqrtf (dx * dx + dy * dy + dz * dz);
}
__syncthreads ();
if (bidx == 0) {
evdw[0] = evdw[0] / lna_dc;
eele[0] = eele[0] / lna_dc;
epmf[0] = epmf[0] / lna_dc;
epsp[0] = epsp[0] / lna_dc;
ehdb[0] = ehdb[0] / lna_dc / sqrtf (2.0f * PI) * -1.0f;
// ehdb[0] = ehdb[0] / lna_dc; // using hdb2 is faster
ehpc[0] = ehpc[0] / lna_dc;
ekde[0] = ekde[0] / lna_dc;
// calculate normalized energy
evdw[0] = enepara_dc->a_para[0] * evdw[0] + enepara_dc->b_para[0];
eele[0] = enepara_dc->a_para[1] * eele[0] + enepara_dc->b_para[1];
epmf[0] = enepara_dc->a_para[2] * epmf[0] + enepara_dc->b_para[2];
epsp[0] = enepara_dc->a_para[3] * epsp[0] + enepara_dc->b_para[3];
ehdb[0] = enepara_dc->a_para[4] * ehdb[0] + enepara_dc->b_para[4];
ehpc[0] = enepara_dc->a_para[5] * ehpc[0] + enepara_dc->b_para[5];
ekde[0] = enepara_dc->a_para[6] * ekde[0] + enepara_dc->b_para[6];
elhm[0] = enepara_dc->a_para[7] * elhm[0] + enepara_dc->b_para[7];
edst = enepara_dc->a_para[8] * edst + enepara_dc->b_para[8];
#if IS_BAYE == 1
// calculate conditional prob belonging to high decoy
const float evdw_h = NormPdf(evdw[0], VDW_NORM_HIGH_LOC, VDW_NORM_HIGH_SCALE);
const float evdw_l = NormPdf(evdw[0], VDW_NORM_LOW_LOC, VDW_NORM_LOW_SCALE);
const float eele_h = CauchyPdf(eele[0], ELE_CAUCHY_HIGH_LOC, ELE_CAUCHY_HIGH_SCALE);
const float eele_l = CauchyPdf(eele[0], ELE_CAUCHY_LOW_LOC, ELE_CAUCHY_LOW_SCALE);
const float epmf_h = LogisticPdf(epmf[0], PMF_LOGISTIC_HIGH_LOC, PMF_LOGISTIC_HIGH_SCALE);
const float epmf_l = LogisticPdf(epmf[0], PMF_LOGISTIC_LOW_LOC, PMF_LOGISTIC_LOW_SCALE);
const float ehpc_h = WaldPdf(ehpc[0], HPC_WALD_HIGH_LOC, HPC_WALD_HIGH_SCALE);
const float ehpc_l = WaldPdf(ehpc[0], HPC_WALD_LOW_LOC, HPC_WALD_LOW_SCALE);
const float ehdb_h = NormPdf(ehdb[0], HDB_NORM_HIGH_LOC, HDB_NORM_HIGH_SCALE);
const float ehdb_l = NormPdf(ehdb[0], HDB_LOGISTIC_LOW_LOC, HDB_LOGISTIC_LOW_SCALE);
const float edst_h = LogisticPdf(edst, DST_LOGISTIC_HIGH_LOC, DST_LOGISTIC_HIGH_SCALE);
const float edst_l = LogisticPdf(edst, DST_LOGISTIC_LOW_LOC, DST_LOGISTIC_LOW_SCALE);
const float epsp_h = LogisticPdf(epsp[0], PSP_LOGISTIC_HIGH_LOC, PSP_LOGISTIC_HIGH_SCALE);
const float epsp_l = LogisticPdf(epsp[0], PSP_LAPLACE_LOW_LOC, PSP_LAPLACE_LOW_SCALE);
const float ekde_h = WaldPdf(ekde[0], KDE_WALD_HIGH_LOC, KDE_WALD_HIGH_SCALE);
const float ekde_l = WaldPdf(ekde[0], KDE_WALD_LOW_LOC, KDE_WALD_LOW_SCALE);
const float elhm_h = LogisticPdf(elhm[0], LHM_LOGISTIC_HIGH_LOC, LHM_LOGISTIC_HIGH_SCALE);
const float elhm_l = LogisticPdf(elhm[0], LHM_LOGISTIC_LOW_LOC, LHM_LOGISTIC_LOW_SCALE);
// calculate conditional prob
const float prob_h = log10f(evdw_h) + log10f(eele_h) + log10f(epmf_h) + log10f(ehpc_h) + log10f(ehdb_h)
+ log10f(edst_h) + log10f(epsp_h) + log10f(ekde_h) + log10f(elhm_h);
const float prob_l = log10f(evdw_l) + log10f(eele_l) + log10f(epmf_l) + log10f(ehpc_l) + log10f(ehdb_l)
+ log10f(edst_l) + log10f(epsp_l) + log10f(ekde_l) + log10f(elhm_l);
const float etotal = prob_l - prob_h;
#elif IS_BAYE == 0
const float etotal =
enepara_dc->w[0] * evdw[0] +
enepara_dc->w[1] * eele[0] +
enepara_dc->w[2] * epmf[0] +
enepara_dc->w[3] * epsp[0] +
enepara_dc->w[4] * ehdb[0] +
enepara_dc->w[5] * ehpc[0] +
enepara_dc->w[6] * ekde[0] +
enepara_dc->w[7] * elhm[0] +
enepara_dc->w[8] * edst;
#endif
float * e = &mylig->energy_new.e[0];
e[0] = evdw[0];
e[1] = eele[0];
e[2] = epmf[0];
e[3] = epsp[0];
e[4] = ehdb[0];
e[5] = ehpc[0];
e[6] = ekde[0];
e[7] = elhm[0];
e[8] = edst;
e[9] = etotal;
// e[9] = edst;
}
}
|
f3b6b26f8a43ebc1cfd718196d1d0c8864c6b830.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This is a simple test showing huge access speed gap
* between aligned and misaligned structures
* (those having/missing __align__ keyword).
* It measures per-element copy throughput for
* aligned and misaligned structures on
* big chunks of data.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <helper_functions.h> // helper utility functions
#include <helper_cuda.h> // helper functions for CUDA error checking and initialization
////////////////////////////////////////////////////////////////////////////////
// Misaligned types
////////////////////////////////////////////////////////////////////////////////
typedef unsigned char uint8;
typedef unsigned short int uint16;
typedef struct
{
unsigned char r, g, b, a;
} RGBA8_misaligned;
typedef struct
{
unsigned int l, a;
} LA32_misaligned;
typedef struct
{
unsigned int r, g, b;
} RGB32_misaligned;
typedef struct
{
unsigned int r, g, b, a;
} RGBA32_misaligned;
////////////////////////////////////////////////////////////////////////////////
// Aligned types
////////////////////////////////////////////////////////////////////////////////
typedef struct __align__(4)
{
unsigned char r, g, b, a;
}
RGBA8;
typedef unsigned int I32;
typedef struct __align__(8)
{
unsigned int l, a;
}
LA32;
typedef struct __align__(16)
{
unsigned int r, g, b;
}
RGB32;
typedef struct __align__(16)
{
unsigned int r, g, b, a;
}
RGBA32;
////////////////////////////////////////////////////////////////////////////////
// Because G80 class hardware natively supports global memory operations
// only with data elements of 4, 8 and 16 bytes, if structure size
// exceeds 16 bytes, it can't be efficiently read or written,
// since more than one global memory non-coalescable load/store instructions
// will be generated, even if __align__ option is supplied.
// "Structure of arrays" storage strategy offers best performance
// in general case. See section 5.1.2 of the Programming Guide.
////////////////////////////////////////////////////////////////////////////////
typedef struct __align__(16)
{
RGBA32 c1, c2;
}
RGBA32_2;
////////////////////////////////////////////////////////////////////////////////
// Common host and device functions
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Round a / b to nearest lower integer value
int iDivDown(int a, int b)
{
return a / b;
}
//Align a to nearest higher multiple of b
int iAlignUp(int a, int b)
{
return (a % b != 0) ? (a - a % b + b) : a;
}
//Align a to nearest lower multiple of b
int iAlignDown(int a, int b)
{
return a - a % b;
}
////////////////////////////////////////////////////////////////////////////////
// Simple CUDA kernel.
// Copy is carried out on per-element basis,
// so it's not per-byte in case of padded structures.
////////////////////////////////////////////////////////////////////////////////
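// The copy kernel below uses a grid-stride loop, so the fixed 64-block x 256-thread
// launch in runTest() (16384 threads in total) covers any numElements without
// recomputing the grid size per element count.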
template<class TData> __global__ void testKernel(
TData *d_odata,
TData *d_idata,
int numElements
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int pos = tid; pos < numElements; pos += numThreads)
{
d_odata[pos] = d_idata[pos];
}
}
////////////////////////////////////////////////////////////////////////////////
// Validation routine for simple copy kernel.
// We must know "packed" size of TData (number_of_fields * sizeof(simple_type))
// and compare only these "packed" parts of the structure,
// containing actual user data. The compiler behavior with padding bytes
// is undefined, since padding is merely a placeholder
// and doesn't contain any user data.
////////////////////////////////////////////////////////////////////////////////
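// Example: RGB32 is __align__(16), so it occupies 16 bytes in memory but carries only
// three unsigned ints of user data; runTest<RGB32>() below therefore passes
// packedElementSize = 12 and the 4 padding bytes are never compared.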
template<class TData> int testCPU(
TData *h_odata,
TData *h_idata,
int numElements,
int packedElementSize
)
{
for (int pos = 0; pos < numElements; pos++)
{
TData src = h_idata[pos];
TData dst = h_odata[pos];
for (int i = 0; i < packedElementSize; i++)
if (((char *)&src)[i] != ((char *)&dst)[i])
{
return 0;
}
}
return 1;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Memory chunk size in bytes. Reused for test
const int MEM_SIZE = 50000000;
const int NUM_ITERATIONS = 32;
//GPU input and output data
unsigned char *d_idata, *d_odata;
//CPU input data and instance of GPU output data
unsigned char *h_idataCPU, *h_odataGPU;
StopWatchInterface *hTimer = NULL;
template<class TData> int runTest(int packedElementSize, int memory_size)
{
const int totalMemSizeAligned = iAlignDown(memory_size, sizeof(TData));
const int numElements = iDivDown(memory_size, sizeof(TData));
//Clean output buffer before current test
checkCudaErrors(hipMemset(d_odata, 0, memory_size));
//Run test
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (int i = 0; i < NUM_ITERATIONS; i++)
{
hipLaunchKernelGGL(( testKernel<TData>), dim3(64), dim3(256), 0, 0,
(TData *)d_odata,
(TData *)d_idata,
numElements
);
getLastCudaError("testKernel() execution failed\n");
}
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&hTimer);
double gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
printf(
"Avg. time: %f ms / Copy throughput: %f GB/s.\n", gpuTime,
(double)totalMemSizeAligned / (gpuTime * 0.001 * 1073741824.0)
);
//Read back GPU results and run validation
checkCudaErrors(hipMemcpy(h_odataGPU, d_odata, memory_size, hipMemcpyDeviceToHost));
int flag = testCPU(
(TData *)h_odataGPU,
(TData *)h_idataCPU,
numElements,
packedElementSize
);
printf(flag ? "\tTEST OK\n" : "\tTEST FAILURE\n");
return !flag;
}
int main(int argc, char **argv)
{
int i, nTotalFailures = 0;
int devID;
hipDeviceProp_t deviceProp;
printf("[%s] - Starting...\n", argv[0]);
// find first CUDA device
devID = findCudaDevice(argc, (const char **)argv);
// get number of SMs on this GPU
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
printf("[%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n",
deviceProp.name, deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
// Anything that is less than 192 Cores will have a scaled down workload
float scale_factor = max((192.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f);
int MemorySize = (int)(MEM_SIZE/scale_factor) & 0xffffff00; // force multiple of 256 bytes
printf("> Compute scaling value = %4.2f\n", scale_factor);
printf("> Memory Size = %d\n", MemorySize);
sdkCreateTimer(&hTimer);
printf("Allocating memory...\n");
h_idataCPU = (unsigned char *)malloc(MemorySize);
h_odataGPU = (unsigned char *)malloc(MemorySize);
checkCudaErrors(hipMalloc((void **)&d_idata, MemorySize));
checkCudaErrors(hipMalloc((void **)&d_odata, MemorySize));
printf("Generating host input data array...\n");
for (i = 0; i < MemorySize; i++)
{
h_idataCPU[i] = (i & 0xFF) + 1;
}
printf("Uploading input data to GPU memory...\n");
checkCudaErrors(hipMemcpy(d_idata, h_idataCPU, MemorySize, hipMemcpyHostToDevice));
printf("Testing misaligned types...\n");
printf("uint8...\n");
nTotalFailures += runTest<uint8>(1, MemorySize);
printf("uint16...\n");
nTotalFailures += runTest<uint16>(2, MemorySize);
printf("RGBA8_misaligned...\n");
nTotalFailures += runTest<RGBA8_misaligned>(4, MemorySize);
printf("LA32_misaligned...\n");
nTotalFailures += runTest<LA32_misaligned>(8, MemorySize);
printf("RGB32_misaligned...\n");
nTotalFailures += runTest<RGB32_misaligned>(12, MemorySize);
printf("RGBA32_misaligned...\n");
nTotalFailures += runTest<RGBA32_misaligned>(16, MemorySize);
printf("Testing aligned types...\n");
printf("RGBA8...\n");
nTotalFailures += runTest<RGBA8>(4, MemorySize);
printf("I32...\n");
nTotalFailures += runTest<I32>(4, MemorySize);
printf("LA32...\n");
nTotalFailures += runTest<LA32>(8, MemorySize);
printf("RGB32...\n");
nTotalFailures += runTest<RGB32>(12, MemorySize);
printf("RGBA32...\n");
nTotalFailures += runTest<RGBA32>(16, MemorySize);
printf("RGBA32_2...\n");
nTotalFailures += runTest<RGBA32_2>(32, MemorySize);
printf("\n[alignedTypes] -> Test Results: %d Failures\n", nTotalFailures);
printf("Shutting down...\n");
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
free(h_odataGPU);
free(h_idataCPU);
sdkDeleteTimer(&hTimer);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
if (nTotalFailures != 0)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| f3b6b26f8a43ebc1cfd718196d1d0c8864c6b830.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This is a simple test showing huge access speed gap
* between aligned and misaligned structures
* (those having/missing __align__ keyword).
* It measures per-element copy throughput for
* aligned and misaligned structures on
* big chunks of data.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <helper_functions.h> // helper utility functions
#include <helper_cuda.h> // helper functions for CUDA error checking and initialization
////////////////////////////////////////////////////////////////////////////////
// Misaligned types
////////////////////////////////////////////////////////////////////////////////
typedef unsigned char uint8;
typedef unsigned short int uint16;
typedef struct
{
unsigned char r, g, b, a;
} RGBA8_misaligned;
typedef struct
{
unsigned int l, a;
} LA32_misaligned;
typedef struct
{
unsigned int r, g, b;
} RGB32_misaligned;
typedef struct
{
unsigned int r, g, b, a;
} RGBA32_misaligned;
////////////////////////////////////////////////////////////////////////////////
// Aligned types
////////////////////////////////////////////////////////////////////////////////
typedef struct __align__(4)
{
unsigned char r, g, b, a;
}
RGBA8;
typedef unsigned int I32;
typedef struct __align__(8)
{
unsigned int l, a;
}
LA32;
typedef struct __align__(16)
{
unsigned int r, g, b;
}
RGB32;
typedef struct __align__(16)
{
unsigned int r, g, b, a;
}
RGBA32;
////////////////////////////////////////////////////////////////////////////////
// Because G80 class hardware natively supports global memory operations
// only with data elements of 4, 8 and 16 bytes, if structure size
// exceeds 16 bytes, it can't be efficiently read or written,
// since more than one global memory non-coalescable load/store instructions
// will be generated, even if __align__ option is supplied.
// "Structure of arrays" storage strategy offers best performance
// in general case. See section 5.1.2 of the Programming Guide.
////////////////////////////////////////////////////////////////////////////////
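// Illustrative sketch only (hypothetical, not used in this sample): the RGBA32_2 payload
// laid out as a structure of arrays would keep each field in its own contiguous array,
// struct RGBA32_2_SoA { unsigned int *r1, *g1, *b1, *a1, *r2, *g2, *b2, *a2; };
// so each thread issues plain coalesced 4-byte loads/stores instead of 32-byte ones.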
typedef struct __align__(16)
{
RGBA32 c1, c2;
}
RGBA32_2;
////////////////////////////////////////////////////////////////////////////////
// Common host and device functions
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Round a / b to nearest lower integer value
int iDivDown(int a, int b)
{
return a / b;
}
//Align a to nearest higher multiple of b
int iAlignUp(int a, int b)
{
return (a % b != 0) ? (a - a % b + b) : a;
}
//Align a to nearest lower multiple of b
int iAlignDown(int a, int b)
{
return a - a % b;
}
////////////////////////////////////////////////////////////////////////////////
// Simple CUDA kernel.
// Copy is carried out on per-element basis,
// so it's not per-byte in case of padded structures.
////////////////////////////////////////////////////////////////////////////////
template<class TData> __global__ void testKernel(
TData *d_odata,
TData *d_idata,
int numElements
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for (int pos = tid; pos < numElements; pos += numThreads)
{
d_odata[pos] = d_idata[pos];
}
}
////////////////////////////////////////////////////////////////////////////////
// Validation routine for simple copy kernel.
// We must know "packed" size of TData (number_of_fields * sizeof(simple_type))
// and compare only these "packed" parts of the structure,
// containing actual user data. The compiler behavior with padding bytes
// is undefined, since padding is merely a placeholder
// and doesn't contain any user data.
////////////////////////////////////////////////////////////////////////////////
template<class TData> int testCPU(
TData *h_odata,
TData *h_idata,
int numElements,
int packedElementSize
)
{
for (int pos = 0; pos < numElements; pos++)
{
TData src = h_idata[pos];
TData dst = h_odata[pos];
for (int i = 0; i < packedElementSize; i++)
if (((char *)&src)[i] != ((char *)&dst)[i])
{
return 0;
}
}
return 1;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Memory chunk size in bytes. Reused for test
const int MEM_SIZE = 50000000;
const int NUM_ITERATIONS = 32;
//GPU input and output data
unsigned char *d_idata, *d_odata;
//CPU input data and instance of GPU output data
unsigned char *h_idataCPU, *h_odataGPU;
StopWatchInterface *hTimer = NULL;
template<class TData> int runTest(int packedElementSize, int memory_size)
{
const int totalMemSizeAligned = iAlignDown(memory_size, sizeof(TData));
const int numElements = iDivDown(memory_size, sizeof(TData));
//Clean output buffer before current test
checkCudaErrors(cudaMemset(d_odata, 0, memory_size));
//Run test
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (int i = 0; i < NUM_ITERATIONS; i++)
{
testKernel<TData><<<64, 256>>>(
(TData *)d_odata,
(TData *)d_idata,
numElements
);
getLastCudaError("testKernel() execution failed\n");
}
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
double gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
printf(
"Avg. time: %f ms / Copy throughput: %f GB/s.\n", gpuTime,
(double)totalMemSizeAligned / (gpuTime * 0.001 * 1073741824.0)
);
//Read back GPU results and run validation
checkCudaErrors(cudaMemcpy(h_odataGPU, d_odata, memory_size, cudaMemcpyDeviceToHost));
int flag = testCPU(
(TData *)h_odataGPU,
(TData *)h_idataCPU,
numElements,
packedElementSize
);
printf(flag ? "\tTEST OK\n" : "\tTEST FAILURE\n");
return !flag;
}
int main(int argc, char **argv)
{
int i, nTotalFailures = 0;
int devID;
cudaDeviceProp deviceProp;
printf("[%s] - Starting...\n", argv[0]);
// find first CUDA device
devID = findCudaDevice(argc, (const char **)argv);
// get number of SMs on this GPU
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
printf("[%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n",
deviceProp.name, deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
// Anything that is less than 192 Cores will have a scaled down workload
float scale_factor = max((192.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f);
int MemorySize = (int)(MEM_SIZE/scale_factor) & 0xffffff00; // force multiple of 256 bytes
printf("> Compute scaling value = %4.2f\n", scale_factor);
printf("> Memory Size = %d\n", MemorySize);
sdkCreateTimer(&hTimer);
printf("Allocating memory...\n");
h_idataCPU = (unsigned char *)malloc(MemorySize);
h_odataGPU = (unsigned char *)malloc(MemorySize);
checkCudaErrors(cudaMalloc((void **)&d_idata, MemorySize));
checkCudaErrors(cudaMalloc((void **)&d_odata, MemorySize));
printf("Generating host input data array...\n");
for (i = 0; i < MemorySize; i++)
{
h_idataCPU[i] = (i & 0xFF) + 1;
}
printf("Uploading input data to GPU memory...\n");
checkCudaErrors(cudaMemcpy(d_idata, h_idataCPU, MemorySize, cudaMemcpyHostToDevice));
printf("Testing misaligned types...\n");
printf("uint8...\n");
nTotalFailures += runTest<uint8>(1, MemorySize);
printf("uint16...\n");
nTotalFailures += runTest<uint16>(2, MemorySize);
printf("RGBA8_misaligned...\n");
nTotalFailures += runTest<RGBA8_misaligned>(4, MemorySize);
printf("LA32_misaligned...\n");
nTotalFailures += runTest<LA32_misaligned>(8, MemorySize);
printf("RGB32_misaligned...\n");
nTotalFailures += runTest<RGB32_misaligned>(12, MemorySize);
printf("RGBA32_misaligned...\n");
nTotalFailures += runTest<RGBA32_misaligned>(16, MemorySize);
printf("Testing aligned types...\n");
printf("RGBA8...\n");
nTotalFailures += runTest<RGBA8>(4, MemorySize);
printf("I32...\n");
nTotalFailures += runTest<I32>(4, MemorySize);
printf("LA32...\n");
nTotalFailures += runTest<LA32>(8, MemorySize);
printf("RGB32...\n");
nTotalFailures += runTest<RGB32>(12, MemorySize);
printf("RGBA32...\n");
nTotalFailures += runTest<RGBA32>(16, MemorySize);
printf("RGBA32_2...\n");
nTotalFailures += runTest<RGBA32_2>(32, MemorySize);
printf("\n[alignedTypes] -> Test Results: %d Failures\n", nTotalFailures);
printf("Shutting down...\n");
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
free(h_odataGPU);
free(h_idataCPU);
sdkDeleteTimer(&hTimer);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
if (nTotalFailures != 0)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
bc72cb80f219d54a8158b9266c084df6e071690e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************************
*
* kernel.cu
*
*******************************************************************************************/
#include <math.h>
#include "support.h"
#define BLOCK_SIZE 256
__global__ void findCluster(float* xarray, float* yarray, float* tempx, float* tempy, int* countP, int numPoints, int numClusters, int clusid)
{
__shared__ float sx[2*BLOCK_SIZE];
__shared__ float sy[2*BLOCK_SIZE];
__shared__ int cp[2*BLOCK_SIZE];
//int i = blockIdx.x * blockDim.x + threadIdx.x;
int startSection = 2*blockIdx.x*blockDim.x;
int index1 = startSection + threadIdx.x;
int index2 = startSection + blockDim.x + threadIdx.x;
sx[threadIdx.x] = (index1 < numPoints) ? xarray[index1] : 0.0;
sx[blockDim.x + threadIdx.x] = (index2 < numPoints) ? xarray[index2] : 0.0;
sy[threadIdx.x] = (index1 < numPoints) ? yarray[index1] : 0.0;
sy[blockDim.x + threadIdx.x] = (index2 < numPoints) ? yarray[index2] : 0.0;
cp[threadIdx.x] = (index1 < numPoints) ? 1 : 0;
cp[blockDim.x + threadIdx.x] = (index2 < numPoints) ? 1 : 0;
__syncthreads();
// Find the nearest cluster centroid for each point; points that do not belong to
// cluster clusid are zeroed out below so they drop out of the reduction
int id = 0;
float distMin = (sx[threadIdx.x] - cx_cm[id])*(sx[threadIdx.x] - cx_cm[id]) + (sy[threadIdx.x] - cy_cm[id])*(sy[threadIdx.x] - cy_cm[id]);
float distTemp;
for(int k = 1; k < numClusters; ++k)
{
distTemp = (sx[threadIdx.x] - cx_cm[k])*(sx[threadIdx.x] - cx_cm[k]) + (sy[threadIdx.x] - cy_cm[k])*(sy[threadIdx.x] - cy_cm[k]);
if(distTemp < distMin)
{
distMin = distTemp;
id = k;
}
}
if (id != clusid)
{
sx[threadIdx.x] = 0.0;
sy[threadIdx.x] = 0.0;
cp[threadIdx.x] = 0;
}
id = 0;
distMin = (sx[blockDim.x + threadIdx.x] - cx_cm[id])*(sx[blockDim.x + threadIdx.x] - cx_cm[id]) + (sy[blockDim.x + threadIdx.x] - cy_cm[id])*(sy[blockDim.x + threadIdx.x] - cy_cm[id]);
for(int k = 1; k < numClusters; ++k)
{
distTemp = (sx[blockDim.x + threadIdx.x] - cx_cm[k])*(sx[blockDim.x + threadIdx.x] - cx_cm[k]) + (sy[blockDim.x + threadIdx.x] - cy_cm[k])*(sy[blockDim.x + threadIdx.x] - cy_cm[k]);
if(distTemp < distMin)
{
distMin = distTemp;
id = k;
}
}
if (id != clusid)
{
sx[blockDim.x + threadIdx.x] = 0.0;
sy[blockDim.x + threadIdx.x] = 0.0;
cp[blockDim.x + threadIdx.x] = 0;
}
// printf("cp[%d] = %d\n", threadIdx.x, cp[threadIdx.x]);
// Sum the point coordinates and the number of points in this cluster using a tree reduction
for(int stride = blockDim.x; stride > 32; stride >>= 1)
{
__syncthreads();
if(threadIdx.x < stride)
{
sx[threadIdx.x] += sx[stride + threadIdx.x];
sy[threadIdx.x] += sy[stride + threadIdx.x];
cp[threadIdx.x] += cp[stride + threadIdx.x];
// printf("cp[%d] = %d\n", threadIdx.x, cp[threadIdx.x]);
}
}
// finish the reduction over the remaining 64 partial sums; the barrier has to be
// reached by every thread in the block, so it cannot sit inside a (threadIdx.x < 32) branch
for(int stride = 32; stride > 0; stride >>= 1)
{
__syncthreads();
if(threadIdx.x < stride)
{
sx[threadIdx.x] += sx[stride + threadIdx.x];
sy[threadIdx.x] += sy[stride + threadIdx.x];
cp[threadIdx.x] += cp[stride + threadIdx.x];
}
}
__syncthreads();
tempx[blockIdx.x] = sx[0];
tempy[blockIdx.x] = sy[0];
countP[blockIdx.x] = cp[0];
}
__global__ void reduceResult(float* tempx, float* tempy, int* count_d, float* reducedx, float* reducedy, int* reducCount, int num_sec)
{
__shared__ float sumx[2*BLOCK_SIZE];
__shared__ float sumy[2*BLOCK_SIZE];
__shared__ int sumcount[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
unsigned int indexin = start + t;
unsigned int indexpast = start + blockDim.x + t;
sumx[t] = (indexin < num_sec) ? tempx[indexin] : 0.0;
sumx[blockDim.x + t] = (indexpast < num_sec) ? tempx[indexpast] : 0.0;
sumy[t] = (indexin < num_sec) ? tempy[indexin] : 0.0;
sumy[blockDim.x + t] = (indexpast < num_sec) ? tempy[indexpast] : 0.0;
sumcount[t] = (indexin < num_sec) ? count_d[indexin] : 0;
sumcount[blockDim.x + t] = (indexpast < num_sec) ? count_d[indexpast] : 0;
for (unsigned int stride = blockDim.x; stride > 0; stride >>= 1)
{
__syncthreads();
if (t < stride)
{
sumx[t] += sumx[t + stride];
sumy[t] += sumy[t + stride];
sumcount[t] += sumcount[t + stride];
}
}
reducedx[blockIdx.x] = sumx[0];
reducedy[blockIdx.x] = sumy[0];
reducCount[blockIdx.x] = sumcount[0];
}
| bc72cb80f219d54a8158b9266c084df6e071690e.cu | /*******************************************************************************************
*
* kernel.cu
*
*******************************************************************************************/
#include <math.h>
#include "support.h"
#define BLOCK_SIZE 256
__global__ void findCluster(float* xarray, float* yarray, float* tempx, float* tempy, int* countP, int numPoints, int numClusters, int clusid)
{
__shared__ float sx[2*BLOCK_SIZE];
__shared__ float sy[2*BLOCK_SIZE];
__shared__ int cp[2*BLOCK_SIZE];
//int i = blockIdx.x * blockDim.x + threadIdx.x;
int startSection = 2*blockIdx.x*blockDim.x;
int index1 = startSection + threadIdx.x;
int index2 = startSection + blockDim.x + threadIdx.x;
sx[threadIdx.x] = (index1 < numPoints) ? xarray[index1] : 0.0;
sx[blockDim.x + threadIdx.x] = (index2 < numPoints) ? xarray[index2] : 0.0;
sy[threadIdx.x] = (index1 < numPoints) ? yarray[index1] : 0.0;
sy[blockDim.x + threadIdx.x] = (index2 < numPoints) ? yarray[index2] : 0.0;
cp[threadIdx.x] = (index1 < numPoints) ? 1 : 0;
cp[blockDim.x + threadIdx.x] = (index2 < numPoints) ? 1 : 0;
__syncthreads();
// Find the nearest cluster centroid for each point; points that do not belong to
// cluster clusid are zeroed out below so they drop out of the reduction
int id = 0;
float distMin = (sx[threadIdx.x] - cx_cm[id])*(sx[threadIdx.x] - cx_cm[id]) + (sy[threadIdx.x] - cy_cm[id])*(sy[threadIdx.x] - cy_cm[id]);
float distTemp;
for(int k = 1; k < numClusters; ++k)
{
distTemp = (sx[threadIdx.x] - cx_cm[k])*(sx[threadIdx.x] - cx_cm[k]) + (sy[threadIdx.x] - cy_cm[k])*(sy[threadIdx.x] - cy_cm[k]);
if(distTemp < distMin)
{
distMin = distTemp;
id = k;
}
}
if (id != clusid)
{
sx[threadIdx.x] = 0.0;
sy[threadIdx.x] = 0.0;
cp[threadIdx.x] = 0;
}
id = 0;
distMin = (sx[blockDim.x + threadIdx.x] - cx_cm[id])*(sx[blockDim.x + threadIdx.x] - cx_cm[id]) + (sy[blockDim.x + threadIdx.x] - cy_cm[id])*(sy[blockDim.x + threadIdx.x] - cy_cm[id]);
for(int k = 1; k < numClusters; ++k)
{
distTemp = (sx[blockDim.x + threadIdx.x] - cx_cm[k])*(sx[blockDim.x + threadIdx.x] - cx_cm[k]) + (sy[blockDim.x + threadIdx.x] - cy_cm[k])*(sy[blockDim.x + threadIdx.x] - cy_cm[k]);
if(distTemp < distMin)
{
distMin = distTemp;
id = k;
}
}
if (id != clusid)
{
sx[blockDim.x + threadIdx.x] = 0.0;
sy[blockDim.x + threadIdx.x] = 0.0;
cp[blockDim.x + threadIdx.x] = 0;
}
// printf("cp[%d] = %d\n", threadIdx.x, cp[threadIdx.x]);
// Sum the point coordinates and the number of points in this cluster using a tree reduction
for(int stride = blockDim.x; stride > 32; stride >>= 1)
{
__syncthreads();
if(threadIdx.x < stride)
{
sx[threadIdx.x] += sx[stride + threadIdx.x];
sy[threadIdx.x] += sy[stride + threadIdx.x];
cp[threadIdx.x] += cp[stride + threadIdx.x];
// printf("cp[%d] = %d\n", threadIdx.x, cp[threadIdx.x]);
}
}
// finish the reduction over the remaining 64 partial sums; the barrier has to be
// reached by every thread in the block, so it cannot sit inside a (threadIdx.x < 32) branch
for(int stride = 32; stride > 0; stride >>= 1)
{
__syncthreads();
if(threadIdx.x < stride)
{
sx[threadIdx.x] += sx[stride + threadIdx.x];
sy[threadIdx.x] += sy[stride + threadIdx.x];
cp[threadIdx.x] += cp[stride + threadIdx.x];
}
}
__syncthreads();
tempx[blockIdx.x] = sx[0];
tempy[blockIdx.x] = sy[0];
countP[blockIdx.x] = cp[0];
}
__global__ void reduceResult(float* tempx, float* tempy, int* count_d, float* reducedx, float* reducedy, int* reducCount, int num_sec)
{
__shared__ float sumx[2*BLOCK_SIZE];
__shared__ float sumy[2*BLOCK_SIZE];
__shared__ int sumcount[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
unsigned int indexin = start + t;
unsigned int indexpast = start + blockDim.x + t;
sumx[t] = (indexin < num_sec) ? tempx[indexin] : 0.0;
sumx[blockDim.x + t] = (indexpast < num_sec) ? tempx[indexpast] : 0.0;
sumy[t] = (indexin < num_sec) ? tempy[indexin] : 0.0;
sumy[blockDim.x + t] = (indexpast < num_sec) ? tempy[indexpast] : 0.0;
sumcount[t] = (indexin < num_sec) ? count_d[indexin] : 0;
sumcount[blockDim.x + t] = (indexpast < num_sec) ? count_d[indexpast] : 0;
for (unsigned int stride = blockDim.x; stride > 0; stride >>= 1)
{
__syncthreads();
if (t < stride)
{
sumx[t] += sumx[t + stride];
sumy[t] += sumy[t + stride];
sumcount[t] += sumcount[t + stride];
}
}
reducedx[blockIdx.x] = sumx[0];
reducedy[blockIdx.x] = sumy[0];
reducCount[blockIdx.x] = sumcount[0];
}
|
fed3bb5b111858502b5b243dbbdc46539de33109.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/// managed memory analysis - CUDA lab, CPU-only memory access
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
using namespace std::chrono;
__global__
void deviceKernel(int *a, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < N; i += stride)
{
a[i] = 1;
}
}
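// Note: deviceKernel is intentionally never launched in this variant; main() only calls
// hostFunction(), so the managed allocation is touched by the CPU alone, matching the
// "CPU-only memory access" note in the header comment.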
void hostFunction(int *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = 1;
}
}
int main(int argc, char** argv)
{
char* pEnd;
const int N = 2<<strtol(argv[1], &pEnd, 10); //2<<24;
size_t size = N * sizeof(int);
int *a;
hipMallocManaged(&a, size);
FILE *f;
f = fopen(argv[2], "a");
if (strtol(argv[1], &pEnd, 10) == 10) {
fprintf(f, "NumElement\t\tHost\n");
}
fprintf(f, "%d\t\t", N);
auto start = high_resolution_clock::now();
hostFunction(a, N);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
printf("Host: %d us \n", duration.count());
fprintf(f, "%d\n", duration.count());
fclose(f);
hipFree(a);
}
| fed3bb5b111858502b5b243dbbdc46539de33109.cu | /// managed memory analysis - CUDA lab, CPU-only memory access
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
using namespace std::chrono;
__global__
void deviceKernel(int *a, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < N; i += stride)
{
a[i] = 1;
}
}
void hostFunction(int *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = 1;
}
}
int main(int argc, char** argv)
{
char* pEnd;
const int N = 2<<strtol(argv[1], &pEnd, 10); //2<<24;
size_t size = N * sizeof(int);
int *a;
cudaMallocManaged(&a, size);
FILE *f;
f = fopen(argv[2], "a");
if (strtol(argv[1], &pEnd, 10) == 10) {
fprintf(f, "NumElement\t\tHost\n");
}
fprintf(f, "%d\t\t", N);
auto start = high_resolution_clock::now();
hostFunction(a, N);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
printf("Host: %d us \n", duration.count());
fprintf(f, "%d\n", duration.count());
fclose(f);
cudaFree(a);
}
|
7ded51bb17148a65bb0953a5208346fec289911f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "..\catch.hpp"
#include "..\BaseCudaTestHandler.h"
#define TEST_CUDA_CHECK_RETURN
//---------------------------------------------------------------------------------------------------
#include "..\..\GPUPatternMining/InstanceTree/InstanceTreeHelpers.h"
#include "../../GPUPatternMining/InstanceTree/IntanceTablesMapCreator.h"
//---------------------------------------------------------------------------------------------------
using namespace InstanceTreeHelpers;
//---------------------------------------------------------------------------------------------------
typedef thrust::device_vector<unsigned int> UIntThrustVector;
typedef std::shared_ptr<UIntThrustVector> UIntThrustVectorPtr;
typedef thrust::device_vector<FeatureInstance> FeatureInstanceThrustVector;
typedef std::shared_ptr<FeatureInstanceThrustVector> FeatureInstanceThrustVectorPtr;
//---------------------------------------------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | insert first pair count")
{
thrust::device_vector<unsigned int> keys;
{
std::vector<unsigned int> hKeys
{
0x000A000B
, 0x000A000C
, 0x000B000C
};
keys = hKeys;
}
thrust::device_vector<Entities::InstanceTable> values;
{
std::vector<Entities::InstanceTable> hValues;
Entities::InstanceTable it;
it.count = 2;
it.startIdx = 0;
hValues.push_back(it);
it.count = 3;
it.startIdx = 2;
hValues.push_back(it);
it.count = 6;
it.startIdx = 5;
hValues.push_back(it);
values = hValues;
}
auto proc = GPUUIntKeyProcessor();
auto map = IntanceTablesMapCreator::InstanceTableMap(5, &proc);
map.insertKeyValuePairs(
thrust::raw_pointer_cast(keys.data())
, thrust::raw_pointer_cast(values.data())
, 3
);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
thrust::device_vector<unsigned int> result(3);
thrust::device_vector<thrust::device_vector<unsigned short>> cliquesData;
thrust::host_vector<thrust::device_vector<unsigned short>> hcliquesData;
{
std::vector<unsigned short> first = { 0x000A, 0x000B, 0x000C };
std::vector<unsigned short> second = { 0x000A, 0x000C, 0x000D };
std::vector<unsigned short> third = { 0x000B, 0x000C, 0x000D };
hcliquesData.push_back(first);
hcliquesData.push_back(second);
hcliquesData.push_back(third);
cliquesData = hcliquesData;
}
thrust::device_vector<thrust::device_ptr<const unsigned short>> cliques;
{
std::vector<thrust::device_ptr<const unsigned short>> hcliques;
for (const thrust::device_vector<unsigned short>& vec : hcliquesData)
hcliques.push_back(vec.data());
cliques = hcliques;
}
dim3 insertGrid;
findSmallest2D(3, 256, insertGrid.x, insertGrid.y);
hipLaunchKernelGGL(( fillFirstPairCountFromMap) , dim3(insertGrid), dim3(256) , 0, 0,
map.getBean()
, thrust::raw_pointer_cast(cliques.data())
, 3
, result.data().get()
);
hipDeviceSynchronize();
thrust::host_vector<unsigned int> hResult = result;
std::vector<unsigned int> expected =
{
2, 3, 6
};
REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin()));
}
//--------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | for groups simple")
{
thrust::device_vector<unsigned int> counts;
{
std::vector<unsigned int> hCount = {
2, 3, 2, 1
};
counts = hCount;
}
auto result = forGroups(counts);
std::vector<unsigned int> expectedGroupNumbers = {
0, 0, 1, 1, 1, 2, 2, 3
};
std::vector<unsigned int> expectedItemNumbers = {
0, 1, 0, 1, 2, 0, 1, 0
};
thrust::host_vector<unsigned int> resultGroupNumbers = result->groupNumbers;
thrust::host_vector<unsigned int> resultItemNumbers = result->itemNumbers;
REQUIRE(std::equal(expectedGroupNumbers.begin(), expectedGroupNumbers.end(), resultGroupNumbers.begin()));
REQUIRE(std::equal(expectedItemNumbers.begin(), expectedItemNumbers.end(), resultItemNumbers.begin()));
REQUIRE(result->threadCount == 8);
}
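// Editor's note (a description inferred from the expected values above, not a
// statement from the library's documentation): forGroups expands a vector of
// per-group counts into flat (groupNumber, itemNumber) pairs - effectively a
// segmented iota over the counts - and threadCount is the sum of all counts,
// i.e. the number of threads needed to process every (group, item) combination.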
//--------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | for groups last zero")
{
thrust::device_vector<unsigned int> counts;
{
std::vector<unsigned int> hCount = {
2, 3, 2, 0
};
counts = hCount;
}
auto result = forGroups(counts);
std::vector<unsigned int> expectedGroupNumbers = {
0, 0, 1, 1, 1, 2, 2
};
std::vector<unsigned int> expectedItemNumbers = {
0, 1, 0, 1, 2, 0, 1
};
thrust::host_vector<unsigned int> resultGroupNumbers = result->groupNumbers;
thrust::host_vector<unsigned int> resultItemNumbers = result->itemNumbers;
REQUIRE(std::equal(expectedGroupNumbers.begin(), expectedGroupNumbers.end(), resultGroupNumbers.begin()));
REQUIRE(std::equal(expectedItemNumbers.begin(), expectedItemNumbers.end(), resultItemNumbers.begin()));
REQUIRE(result->threadCount == 7);
}
//--------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | for groups inner zeros")
{
thrust::device_vector<unsigned int> counts;
{
std::vector<unsigned int> hCount = {
1, 2, 0, 0, 1
};
counts = hCount;
}
auto result = forGroups(counts);
std::vector<unsigned int> expectedGroupNumbers = {
0, 1, 1, 4
};
std::vector<unsigned int> expectedItemNumbers = {
0, 0, 1, 0
};
thrust::host_vector<unsigned int> resultGroupNumbers = result->groupNumbers;
thrust::host_vector<unsigned int> resultItemNumbers = result->itemNumbers;
REQUIRE(std::equal(expectedGroupNumbers.begin(), expectedGroupNumbers.end(), resultGroupNumbers.begin()));
REQUIRE(std::equal(expectedItemNumbers.begin(), expectedItemNumbers.end(), resultItemNumbers.begin()));
REQUIRE(result->threadCount == 4);
}
//--------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | for groups inner zeros extended")
{
thrust::device_vector<unsigned int> counts;
{
std::vector<unsigned int> hCount = {
1, 2, 0, 0, 4, 1
};
counts = hCount;
}
auto result = forGroups(counts);
std::vector<unsigned int> expectedGroupNumbers = {
0, 1, 1, 4, 4, 4, 4, 5
};
std::vector<unsigned int> expectedItemNumbers = {
0, 0, 1, 0, 1, 2, 3, 0
};
thrust::host_vector<unsigned int> resultGroupNumbers = result->groupNumbers;
thrust::host_vector<unsigned int> resultItemNumbers = result->itemNumbers;
REQUIRE(std::equal(expectedGroupNumbers.begin(), expectedGroupNumbers.end(), resultGroupNumbers.begin()));
REQUIRE(std::equal(expectedItemNumbers.begin(), expectedItemNumbers.end(), resultItemNumbers.begin()));
REQUIRE(result->threadCount == expectedGroupNumbers.size());
}
//--------------------------------------------------------------
/*
Test for graph
C3
|
A1-B1-C1-B2-A2-C2
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | insert first two levels")
{
thrust::device_vector<FeatureInstance> pairsA;
thrust::device_vector<FeatureInstance> pairsB;
{
//a1 - b1
//a2 - b2
//a2 - c2
//b1 - c1
//b2 - c1
//b2 - c3
std::vector<FeatureInstance> hPairsA = {
{ 0x000A0001 }
,{ 0x000A0002 }
,{ 0x000A0002 }
,{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000B0002 }
};
std::vector<FeatureInstance> hPairsB = {
{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0001 }
,{ 0x000C0001 }
,{ 0x000C0003 }
};
pairsA = hPairsA;
pairsB = hPairsB;
}
auto instanceMapResult = IntanceTablesMapCreator::createTypedNeighboursListMap(
pairsA
, pairsB
);
thrust::device_vector<thrust::device_vector<unsigned short>> cliquesData;
thrust::host_vector<thrust::device_vector<unsigned short>> hcliquesData;
{
std::vector<unsigned short> first = { 0x000A, 0x000B, 0x000C };
std::vector<unsigned short> second = { 0x000B, 0x000C, 0x000D };
hcliquesData.push_back(first);
hcliquesData.push_back(second);
cliquesData = hcliquesData;
}
thrust::device_vector<thrust::device_ptr<const unsigned short>> cliques;
{
std::vector<thrust::device_ptr<const unsigned short>> hcliques;
for (const thrust::device_vector<unsigned short>& vec : hcliquesData)
hcliques.push_back(vec.data());
cliques = hcliques;
}
thrust::device_vector<unsigned int> groupNumber;
{
std::vector<unsigned int> hGroupNumber
{
0, 0, 1, 1, 1
};
groupNumber = hGroupNumber;
}
thrust::device_vector<unsigned int> itemNumber;
{
std::vector<unsigned int> hItemNumber
{
0, 1, 0, 1, 2
};
itemNumber = hItemNumber;
}
dim3 insertGrid;
findSmallest2D(5, 256, insertGrid.x, insertGrid.y);
thrust::device_vector<FeatureInstance> firstLevel(5);
thrust::device_vector<FeatureInstance> secondLevel(5);
hipLaunchKernelGGL(( writeFirstTwoLevels) , dim3(insertGrid), dim3(256) , 0, 0,
instanceMapResult->map->getBean()
, thrust::raw_pointer_cast(cliques.data())
, groupNumber.data()
, itemNumber.data()
, pairsA.data().get()
, pairsB.data().get()
, 5
, firstLevel.data().get()
, secondLevel.data().get()
);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
std::vector<FeatureInstance> expectedFirstLevel;
{
FeatureInstance fi;
fi.field = 0x000A0001;
expectedFirstLevel.push_back(fi);
fi.field = 0x000A0002;
expectedFirstLevel.push_back(fi);
fi.field = 0x000B0001;
expectedFirstLevel.push_back(fi);
fi.field = 0x000B0002;
expectedFirstLevel.push_back(fi);
fi.field = 0x000B0002;
expectedFirstLevel.push_back(fi);
}
std::vector<FeatureInstance> expectedSecondLevel;
{
FeatureInstance fi;
//a1 - b1
//a2 - b2
//a2 - c2
//b1 - c1
//b2 - c1
//b2 - c3
fi.field = 0x000B0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000B0002;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0003;
expectedSecondLevel.push_back(fi);
}
thrust::host_vector<FeatureInstance> resultFirstLevel = firstLevel;
thrust::host_vector<FeatureInstance> resultSecondLevel = secondLevel;
REQUIRE(std::equal(expectedFirstLevel.begin(), expectedFirstLevel.end(), resultFirstLevel.begin()));
REQUIRE(std::equal(expectedSecondLevel.begin(), expectedSecondLevel.end(), resultSecondLevel.begin()));
}
//--------------------------------------------------------------
/*
Test for graph
C3-D2
|
A1-B1-C1-B2-A2-C2-D1
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | insert third level counts")
{
thrust::device_vector<FeatureInstance> pairsA;
thrust::device_vector<FeatureInstance> pairsB;
{
/*
a1 - b1
a2 - b2
a2 - c2
b1 - c1
b2 - c1
b2 - c3
c2 - d1
c3 - d2
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
std::vector<FeatureInstance> hPairsA = {
{ 0x000A0001 }
,{ 0x000A0002 }
,{ 0x000A0002 }
,{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0003 }
};
std::vector<FeatureInstance> hPairsB = {
{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0001 }
,{ 0x000C0001 }
,{ 0x000C0003 }
,{ 0x000D0001 }
,{ 0x000D0002 }
};
pairsA = hPairsA;
pairsB = hPairsB;
}
auto instanceNeighboursMap = InstanceTypedNeighboursMapCreator::createTypedNeighboursListMap(
pairsA
, pairsB
);
thrust::device_vector<thrust::device_vector<unsigned short>> cliquesData;
thrust::host_vector<thrust::device_vector<unsigned short>> hcliquesData;
{
std::vector<unsigned short> first = { 0x000A, 0x000B, 0x000C };
std::vector<unsigned short> second = { 0x000B, 0x000C, 0x000D };
hcliquesData.push_back(first);
hcliquesData.push_back(second);
cliquesData = hcliquesData;
}
thrust::device_vector<thrust::device_ptr<const unsigned short>> cliques;
{
std::vector<thrust::device_ptr<const unsigned short>> hcliques;
for (const thrust::device_vector<unsigned short>& vec : hcliquesData)
hcliques.push_back(vec.data());
cliques = hcliques;
}
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> expectedSecondLevel;
{
FeatureInstance fi;
/*
a1 - b1
a2 - b2
a2 - c2
b1 - c1
b2 - c1
b2 - c3
*/
fi.field = 0x000B0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000B0002;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0003;
expectedSecondLevel.push_back(fi);
}
secondLevelInstances = expectedSecondLevel;
}
thrust::device_vector<unsigned int> group;
thrust::device_vector<thrust::device_ptr<unsigned int>> groups;
{
// g i
//a1 - b1 0 0
//a2 - b2 0 1
//a2 - c2 1 0
//b1 - c1 2 0
//b2 - c1 2 1
//b2 - c3 2 2
std::vector<unsigned int> hgroups = { 0, 0, 1, 1, 1 };
group = hgroups;
}
groups.push_back(group.data());
groups.push_back(group.data());
// ####################################################################
const unsigned int outpuCount = secondLevelInstances.size();
thrust::device_vector<unsigned int> thirdLevelCounts(outpuCount);
thrust::device_vector<unsigned int> result(outpuCount);
dim3 insertGrid;
findSmallest2D(outpuCount, 256, insertGrid.x, insertGrid.y);
thrust::device_vector<bool> integrityMask(outpuCount, true);
hipLaunchKernelGGL(( fillWithNextLevelCountsFromTypedNeighbour) , dim3(insertGrid), dim3(256) , 0, 0,
instanceNeighboursMap->map->getBean()
, thrust::raw_pointer_cast(cliques.data())
, thrust::raw_pointer_cast(groups.data())
, secondLevelInstances.data()
, outpuCount
, 2
, integrityMask.data()
, result.data()
);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
std::vector<unsigned int> expectedCounts{ 1, 2, 0, 0, 1 };
thrust::host_vector<unsigned int> resultCounts = result;
REQUIRE(std::equal(expectedCounts.begin(), expectedCounts.end(), resultCounts.begin()));
}
/*
Test for graph
C3-D2
|
A1-B1-C1-B2-A2-C2-D1
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | insert third..n level instances")
{
// plane sweep data
thrust::device_vector<FeatureInstance> pairsA;
thrust::device_vector<FeatureInstance> pairsB;
{
/*
a1 - b1
a2 - b2
a2 - c2
b1 - c1
b2 - c1
b2 - c3
c2 - d1
c3 - d2
*/
std::vector<FeatureInstance> hPairsA = {
{ 0x000A0001 }
,{ 0x000A0002 }
,{ 0x000A0002 }
,{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0003 }
};
std::vector<FeatureInstance> hPairsB = {
{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0001 }
,{ 0x000C0001 }
,{ 0x000C0003 }
,{ 0x000D0001 }
,{ 0x000D0002 }
};
pairsA = hPairsA;
pairsB = hPairsB;
}
// instance neighbour map
auto instanceNeighboursMap = InstanceTypedNeighboursMapCreator::createTypedNeighboursListMap(
pairsA
, pairsB
);
// clique data
thrust::device_vector<thrust::device_vector<unsigned short>> cliquesData;
thrust::host_vector<thrust::device_vector<unsigned short>> hcliquesData;
{
std::vector<unsigned short> first = { 0x000A, 0x000B, 0x000C };
std::vector<unsigned short> second = { 0x000B, 0x000C, 0x000D };
hcliquesData.push_back(first);
hcliquesData.push_back(second);
cliquesData = hcliquesData;
}
thrust::device_vector<thrust::device_ptr<const unsigned short>> cliques;
{
std::vector<thrust::device_ptr<const unsigned short>> hcliques;
for (const thrust::device_vector<unsigned short>& vec : hcliquesData)
hcliques.push_back(vec.data());
cliques = hcliques;
}
// forgroups result
std::vector<UIntThrustVectorPtr> forGroupGroups;
thrust::device_vector<thrust::device_ptr<unsigned int>> forGroupGroupsDevPtrs;
{
std::vector<thrust::device_ptr<unsigned int>> tempDevPtr;
std::vector<unsigned int> hFirstLevelGroups = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hFirstLevelGroups));
tempDevPtr.push_back(forGroupGroups.back()->data());
std::vector<unsigned int> hsecondLevelGroup = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hsecondLevelGroup));
tempDevPtr.push_back(forGroupGroups.back()->data());
/*
groups count
1, 2, 0, 0, 1
*/
std::vector<unsigned int> hthirdLevel = { 0, 1, 1, 4 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hthirdLevel));
tempDevPtr.push_back(forGroupGroups.back()->data());
forGroupGroupsDevPtrs = tempDevPtr;
}
thrust::device_vector<unsigned int> itemsNumber;
{
std::vector<unsigned int> hItemumbers = { 0, 0, 1, 0 };
itemsNumber = hItemumbers;
}
// last ready level instances
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> hSecondLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000B0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hSecondLevelInstances.push_back(fi);
}
secondLevelInstances = hSecondLevelInstances;
}
// ####################################################################
const unsigned int outpuCount = forGroupGroups.back()->size();
thrust::device_vector<FeatureInstance> result(outpuCount);
dim3 insertGrid;
findSmallest2D(outpuCount, 256, insertGrid.x, insertGrid.y);
hipLaunchKernelGGL(( fillLevelInstancesFromNeighboursList) , dim3(insertGrid), dim3(256) , 0, 0,
instanceNeighboursMap->map->getBean()
, cliques.data().get()
, forGroupGroupsDevPtrs.data().get()
, itemsNumber.data()
, secondLevelInstances.data()
, pairsB.data()
, outpuCount
, 2
, result.data()
);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
std::vector<FeatureInstance> expectedThirdLevelInstances;
{
FeatureInstance fi;
/*
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
fi.field = 0x000C0001;
expectedThirdLevelInstances.push_back(fi);
fi.field = 0x000C0001;
expectedThirdLevelInstances.push_back(fi);
fi.field = 0x000C0003;
expectedThirdLevelInstances.push_back(fi);
fi.field = 0x000D0002;
expectedThirdLevelInstances.push_back(fi);
}
thrust::host_vector<FeatureInstance> calculatedThirdLevelInstances = result;
REQUIRE(std::equal(expectedThirdLevelInstances.begin(), expectedThirdLevelInstances.end(), calculatedThirdLevelInstances.begin()));
}
// -------------------------------------------------------------------------------------------------------------------------------
/*
Test for graph
C3-D2
| /
A1-B1-C1-B2-A2-C2-D1
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | check clique integrity")
{
// plane sweep data
thrust::device_vector<FeatureInstance> pairsA;
thrust::device_vector<FeatureInstance> pairsB;
{
/*
a1 - b1
a2 - b2
a2 - c2
b1 - c1
b2 - c1
b2 - c3
b2 - d2
c2 - d1
c3 - d2
*/
std::vector<FeatureInstance> hPairsA = {
{ 0x000A0001 }
,{ 0x000A0002 }
,{ 0x000A0002 }
,{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000B0002 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0003 }
};
std::vector<FeatureInstance> hPairsB = {
{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0001 }
,{ 0x000C0001 }
,{ 0x000C0003 }
,{ 0x000D0002 }
,{ 0x000D0001 }
,{ 0x000D0002 }
};
pairsA = hPairsA;
pairsB = hPairsB;
}
// instance neighbour map
auto instanceNeighboursMap = InstanceTypedNeighboursMapCreator::createTypedNeighboursListMap(
pairsA
, pairsB
);
// clique data
thrust::device_vector<thrust::device_vector<unsigned short>> cliquesData;
thrust::host_vector<thrust::device_vector<unsigned short>> hcliquesData;
{
std::vector<unsigned short> first = { 0x000A, 0x000B, 0x000C };
std::vector<unsigned short> second = { 0x000B, 0x000C, 0x000D };
hcliquesData.push_back(first);
hcliquesData.push_back(second);
cliquesData = hcliquesData;
}
thrust::device_vector<thrust::device_ptr<const unsigned short>> cliques;
{
std::vector<thrust::device_ptr<const unsigned short>> hcliques;
for (const thrust::device_vector<unsigned short>& vec : hcliquesData)
hcliques.push_back(vec.data());
cliques = hcliques;
}
// forgroups result
std::vector<UIntThrustVectorPtr> forGroupGroups;
thrust::device_vector<thrust::device_ptr<unsigned int>> forGroupGroupsDevPtrs;
{
std::vector<thrust::device_ptr<unsigned int>> tempDevPtr;
std::vector<unsigned int> hFirstLevelGroups = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hFirstLevelGroups));
tempDevPtr.push_back(forGroupGroups.back()->data());
std::vector<unsigned int> hsecondLevelGroup = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hsecondLevelGroup));
tempDevPtr.push_back(forGroupGroups.back()->data());
/*
groups count
1, 2, 0, 0, 1
*/
std::vector<unsigned int> hthirdLevel = { 0, 1, 1, 4 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hthirdLevel));
tempDevPtr.push_back(forGroupGroups.back()->data());
forGroupGroupsDevPtrs = tempDevPtr;
}
thrust::device_vector<unsigned int> itemsNumber;
{
std::vector<unsigned int> hItemumbers = { 0, 0, 1, 0 };
itemsNumber = hItemumbers;
}
// instances levels
thrust::device_vector<FeatureInstance> firstLevelInstances;
{
/*
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
std::vector<FeatureInstance> hFirstLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000A0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000A0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
}
firstLevelInstances = hFirstLevelInstances;
}
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> hSecondLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000B0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hSecondLevelInstances.push_back(fi);
}
secondLevelInstances = hSecondLevelInstances;
}
thrust::device_vector<FeatureInstance> thirdLevelInstances;
{
std::vector<FeatureInstance> hThirdLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000D0002;
hThirdLevelInstances.push_back(fi);
}
thirdLevelInstances = hThirdLevelInstances;
}
thrust::device_vector<thrust::device_ptr<FeatureInstance>> instancesOnLevels;
{
std::vector<thrust::device_ptr<FeatureInstance>> hInstancesOnLevels;
hInstancesOnLevels.push_back(firstLevelInstances.data());
hInstancesOnLevels.push_back(secondLevelInstances.data());
hInstancesOnLevels.push_back(thirdLevelInstances.data());
instancesOnLevels = hInstancesOnLevels;
}
const unsigned int outpuCount = forGroupGroups.back()->size();
thrust::device_vector<bool> result(outpuCount);
dim3 insertGrid;
findSmallest2D(outpuCount, 256, insertGrid.x, insertGrid.y);
hipLaunchKernelGGL(( markAsPartOfCurrentCliqueInstance) , dim3(insertGrid), dim3(256) , 0, 0,
instanceNeighboursMap->map->getBean()
, forGroupGroupsDevPtrs.data().get()
, instancesOnLevels.data().get()
, thirdLevelInstances.data()
, pairsB.data()
, outpuCount
, 2
, result.data()
);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
std::vector<bool> expected = { false, false, false, true };
thrust::host_vector<bool> calculated = result;
REQUIRE(std::equal(expected.begin(), expected.end(), calculated.begin()));
}
// ---------------------------------------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | reverse generate simple 1")
{
// instances levels
thrust::device_vector<FeatureInstance> firstLevelInstances;
{
std::vector<FeatureInstance> hFirstLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000A0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000A0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
}
firstLevelInstances = hFirstLevelInstances;
}
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> hSecondLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000B0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hSecondLevelInstances.push_back(fi);
}
secondLevelInstances = hSecondLevelInstances;
}
thrust::device_vector<FeatureInstance> thirdLevelInstances;
{
std::vector<FeatureInstance> hThirdLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000D0002;
hThirdLevelInstances.push_back(fi);
}
thirdLevelInstances = hThirdLevelInstances;
}
thrust::device_vector<thrust::device_ptr<FeatureInstance>> instancesOnLevels;
{
std::vector<thrust::device_ptr<FeatureInstance>> hInstancesOnLevels;
hInstancesOnLevels.push_back(firstLevelInstances.data());
hInstancesOnLevels.push_back(secondLevelInstances.data());
hInstancesOnLevels.push_back(thirdLevelInstances.data());
instancesOnLevels = hInstancesOnLevels;
}
// forgroups result
std::vector<UIntThrustVectorPtr> forGroupGroups;
thrust::device_vector<thrust::device_ptr<unsigned int>> forGroupGroupsDevPtrs;
{
std::vector<thrust::device_ptr<unsigned int>> tempDevPtr;
std::vector<unsigned int> hFirstLevelGroups = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hFirstLevelGroups));
tempDevPtr.push_back(forGroupGroups.back()->data());
std::vector<unsigned int> hsecondLevelGroup = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hsecondLevelGroup));
tempDevPtr.push_back(forGroupGroups.back()->data());
/*
groups count
1, 2, 0, 0, 1
*/
std::vector<unsigned int> hthirdLevel = { 0, 1, 1, 4 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hthirdLevel));
tempDevPtr.push_back(forGroupGroups.back()->data());
forGroupGroupsDevPtrs = tempDevPtr;
}
// write positions
thrust::device_vector<unsigned int> writePositions;
{
std::vector<unsigned int> hWritePositions = { 0, 1, 1, 2 };
writePositions = hWritePositions;
}
// integrity mask
thrust::device_vector<bool> integrityMask;
{
std::vector<bool> hIntegrityMask = { true, true, false, true };
integrityMask = hIntegrityMask;
}
// result
const unsigned int endCount = 4;
const unsigned int consistentCount = 3;
const unsigned int cliqueSize = 3;
thrust::device_vector<FeatureInstance> result(consistentCount * cliqueSize);
thrust::device_vector<unsigned int> instancesCliques(consistentCount);
dim3 insertGrid;
findSmallest2D(endCount, 256, insertGrid.x, insertGrid.y);
hipLaunchKernelGGL(( reverseGenerateCliquesInstances) , dim3(insertGrid), dim3(256) , 0, 0,
forGroupGroupsDevPtrs.data().get()
, instancesOnLevels.data().get()
, endCount
, consistentCount
, cliqueSize
, integrityMask.data()
, writePositions.data()
, result.data()
, instancesCliques.data()
);
/*
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
std::vector<FeatureInstance> expected = {
{ 0x000A0001 }, { 0x000A0002 }, { 0x000B0002 }
,{ 0x000B0001 }, { 0x000B0002 }, { 0x000C0003 }
,{ 0x000C0001 }, { 0x000C0001 }, { 0x000D0002 }
};
CUDA_CHECK_RETURN(hipDeviceSynchronize());
thrust::host_vector<FeatureInstance> copmuted = result;
REQUIRE(std::equal(expected.begin(), expected.end(), copmuted.begin()));
std::vector<unsigned int> expectedInstancesId = { 0, 0, 1 };
thrust::host_vector<unsigned int> calculatedCliqueId = instancesCliques;
REQUIRE(std::equal(expectedInstancesId.begin(), expectedInstancesId.end(), calculatedCliqueId.begin()));
}
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | reverse generate simple 2")
{
// instances levels
thrust::device_vector<FeatureInstance> firstLevelInstances;
{
std::vector<FeatureInstance> hFirstLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000A0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000A0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
}
firstLevelInstances = hFirstLevelInstances;
}
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> hSecondLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000B0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hSecondLevelInstances.push_back(fi);
}
secondLevelInstances = hSecondLevelInstances;
}
thrust::device_vector<FeatureInstance> thirdLevelInstances;
{
std::vector<FeatureInstance> hThirdLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000D0002;
hThirdLevelInstances.push_back(fi);
}
thirdLevelInstances = hThirdLevelInstances;
}
thrust::device_vector<thrust::device_ptr<FeatureInstance>> instancesOnLevels;
{
std::vector<thrust::device_ptr<FeatureInstance>> hInstancesOnLevels;
hInstancesOnLevels.push_back(firstLevelInstances.data());
hInstancesOnLevels.push_back(secondLevelInstances.data());
hInstancesOnLevels.push_back(thirdLevelInstances.data());
instancesOnLevels = hInstancesOnLevels;
}
// forgroups result
std::vector<UIntThrustVectorPtr> forGroupGroups;
thrust::device_vector<thrust::device_ptr<unsigned int>> forGroupGroupsDevPtrs;
{
std::vector<thrust::device_ptr<unsigned int>> tempDevPtr;
std::vector<unsigned int> hFirstLevelGroups = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hFirstLevelGroups));
tempDevPtr.push_back(forGroupGroups.back()->data());
std::vector<unsigned int> hsecondLevelGroup = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hsecondLevelGroup));
tempDevPtr.push_back(forGroupGroups.back()->data());
/*
groups count
1, 2, 0, 0, 1
*/
std::vector<unsigned int> hthirdLevel = { 0, 1, 1, 4 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hthirdLevel));
tempDevPtr.push_back(forGroupGroups.back()->data());
forGroupGroupsDevPtrs = tempDevPtr;
}
// write positions
thrust::device_vector<unsigned int> writePositions;
{
std::vector<unsigned int> hWritePositions = { 0, 1, 1, 2 };
writePositions = hWritePositions;
}
// integrity mask
thrust::device_vector<bool> integrityMask;
{
std::vector<bool> hIntegrityMask = { true, false, true, true };
integrityMask = hIntegrityMask;
}
// result
const unsigned int endCount = 4;
const unsigned int consistentCount = 3;
const unsigned int cliqueSize = 3;
thrust::device_vector<FeatureInstance> result(consistentCount * cliqueSize);
dim3 insertGrid;
findSmallest2D(endCount, 256, insertGrid.x, insertGrid.y);
thrust::device_vector<unsigned int> instancesCliques(consistentCount);
hipLaunchKernelGGL(( reverseGenerateCliquesInstances) , dim3(insertGrid), dim3(256) , 0, 0,
forGroupGroupsDevPtrs.data().get()
, instancesOnLevels.data().get()
, endCount
, consistentCount
, cliqueSize
, integrityMask.data()
, writePositions.data()
, result.data()
, instancesCliques.data()
);
/*
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
std::vector<FeatureInstance> expected = {
{ 0x000A0001 },{ 0x000A0002 },{ 0x000B0002 }
,{ 0x000B0001 },{ 0x000B0002 },{ 0x000C0003 }
,{ 0x000C0001 },{ 0x000C0003 },{ 0x000D0002 }
};
CUDA_CHECK_RETURN(hipDeviceSynchronize());
thrust::host_vector<FeatureInstance> copmuted = result;
REQUIRE(std::equal(expected.begin(), expected.end(), copmuted.begin()));
std::vector<unsigned int> expectedInstancesId = { 0, 0, 1 };
thrust::host_vector<unsigned int> calculatedCliqueId = instancesCliques;
REQUIRE(std::equal(expectedInstancesId.begin(), expectedInstancesId.end(), calculatedCliqueId.begin()));
}
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | reverse generate simple 2-size")
{
// instances levels
thrust::device_vector<FeatureInstance> firstLevelInstances;
{
std::vector<FeatureInstance> hFirstLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000A0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000A0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
}
firstLevelInstances = hFirstLevelInstances;
}
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> hSecondLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000B0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hSecondLevelInstances.push_back(fi);
}
secondLevelInstances = hSecondLevelInstances;
}
thrust::device_vector<thrust::device_ptr<FeatureInstance>> instancesOnLevels;
{
std::vector<thrust::device_ptr<FeatureInstance>> hInstancesOnLevels;
hInstancesOnLevels.push_back(firstLevelInstances.data());
hInstancesOnLevels.push_back(secondLevelInstances.data());
instancesOnLevels = hInstancesOnLevels;
}
// forgroups result
std::vector<UIntThrustVectorPtr> forGroupGroups;
thrust::device_vector<thrust::device_ptr<unsigned int>> forGroupGroupsDevPtrs;
{
std::vector<thrust::device_ptr<unsigned int>> tempDevPtr;
std::vector<unsigned int> hFirstLevelGroups = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hFirstLevelGroups));
tempDevPtr.push_back(forGroupGroups.back()->data());
std::vector<unsigned int> hsecondLevelGroup = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hsecondLevelGroup));
tempDevPtr.push_back(forGroupGroups.back()->data());
forGroupGroupsDevPtrs = tempDevPtr;
}
// write positions
thrust::device_vector<unsigned int> writePositions;
{
std::vector<unsigned int> hWritePositions = { 0, 1, 1, 2, 3 };
writePositions = hWritePositions;
}
// integrity mask
thrust::device_vector<bool> integrityMask;
{
std::vector<bool> hIntegrityMask = { true, false, true, true, true };
integrityMask = hIntegrityMask;
}
// result
const unsigned int endCount = 5;
const unsigned int consistentCount = 4;
const unsigned int cliqueSize = 2;
thrust::device_vector<FeatureInstance> result(consistentCount * cliqueSize);
dim3 insertGrid;
findSmallest2D(endCount, 256, insertGrid.x, insertGrid.y);
thrust::device_vector<unsigned int> instancesCliques(consistentCount);
hipLaunchKernelGGL(( reverseGenerateCliquesInstances) , dim3(insertGrid), dim3(256) , 0, 0,
forGroupGroupsDevPtrs.data().get()
, instancesOnLevels.data().get()
, endCount
, consistentCount
, cliqueSize
, integrityMask.data()
, writePositions.data()
, result.data()
, instancesCliques.data()
);
/*
a1-b1 true
a2-b2 false
b1-c1 true
b2-c1 true
b2-c3 true
*/
std::vector<FeatureInstance> expected = {
{ 0x000A0001 },{ 0x000B0001 },{ 0x000B0002 },{ 0x000B0002 }
,{ 0x000B0001 },{ 0x000C0001 },{ 0x000C0001 },{ 0x000C0003 }
};
CUDA_CHECK_RETURN(hipDeviceSynchronize());
thrust::host_vector<FeatureInstance> copmuted = result;
REQUIRE(std::equal(expected.begin(), expected.end(), copmuted.begin()));
std::vector<unsigned int> expectedInstancesId = { 0, 1, 1, 1 };
thrust::host_vector<unsigned int> calculatedCliqueId = instancesCliques;
REQUIRE(std::equal(expectedInstancesId.begin(), expectedInstancesId.end(), calculatedCliqueId.begin()));
}
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | generate write positions")
{
thrust::device_vector<bool> integrityMask;
{
std::vector<bool> hIntegrityMask{ true, true, true, false, false, true };
integrityMask = hIntegrityMask;
}
thrust::device_vector<unsigned int> writePositions(6);
auto consistentCount = fillWritePositionsAndReturnCount(
integrityMask
, writePositions
, 6
);
std::vector<unsigned int> expected = { 0, 1, 2, 3, 3, 3 };
thrust::host_vector<unsigned int> calculated = writePositions;
REQUIRE(consistentCount == 4);
REQUIRE(std::equal(expected.begin(), expected.end(), calculated.begin()));
}
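// Editor's note (an assumption about the intended semantics, not asserted by the
// API under test): the expected write positions behave like an exclusive prefix
// sum over the integrity mask, i.e.
//   pos[0] = 0;  pos[i] = pos[i - 1] + (mask[i - 1] ? 1 : 0);
// which yields { 0, 1, 2, 3, 3, 3 } for { true, true, true, false, false, true },
// and the returned consistent count equals pos[N - 1] + (mask[N - 1] ? 1 : 0) == 4.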
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | generate write positions, redundant integrity mask")
{
thrust::device_vector<bool> integrityMask;
{
std::vector<bool> hIntegrityMask{ true, true, true, false, false, true, false, false, true };
integrityMask = hIntegrityMask;
}
thrust::device_vector<unsigned int> writePositions(6);
auto consistentCount = fillWritePositionsAndReturnCount(
integrityMask
, writePositions
, 6
);
std::vector<unsigned int> expected = { 0, 1, 2, 3, 3, 3 };
thrust::host_vector<unsigned int> calculated = writePositions;
REQUIRE(consistentCount == 4);
REQUIRE(std::equal(expected.begin(), expected.end(), calculated.begin()));
} | 7ded51bb17148a65bb0953a5208346fec289911f.cu | #include "..\catch.hpp"
#include "..\BaseCudaTestHandler.h"
#define TEST_CUDA_CHECK_RETURN
//---------------------------------------------------------------------------------------------------
#include "..\..\GPUPatternMining/InstanceTree/InstanceTreeHelpers.h"
#include "../../GPUPatternMining/InstanceTree/IntanceTablesMapCreator.h"
//---------------------------------------------------------------------------------------------------
using namespace InstanceTreeHelpers;
//---------------------------------------------------------------------------------------------------
typedef thrust::device_vector<unsigned int> UIntThrustVector;
typedef std::shared_ptr<UIntThrustVector> UIntThrustVectorPtr;
typedef thrust::device_vector<FeatureInstance> FeatureInstanceThrustVector;
typedef std::shared_ptr<FeatureInstanceThrustVector> FeatureInstanceThrustVectorPtr;
//---------------------------------------------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | insert first pair count")
{
thrust::device_vector<unsigned int> keys;
{
std::vector<unsigned int> hKeys
{
0x000A000B
, 0x000A000C
, 0x000B000C
};
keys = hKeys;
}
thrust::device_vector<Entities::InstanceTable> values;
{
std::vector<Entities::InstanceTable> hValues;
Entities::InstanceTable it;
it.count = 2;
it.startIdx = 0;
hValues.push_back(it);
it.count = 3;
it.startIdx = 2;
hValues.push_back(it);
it.count = 6;
it.startIdx = 5;
hValues.push_back(it);
values = hValues;
}
auto proc = GPUUIntKeyProcessor();
auto map = IntanceTablesMapCreator::InstanceTableMap(5, &proc);
map.insertKeyValuePairs(
thrust::raw_pointer_cast(keys.data())
, thrust::raw_pointer_cast(values.data())
, 3
);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
thrust::device_vector<unsigned int> result(3);
thrust::device_vector<thrust::device_vector<unsigned short>> cliquesData;
thrust::host_vector<thrust::device_vector<unsigned short>> hcliquesData;
{
std::vector<unsigned short> first = { 0x000A, 0x000B, 0x000C };
std::vector<unsigned short> second = { 0x000A, 0x000C, 0x000D };
std::vector<unsigned short> third = { 0x000B, 0x000C, 0x000D };
hcliquesData.push_back(first);
hcliquesData.push_back(second);
hcliquesData.push_back(third);
cliquesData = hcliquesData;
}
thrust::device_vector<thrust::device_ptr<const unsigned short>> cliques;
{
std::vector<thrust::device_ptr<const unsigned short>> hcliques;
for (const thrust::device_vector<unsigned short>& vec : hcliquesData)
hcliques.push_back(vec.data());
cliques = hcliques;
}
dim3 insertGrid;
findSmallest2D(3, 256, insertGrid.x, insertGrid.y);
fillFirstPairCountFromMap <<< insertGrid, 256 >>>(
map.getBean()
, thrust::raw_pointer_cast(cliques.data())
, 3
, result.data().get()
);
cudaDeviceSynchronize();
thrust::host_vector<unsigned int> hResult = result;
std::vector<unsigned int> expected =
{
2, 3, 6
};
REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin()));
}
//--------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | for groups simple")
{
thrust::device_vector<unsigned int> counts;
{
std::vector<unsigned int> hCount = {
2, 3, 2, 1
};
counts = hCount;
}
auto result = forGroups(counts);
std::vector<unsigned int> expectedGroupNumbers = {
0, 0, 1, 1, 1, 2, 2, 3
};
std::vector<unsigned int> expectedItemNumbers = {
0, 1, 0, 1, 2, 0, 1, 0
};
thrust::host_vector<unsigned int> resultGroupNumbers = result->groupNumbers;
thrust::host_vector<unsigned int> resultItemNumbers = result->itemNumbers;
REQUIRE(std::equal(expectedGroupNumbers.begin(), expectedGroupNumbers.end(), resultGroupNumbers.begin()));
REQUIRE(std::equal(expectedItemNumbers.begin(), expectedItemNumbers.end(), resultItemNumbers.begin()));
REQUIRE(result->threadCount == 8);
}
//--------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | for groups last zero")
{
thrust::device_vector<unsigned int> counts;
{
std::vector<unsigned int> hCount = {
2, 3, 2, 0
};
counts = hCount;
}
auto result = forGroups(counts);
std::vector<unsigned int> expectedGroupNumbers = {
0, 0, 1, 1, 1, 2, 2
};
std::vector<unsigned int> expectedItemNumbers = {
0, 1, 0, 1, 2, 0, 1
};
thrust::host_vector<unsigned int> resultGroupNumbers = result->groupNumbers;
thrust::host_vector<unsigned int> resultItemNumbers = result->itemNumbers;
REQUIRE(std::equal(expectedGroupNumbers.begin(), expectedGroupNumbers.end(), resultGroupNumbers.begin()));
REQUIRE(std::equal(expectedItemNumbers.begin(), expectedItemNumbers.end(), resultItemNumbers.begin()));
REQUIRE(result->threadCount == 7);
}
//--------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | for groups inner zeros")
{
thrust::device_vector<unsigned int> counts;
{
std::vector<unsigned int> hCount = {
1, 2, 0, 0, 1
};
counts = hCount;
}
auto result = forGroups(counts);
std::vector<unsigned int> expectedGroupNumbers = {
0, 1, 1, 4
};
std::vector<unsigned int> expectedItemNumbers = {
0, 0, 1, 0
};
thrust::host_vector<unsigned int> resultGroupNumbers = result->groupNumbers;
thrust::host_vector<unsigned int> resultItemNumbers = result->itemNumbers;
REQUIRE(std::equal(expectedGroupNumbers.begin(), expectedGroupNumbers.end(), resultGroupNumbers.begin()));
REQUIRE(std::equal(expectedItemNumbers.begin(), expectedItemNumbers.end(), resultItemNumbers.begin()));
REQUIRE(result->threadCount == 4);
}
//--------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | for groups inner zeros extended")
{
thrust::device_vector<unsigned int> counts;
{
std::vector<unsigned int> hCount = {
1, 2, 0, 0, 4, 1
};
counts = hCount;
}
auto result = forGroups(counts);
std::vector<unsigned int> expectedGroupNumbers = {
0, 1, 1, 4, 4, 4, 4, 5
};
std::vector<unsigned int> expectedItemNumbers = {
0, 0, 1, 0, 1, 2, 3, 0
};
thrust::host_vector<unsigned int> resultGroupNumbers = result->groupNumbers;
thrust::host_vector<unsigned int> resultItemNumbers = result->itemNumbers;
REQUIRE(std::equal(expectedGroupNumbers.begin(), expectedGroupNumbers.end(), resultGroupNumbers.begin()));
REQUIRE(std::equal(expectedItemNumbers.begin(), expectedItemNumbers.end(), resultItemNumbers.begin()));
REQUIRE(result->threadCount == expectedGroupNumbers.size());
}
//--------------------------------------------------------------
/*
Test for graph
C3
|
A1-B1-C1-B2-A2-C2
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | insert first two levels")
{
thrust::device_vector<FeatureInstance> pairsA;
thrust::device_vector<FeatureInstance> pairsB;
{
//a1 - b1
//a2 - b2
//a2 - c2
//b1 - c1
//b2 - c1
//b2 - c3
std::vector<FeatureInstance> hPairsA = {
{ 0x000A0001 }
,{ 0x000A0002 }
,{ 0x000A0002 }
,{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000B0002 }
};
std::vector<FeatureInstance> hPairsB = {
{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0001 }
,{ 0x000C0001 }
,{ 0x000C0003 }
};
pairsA = hPairsA;
pairsB = hPairsB;
}
auto instanceMapResult = IntanceTablesMapCreator::createTypedNeighboursListMap(
pairsA
, pairsB
);
thrust::device_vector<thrust::device_vector<unsigned short>> cliquesData;
thrust::host_vector<thrust::device_vector<unsigned short>> hcliquesData;
{
std::vector<unsigned short> first = { 0x000A, 0x000B, 0x000C };
std::vector<unsigned short> second = { 0x000B, 0x000C, 0x000D };
hcliquesData.push_back(first);
hcliquesData.push_back(second);
cliquesData = hcliquesData;
}
thrust::device_vector<thrust::device_ptr<const unsigned short>> cliques;
{
std::vector<thrust::device_ptr<const unsigned short>> hcliques;
for (const thrust::device_vector<unsigned short>& vec : hcliquesData)
hcliques.push_back(vec.data());
cliques = hcliques;
}
thrust::device_vector<unsigned int> groupNumber;
{
std::vector<unsigned int> hGroupNumber
{
0, 0, 1, 1, 1
};
groupNumber = hGroupNumber;
}
thrust::device_vector<unsigned int> itemNumber;
{
std::vector<unsigned int> hItemNumber
{
0, 1, 0, 1, 2
};
itemNumber = hItemNumber;
}
dim3 insertGrid;
findSmallest2D(5, 256, insertGrid.x, insertGrid.y);
thrust::device_vector<FeatureInstance> firstLevel(5);
thrust::device_vector<FeatureInstance> secondLevel(5);
writeFirstTwoLevels <<< insertGrid, 256 >>> (
instanceMapResult->map->getBean()
, thrust::raw_pointer_cast(cliques.data())
, groupNumber.data()
, itemNumber.data()
, pairsA.data().get()
, pairsB.data().get()
, 5
, firstLevel.data().get()
, secondLevel.data().get()
);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
std::vector<FeatureInstance> expectedFirstLevel;
{
FeatureInstance fi;
fi.field = 0x000A0001;
expectedFirstLevel.push_back(fi);
fi.field = 0x000A0002;
expectedFirstLevel.push_back(fi);
fi.field = 0x000B0001;
expectedFirstLevel.push_back(fi);
fi.field = 0x000B0002;
expectedFirstLevel.push_back(fi);
fi.field = 0x000B0002;
expectedFirstLevel.push_back(fi);
}
std::vector<FeatureInstance> expectedSecondLevel;
{
FeatureInstance fi;
//a1 - b1
//a2 - b2
//a2 - c2
//b1 - c1
//b2 - c1
//b2 - c3
fi.field = 0x000B0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000B0002;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0003;
expectedSecondLevel.push_back(fi);
}
thrust::host_vector<FeatureInstance> resultFirstLevel = firstLevel;
thrust::host_vector<FeatureInstance> resultSecondLevel = secondLevel;
REQUIRE(std::equal(expectedFirstLevel.begin(), expectedFirstLevel.end(), resultFirstLevel.begin()));
REQUIRE(std::equal(expectedSecondLevel.begin(), expectedSecondLevel.end(), resultSecondLevel.begin()));
}
//--------------------------------------------------------------
/*
Test for graph
C3-D2
|
A1-B1-C1-B2-A2-C2-D1
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | insert third level counts")
{
thrust::device_vector<FeatureInstance> pairsA;
thrust::device_vector<FeatureInstance> pairsB;
{
/*
a1 - b1
a2 - b2
a2 - c2
b1 - c1
b2 - c1
b2 - c3
c2 - d1
c3 - d2
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
std::vector<FeatureInstance> hPairsA = {
{ 0x000A0001 }
,{ 0x000A0002 }
,{ 0x000A0002 }
,{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0003 }
};
std::vector<FeatureInstance> hPairsB = {
{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0001 }
,{ 0x000C0001 }
,{ 0x000C0003 }
,{ 0x000D0001 }
,{ 0x000D0002 }
};
pairsA = hPairsA;
pairsB = hPairsB;
}
auto instanceNeighboursMap = InstanceTypedNeighboursMapCreator::createTypedNeighboursListMap(
pairsA
, pairsB
);
thrust::device_vector<thrust::device_vector<unsigned short>> cliquesData;
thrust::host_vector<thrust::device_vector<unsigned short>> hcliquesData;
{
std::vector<unsigned short> first = { 0x000A, 0x000B, 0x000C };
std::vector<unsigned short> second = { 0x000B, 0x000C, 0x000D };
hcliquesData.push_back(first);
hcliquesData.push_back(second);
cliquesData = hcliquesData;
}
thrust::device_vector<thrust::device_ptr<const unsigned short>> cliques;
{
std::vector<thrust::device_ptr<const unsigned short>> hcliques;
for (const thrust::device_vector<unsigned short>& vec : hcliquesData)
hcliques.push_back(vec.data());
cliques = hcliques;
}
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> expectedSecondLevel;
{
FeatureInstance fi;
/*
a1 - b1
a2 - b2
a2 - c2
b1 - c1
b2 - c1
b2 - c3
*/
fi.field = 0x000B0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000B0002;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0001;
expectedSecondLevel.push_back(fi);
fi.field = 0x000C0003;
expectedSecondLevel.push_back(fi);
}
secondLevelInstances = expectedSecondLevel;
}
thrust::device_vector<unsigned int> group;
thrust::device_vector<thrust::device_ptr<unsigned int>> groups;
{
// g i
//a1 - b1 0 0
//a2 - b2 0 1
//a2 - c2 1 0
//b1 - c1 2 0
//b2 - c1 2 1
//b2 - c3 2 2
std::vector<unsigned int> hgroups = { 0, 0, 1, 1, 1 };
group = hgroups;
}
groups.push_back(group.data());
groups.push_back(group.data());
// ####################################################################
const unsigned int outpuCount = secondLevelInstances.size();
thrust::device_vector<unsigned int> thirdLevelCounts(outpuCount);
thrust::device_vector<unsigned int> result(outpuCount);
dim3 insertGrid;
findSmallest2D(outpuCount, 256, insertGrid.x, insertGrid.y);
thrust::device_vector<bool> integrityMask(outpuCount, true);
fillWithNextLevelCountsFromTypedNeighbour <<< insertGrid, 256 >>> (
instanceNeighboursMap->map->getBean()
, thrust::raw_pointer_cast(cliques.data())
, thrust::raw_pointer_cast(groups.data())
, secondLevelInstances.data()
, outpuCount
, 2
, integrityMask.data()
, result.data()
);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
std::vector<unsigned int> expectedCounts{ 1, 2, 0, 0, 1 };
thrust::host_vector<unsigned int> resultCounts = result;
REQUIRE(std::equal(expectedCounts.begin(), expectedCounts.end(), resultCounts.begin()));
}
/*
Test for graph
C3-D2
|
A1-B1-C1-B2-A2-C2-D1
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | insert third..n level instances")
{
// plane sweep data
thrust::device_vector<FeatureInstance> pairsA;
thrust::device_vector<FeatureInstance> pairsB;
{
/*
a1 - b1
a2 - b2
a2 - c2
b1 - c1
b2 - c1
b2 - c3
c2 - d1
c3 - d2
*/
std::vector<FeatureInstance> hPairsA = {
{ 0x000A0001 }
,{ 0x000A0002 }
,{ 0x000A0002 }
,{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0003 }
};
std::vector<FeatureInstance> hPairsB = {
{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0001 }
,{ 0x000C0001 }
,{ 0x000C0003 }
,{ 0x000D0001 }
,{ 0x000D0002 }
};
pairsA = hPairsA;
pairsB = hPairsB;
}
// instance neighbour map
auto instanceNeighboursMap = InstanceTypedNeighboursMapCreator::createTypedNeighboursListMap(
pairsA
, pairsB
);
// clique data
thrust::device_vector<thrust::device_vector<unsigned short>> cliquesData;
thrust::host_vector<thrust::device_vector<unsigned short>> hcliquesData;
{
std::vector<unsigned short> first = { 0x000A, 0x000B, 0x000C };
std::vector<unsigned short> second = { 0x000B, 0x000C, 0x000D };
hcliquesData.push_back(first);
hcliquesData.push_back(second);
cliquesData = hcliquesData;
}
thrust::device_vector<thrust::device_ptr<const unsigned short>> cliques;
{
std::vector<thrust::device_ptr<const unsigned short>> hcliques;
for (const thrust::device_vector<unsigned short>& vec : hcliquesData)
hcliques.push_back(vec.data());
cliques = hcliques;
}
// forgroups result
std::vector<UIntThrustVectorPtr> forGroupGroups;
thrust::device_vector<thrust::device_ptr<unsigned int>> forGroupGroupsDevPtrs;
{
std::vector<thrust::device_ptr<unsigned int>> tempDevPtr;
std::vector<unsigned int> hFirstLevelGroups = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hFirstLevelGroups));
tempDevPtr.push_back(forGroupGroups.back()->data());
std::vector<unsigned int> hsecondLevelGroup = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hsecondLevelGroup));
tempDevPtr.push_back(forGroupGroups.back()->data());
/*
groups count
1, 2, 0, 0, 1
*/
std::vector<unsigned int> hthirdLevel = { 0, 1, 1, 4 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hthirdLevel));
tempDevPtr.push_back(forGroupGroups.back()->data());
forGroupGroupsDevPtrs = tempDevPtr;
}
thrust::device_vector<unsigned int> itemsNumber;
{
std::vector<unsigned int> hItemumbers = { 0, 0, 1, 0 };
itemsNumber = hItemumbers;
}
// last ready level instances
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> hSecondLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000B0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hSecondLevelInstances.push_back(fi);
}
secondLevelInstances = hSecondLevelInstances;
}
// ####################################################################
const unsigned int outpuCount = forGroupGroups.back()->size();
thrust::device_vector<FeatureInstance> result(outpuCount);
dim3 insertGrid;
findSmallest2D(outpuCount, 256, insertGrid.x, insertGrid.y);
fillLevelInstancesFromNeighboursList << < insertGrid, 256 >> > (
instanceNeighboursMap->map->getBean()
, cliques.data().get()
, forGroupGroupsDevPtrs.data().get()
, itemsNumber.data()
, secondLevelInstances.data()
, pairsB.data()
, outpuCount
, 2
, result.data()
);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
std::vector<FeatureInstance> expectedThirdLevelInstances;
{
FeatureInstance fi;
/*
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
fi.field = 0x000C0001;
expectedThirdLevelInstances.push_back(fi);
fi.field = 0x000C0001;
expectedThirdLevelInstances.push_back(fi);
fi.field = 0x000C0003;
expectedThirdLevelInstances.push_back(fi);
fi.field = 0x000D0002;
expectedThirdLevelInstances.push_back(fi);
}
thrust::host_vector<FeatureInstance> calculatedThirdLevelInstances = result;
REQUIRE(std::equal(expectedThirdLevelInstances.begin(), expectedThirdLevelInstances.end(), calculatedThirdLevelInstances.begin()));
}
// -------------------------------------------------------------------------------------------------------------------------------
/*
Test for graph
C3-D2
| /
A1-B1-C1-B2-A2-C2-D1
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | check clique integrity")
{
// plane sweep data
thrust::device_vector<FeatureInstance> pairsA;
thrust::device_vector<FeatureInstance> pairsB;
{
/*
a1 - b1
a2 - b2
a2 - c2
b1 - c1
b2 - c1
b2 - c3
b2 - d2
c2 - d1
c3 - d2
*/
std::vector<FeatureInstance> hPairsA = {
{ 0x000A0001 }
,{ 0x000A0002 }
,{ 0x000A0002 }
,{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000B0002 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0003 }
};
std::vector<FeatureInstance> hPairsB = {
{ 0x000B0001 }
,{ 0x000B0002 }
,{ 0x000C0002 }
,{ 0x000C0001 }
,{ 0x000C0001 }
,{ 0x000C0003 }
,{ 0x000D0002 }
,{ 0x000D0001 }
,{ 0x000D0002 }
};
pairsA = hPairsA;
pairsB = hPairsB;
}
// instance neighbour map
auto instanceNeighboursMap = InstanceTypedNeighboursMapCreator::createTypedNeighboursListMap(
pairsA
, pairsB
);
// clique data
thrust::device_vector<thrust::device_vector<unsigned short>> cliquesData;
thrust::host_vector<thrust::device_vector<unsigned short>> hcliquesData;
{
std::vector<unsigned short> first = { 0x000A, 0x000B, 0x000C };
std::vector<unsigned short> second = { 0x000B, 0x000C, 0x000D };
hcliquesData.push_back(first);
hcliquesData.push_back(second);
cliquesData = hcliquesData;
}
thrust::device_vector<thrust::device_ptr<const unsigned short>> cliques;
{
std::vector<thrust::device_ptr<const unsigned short>> hcliques;
for (const thrust::device_vector<unsigned short>& vec : hcliquesData)
hcliques.push_back(vec.data());
cliques = hcliques;
}
// forgroups result
std::vector<UIntThrustVectorPtr> forGroupGroups;
thrust::device_vector<thrust::device_ptr<unsigned int>> forGroupGroupsDevPtrs;
{
std::vector<thrust::device_ptr<unsigned int>> tempDevPtr;
std::vector<unsigned int> hFirstLevelGroups = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hFirstLevelGroups));
tempDevPtr.push_back(forGroupGroups.back()->data());
std::vector<unsigned int> hsecondLevelGroup = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hsecondLevelGroup));
tempDevPtr.push_back(forGroupGroups.back()->data());
/*
groups count
1, 2, 0, 0, 1
*/
std::vector<unsigned int> hthirdLevel = { 0, 1, 1, 4 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hthirdLevel));
tempDevPtr.push_back(forGroupGroups.back()->data());
forGroupGroupsDevPtrs = tempDevPtr;
}
thrust::device_vector<unsigned int> itemsNumber;
{
std::vector<unsigned int> hItemumbers = { 0, 0, 1, 0 };
itemsNumber = hItemumbers;
}
// instances levels
thrust::device_vector<FeatureInstance> firstLevelInstances;
{
/*
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
std::vector<FeatureInstance> hFirstLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000A0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000A0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
}
firstLevelInstances = hFirstLevelInstances;
}
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> hSecondLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000B0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hSecondLevelInstances.push_back(fi);
}
secondLevelInstances = hSecondLevelInstances;
}
thrust::device_vector<FeatureInstance> thirdLevelInstances;
{
std::vector<FeatureInstance> hThirdLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000D0002;
hThirdLevelInstances.push_back(fi);
}
thirdLevelInstances = hThirdLevelInstances;
}
thrust::device_vector<thrust::device_ptr<FeatureInstance>> instancesOnLevels;
{
std::vector<thrust::device_ptr<FeatureInstance>> hInstancesOnLevels;
hInstancesOnLevels.push_back(firstLevelInstances.data());
hInstancesOnLevels.push_back(secondLevelInstances.data());
hInstancesOnLevels.push_back(thirdLevelInstances.data());
instancesOnLevels = hInstancesOnLevels;
}
const unsigned int outputCount = forGroupGroups.back()->size();
thrust::device_vector<bool> result(outputCount);
dim3 insertGrid;
findSmallest2D(outputCount, 256, insertGrid.x, insertGrid.y);
markAsPartOfCurrentCliqueInstance <<< insertGrid, 256 >>> (
instanceNeighboursMap->map->getBean()
, forGroupGroupsDevPtrs.data().get()
, instancesOnLevels.data().get()
, thirdLevelInstances.data()
, pairsB.data()
, outputCount
, 2
, result.data()
);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
std::vector<bool> expected = { false, false, false, true };
thrust::host_vector<bool> calculated = result;
REQUIRE(std::equal(expected.begin(), expected.end(), calculated.begin()));
}
// ---------------------------------------------------------------------------------------------
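// The three "reverse generate" cases below exercise reverseGenerateCliquesInstances: starting
// from the last tree level it follows the group indices back up the instance tree, rebuilds the
// full clique instances level by level, drops entries whose integrity mask is false, writes the
// surviving instances at the supplied write positions, and records the clique id of each of them.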
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | reverse generate simple 1")
{
// instances levels
thrust::device_vector<FeatureInstance> firstLevelInstances;
{
std::vector<FeatureInstance> hFirstLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000A0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000A0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
}
firstLevelInstances = hFirstLevelInstances;
}
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> hSecondLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000B0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hSecondLevelInstances.push_back(fi);
}
secondLevelInstances = hSecondLevelInstances;
}
thrust::device_vector<FeatureInstance> thirdLevelInstances;
{
std::vector<FeatureInstance> hThirdLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000D0002;
hThirdLevelInstances.push_back(fi);
}
thirdLevelInstances = hThirdLevelInstances;
}
thrust::device_vector<thrust::device_ptr<FeatureInstance>> instancesOnLevels;
{
std::vector<thrust::device_ptr<FeatureInstance>> hInstancesOnLevels;
hInstancesOnLevels.push_back(firstLevelInstances.data());
hInstancesOnLevels.push_back(secondLevelInstances.data());
hInstancesOnLevels.push_back(thirdLevelInstances.data());
instancesOnLevels = hInstancesOnLevels;
}
// forGroups result
std::vector<UIntThrustVectorPtr> forGroupGroups;
thrust::device_vector<thrust::device_ptr<unsigned int>> forGroupGroupsDevPtrs;
{
std::vector<thrust::device_ptr<unsigned int>> tempDevPtr;
std::vector<unsigned int> hFirstLevelGroups = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hFirstLevelGroups));
tempDevPtr.push_back(forGroupGroups.back()->data());
std::vector<unsigned int> hsecondLevelGroup = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hsecondLevelGroup));
tempDevPtr.push_back(forGroupGroups.back()->data());
/*
groups count
1, 2, 0, 0, 1
*/
std::vector<unsigned int> hthirdLevel = { 0, 1, 1, 4 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hthirdLevel));
tempDevPtr.push_back(forGroupGroups.back()->data());
forGroupGroupsDevPtrs = tempDevPtr;
}
// write positions
thrust::device_vector<unsigned int> writePositions;
{
std::vector<unsigned int> hWritePositions = { 0, 1, 1, 2 };
writePositions = hWritePositions;
}
// integrity mask
thrust::device_vector<bool> integrityMask;
{
std::vector<bool> hIntegrityMask = { true, true, false, true };
integrityMask = hIntegrityMask;
}
// result
const unsigned int endCount = 4;
const unsigned int consistentCount = 3;
const unsigned int cliqueSize = 3;
thrust::device_vector<FeatureInstance> result(consistentCount * cliqueSize);
thrust::device_vector<unsigned int> instancesCliques(consistentCount);
dim3 insertGrid;
findSmallest2D(endCount, 256, insertGrid.x, insertGrid.y);
reverseGenerateCliquesInstances << < insertGrid, 256 >> > (
forGroupGroupsDevPtrs.data().get()
, instancesOnLevels.data().get()
, endCount
, consistentCount
, cliqueSize
, integrityMask.data()
, writePositions.data()
, result.data()
, instancesCliques.data()
);
/*
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
std::vector<FeatureInstance> expected = {
{ 0x000A0001 }, { 0x000A0002 }, { 0x000B0002 }
,{ 0x000B0001 }, { 0x000B0002 }, { 0x000C0003 }
,{ 0x000C0001 }, { 0x000C0001 }, { 0x000D0002 }
};
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
thrust::host_vector<FeatureInstance> computed = result;
REQUIRE(std::equal(expected.begin(), expected.end(), computed.begin()));
std::vector<unsigned int> expectedInstancesId = { 0, 0, 1 };
thrust::host_vector<unsigned int> calculatedCliqueId = instancesCliques;
REQUIRE(std::equal(expectedInstancesId.begin(), expectedInstancesId.end(), calculatedCliqueId.begin()));
}
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | reverse generate simple 2")
{
// instances levels
thrust::device_vector<FeatureInstance> firstLevelInstances;
{
std::vector<FeatureInstance> hFirstLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000A0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000A0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
}
firstLevelInstances = hFirstLevelInstances;
}
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> hSecondLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000B0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hSecondLevelInstances.push_back(fi);
}
secondLevelInstances = hSecondLevelInstances;
}
thrust::device_vector<FeatureInstance> thirdLevelInstances;
{
std::vector<FeatureInstance> hThirdLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hThirdLevelInstances.push_back(fi);
fi.field = 0x000D0002;
hThirdLevelInstances.push_back(fi);
}
thirdLevelInstances = hThirdLevelInstances;
}
thrust::device_vector<thrust::device_ptr<FeatureInstance>> instancesOnLevels;
{
std::vector<thrust::device_ptr<FeatureInstance>> hInstancesOnLevels;
hInstancesOnLevels.push_back(firstLevelInstances.data());
hInstancesOnLevels.push_back(secondLevelInstances.data());
hInstancesOnLevels.push_back(thirdLevelInstances.data());
instancesOnLevels = hInstancesOnLevels;
}
// forGroups result
std::vector<UIntThrustVectorPtr> forGroupGroups;
thrust::device_vector<thrust::device_ptr<unsigned int>> forGroupGroupsDevPtrs;
{
std::vector<thrust::device_ptr<unsigned int>> tempDevPtr;
std::vector<unsigned int> hFirstLevelGroups = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hFirstLevelGroups));
tempDevPtr.push_back(forGroupGroups.back()->data());
std::vector<unsigned int> hsecondLevelGroup = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hsecondLevelGroup));
tempDevPtr.push_back(forGroupGroups.back()->data());
/*
groups count
1, 2, 0, 0, 1
*/
std::vector<unsigned int> hthirdLevel = { 0, 1, 1, 4 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hthirdLevel));
tempDevPtr.push_back(forGroupGroups.back()->data());
forGroupGroupsDevPtrs = tempDevPtr;
}
// write positions
thrust::device_vector<unsigned int> writePositions;
{
std::vector<unsigned int> hWritePositions = { 0, 1, 1, 2 };
writePositions = hWritePositions;
}
// integrity mask
thrust::device_vector<bool> integrityMask;
{
std::vector<bool> hIntegrityMask = { true, false, true, true };
integrityMask = hIntegrityMask;
}
// result
const unsigned int endCount = 4;
const unsigned int consistentCount = 3;
const unsigned int cliqueSize = 3;
thrust::device_vector<FeatureInstance> result(consistentCount * cliqueSize);
dim3 insertGrid;
findSmallest2D(endCount, 256, insertGrid.x, insertGrid.y);
thrust::device_vector<unsigned int> instancesCliques(consistentCount);
reverseGenerateCliquesInstances << < insertGrid, 256 >> > (
forGroupGroupsDevPtrs.data().get()
, instancesOnLevels.data().get()
, endCount
, consistentCount
, cliqueSize
, integrityMask.data()
, writePositions.data()
, result.data()
, instancesCliques.data()
);
/*
a1-b1-c1 1
a2-b2 c1 2
c3
b1-c1- 0
b2-c1- 0
b2-c3-d2 1
*/
std::vector<FeatureInstance> expected = {
{ 0x000A0001 },{ 0x000A0002 },{ 0x000B0002 }
,{ 0x000B0001 },{ 0x000B0002 },{ 0x000C0003 }
,{ 0x000C0001 },{ 0x000C0003 },{ 0x000D0002 }
};
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
thrust::host_vector<FeatureInstance> computed = result;
REQUIRE(std::equal(expected.begin(), expected.end(), computed.begin()));
std::vector<unsigned int> expectedInstancesId = { 0, 0, 1 };
thrust::host_vector<unsigned int> calculatedCliqueId = instancesCliques;
REQUIRE(std::equal(expectedInstancesId.begin(), expectedInstancesId.end(), calculatedCliqueId.begin()));
}
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | reverse generate simple 2-size")
{
// instances levels
thrust::device_vector<FeatureInstance> firstLevelInstances;
{
std::vector<FeatureInstance> hFirstLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000A0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000A0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0001;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hFirstLevelInstances.push_back(fi);
}
firstLevelInstances = hFirstLevelInstances;
}
thrust::device_vector<FeatureInstance> secondLevelInstances;
{
std::vector<FeatureInstance> hSecondLevelInstances;
{
FeatureInstance fi;
fi.field = 0x000B0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000B0002;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0001;
hSecondLevelInstances.push_back(fi);
fi.field = 0x000C0003;
hSecondLevelInstances.push_back(fi);
}
secondLevelInstances = hSecondLevelInstances;
}
thrust::device_vector<thrust::device_ptr<FeatureInstance>> instancesOnLevels;
{
std::vector<thrust::device_ptr<FeatureInstance>> hInstancesOnLevels;
hInstancesOnLevels.push_back(firstLevelInstances.data());
hInstancesOnLevels.push_back(secondLevelInstances.data());
instancesOnLevels = hInstancesOnLevels;
}
// forGroups result
std::vector<UIntThrustVectorPtr> forGroupGroups;
thrust::device_vector<thrust::device_ptr<unsigned int>> forGroupGroupsDevPtrs;
{
std::vector<thrust::device_ptr<unsigned int>> tempDevPtr;
std::vector<unsigned int> hFirstLevelGroups = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hFirstLevelGroups));
tempDevPtr.push_back(forGroupGroups.back()->data());
std::vector<unsigned int> hsecondLevelGroup = { 0, 0, 1, 1, 1 };
forGroupGroups.push_back(std::make_shared<UIntThrustVector>(hsecondLevelGroup));
tempDevPtr.push_back(forGroupGroups.back()->data());
forGroupGroupsDevPtrs = tempDevPtr;
}
// write positions
thrust::device_vector<unsigned int> writePositions;
{
std::vector<unsigned int> hWritePositions = { 0, 1, 1, 2, 3 };
writePositions = hWritePositions;
}
// integrity mask
thrust::device_vector<bool> integrityMask;
{
std::vector<bool> hIntegrityMask = { true, false, true, true, true };
integrityMask = hIntegrityMask;
}
// result
const unsigned int endCount = 5;
const unsigned int consistentCount = 4;
const unsigned int cliqueSize = 2;
thrust::device_vector<FeatureInstance> result(consistentCount * cliqueSize);
dim3 insertGrid;
findSmallest2D(endCount, 256, insertGrid.x, insertGrid.y);
thrust::device_vector<unsigned int> instancesCliques(consistentCount);
reverseGenerateCliquesInstances << < insertGrid, 256 >> > (
forGroupGroupsDevPtrs.data().get()
, instancesOnLevels.data().get()
, endCount
, consistentCount
, cliqueSize
, integrityMask.data()
, writePositions.data()
, result.data()
, instancesCliques.data()
);
/*
a1-b1 true
a2-b2 false
b1-c1 true
b2-c1 true
b2-c3 true
*/
std::vector<FeatureInstance> expected = {
{ 0x000A0001 },{ 0x000B0001 },{ 0x000B0002 },{ 0x000B0002 }
,{ 0x000B0001 },{ 0x000C0001 },{ 0x000C0001 },{ 0x000C0003 }
};
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
thrust::host_vector<FeatureInstance> computed = result;
REQUIRE(std::equal(expected.begin(), expected.end(), computed.begin()));
std::vector<unsigned int> expectedInstancesId = { 0, 1, 1, 1 };
thrust::host_vector<unsigned int> calculatedCliqueId = instancesCliques;
REQUIRE(std::equal(expectedInstancesId.begin(), expectedInstancesId.end(), calculatedCliqueId.begin()));
}
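// fillWritePositionsAndReturnCount is expected to behave like an exclusive scan over the
// integrity mask: every entry receives the compacted index it would be written to, and the
// returned value is the number of consistent (true) entries.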
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | generate write positions")
{
thrust::device_vector<bool> integrityMask;
{
std::vector<bool> hIntegrityMask{ true, true, true, false, false, true };
integrityMask = hIntegrityMask;
}
thrust::device_vector<unsigned int> writePositions(6);
auto consistentCount = fillWritePositionsAndReturnCount(
integrityMask
, writePositions
, 6
);
std::vector<unsigned int> expected = { 0, 1, 2, 3, 3, 3 };
thrust::host_vector<unsigned int> calculated = writePositions;
REQUIRE(consistentCount == 4);
REQUIRE(std::equal(expected.begin(), expected.end(), calculated.begin()));
}
TEST_CASE_METHOD(BaseCudaTestHandler, "Instance tree helpers | generate write positions, redundant integrity mask")
{
thrust::device_vector<bool> integrityMask;
{
std::vector<bool> hIntegrityMask{ true, true, true, false, false, true, false, false, true };
integrityMask = hIntegrityMask;
}
thrust::device_vector<unsigned int> writePositions(6);
auto consistentCount = fillWritePositionsAndReturnCount(
integrityMask
, writePositions
, 6
);
std::vector<unsigned int> expected = { 0, 1, 2, 3, 3, 3 };
thrust::host_vector<unsigned int> calculated = writePositions;
REQUIRE(consistentCount == 4);
REQUIRE(std::equal(expected.begin(), expected.end(), calculated.begin()));
} |
a01441a22b1fc338102f04685b4755bdfebbadf5.hip | // !!! This is a file automatically generated by hipify!!!
#include "noopstateless.h"
void crossbowKernelNoopStateless (void *args) {
crossbowStreamP s = (crossbowStreamP) args;
crossbowVariableP theInput = (crossbowVariableP) (s->op->kernel->inputs[0]);
crossbowDataBufferP input = crossbowStreamGetCurrentInput (s);
int length = theInput->schema->bytes;
crossbowDataBufferP output = crossbowStreamGetCurrentOutput (s);
#ifndef CUDART_NOOP
hipMemcpyAsync (output->dev, input->dev, length, hipMemcpyDeviceToDevice, s->stream[s->op->branch]);
#else
UNUSED (output);
UNUSED (input);
UNUSED (length);
#endif
/* Store output in stream */
crossbowListAppend(s->outputs[s->op->id], output);
return;
}
| a01441a22b1fc338102f04685b4755bdfebbadf5.cu | #include "noopstateless.h"
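/* Stateless no-op kernel: forwards the operator's current input buffer unchanged to its
 * output buffer via an asynchronous device-to-device copy on the operator's stream
 * (the copy is skipped entirely when CUDART_NOOP is defined), then registers the output
 * buffer with the stream's output list. */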
void crossbowKernelNoopStateless (void *args) {
crossbowStreamP s = (crossbowStreamP) args;
crossbowVariableP theInput = (crossbowVariableP) (s->op->kernel->inputs[0]);
crossbowDataBufferP input = crossbowStreamGetCurrentInput (s);
int length = theInput->schema->bytes;
crossbowDataBufferP output = crossbowStreamGetCurrentOutput (s);
#ifndef CUDART_NOOP
cudaMemcpyAsync (output->dev, input->dev, length, cudaMemcpyDeviceToDevice, s->stream[s->op->branch]);
#else
UNUSED (output);
UNUSED (input);
UNUSED (length);
#endif
/* Store output in stream */
crossbowListAppend(s->outputs[s->op->id], output);
return;
}
|
0cfe480d67662412ca07bf2c3ff314c3a5081ac5.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <ctime>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include <stb/stb_image_write.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <helperUtils.cuh>
using namespace TinyRT;
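// Test-pattern kernel: one thread per pixel writes a gradient into the float pixel buffer
// (red grows with the row, green with the column, blue is fixed at 0.25).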
__global__ void render(float* const pixelBuffer, const int imageWidth, const int imageHeight, const int channelNum) {
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col >= imageWidth || row >= imageHeight)
return;
const int idx = (row * imageWidth + col) * channelNum;
pixelBuffer[idx] = static_cast<float>(row) / static_cast<float>(imageHeight - 1);
pixelBuffer[idx + 1] = static_cast<float>(col) / static_cast<float>(imageWidth - 1);
pixelBuffer[idx + 2] = 0.25f;
}
int main() {
/* image config */
constexpr int imageWidth = 400;
constexpr int imageHeight = 250;
/* image output file */
const std::string fileName("output.png");
/* thread block config */
constexpr int threadBlockWidth = 8;
constexpr int threadBlockHeight = 8;
// preparation
constexpr int channelNum = 3; // rgb
constexpr int pixelNum = imageWidth * imageHeight;
const size_t pixelBufferBytes = pixelNum * channelNum * sizeof(float);
float* pixelBuffer;
checkCudaErrors(hipMallocManaged(&pixelBuffer, pixelBufferBytes));
// start timer
const clock_t start = clock();
dim3 blockDim(imageWidth / threadBlockWidth + 1, imageHeight / threadBlockHeight + 1);
dim3 threadDim(threadBlockWidth, threadBlockHeight);
// render the image into buffer
hipLaunchKernelGGL(( render), dim3(blockDim), dim3(threadDim), 0, 0, pixelBuffer, imageWidth, imageHeight, channelNum);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// stop timer
const clock_t stop = clock();
// measure rendering time
const auto renderingMillisecond = (stop - start) * 1000 / CLOCKS_PER_SEC; // convert clock ticks to milliseconds
// other image writer arguments
constexpr int imageSize = pixelNum * channelNum;
constexpr size_t strideBytes = imageWidth * channelNum * sizeof(unsigned char);
const std::unique_ptr<unsigned char[]> pixelDataPtr(new unsigned char[imageSize]);
// store the pixel data into writing buffer as 8bit color
for (int idx = 0; idx < imageSize; ++idx) {
const auto value = pixelBuffer[idx];
pixelDataPtr[idx] = static_cast<unsigned char>(255 * value);
}
// print rendering time
std::cout << "Complete!\n" << "The rendering took " << renderingMillisecond << "ms" << std::endl;
// write pixel data to output file
stbi_write_png(fileName.c_str(), imageWidth, imageHeight, channelNum, pixelDataPtr.get(), strideBytes);
// clean
checkCudaErrors(hipFree(pixelBuffer));
return 0;
} | 0cfe480d67662412ca07bf2c3ff314c3a5081ac5.cu | #include <iostream>
#include <fstream>
#include <ctime>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include <stb/stb_image_write.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <helperUtils.cuh>
using namespace TinyRT;
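// Test-pattern kernel: one thread per pixel writes a gradient into the float pixel buffer
// (red grows with the row, green with the column, blue is fixed at 0.25).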
__global__ void render(float* const pixelBuffer, const int imageWidth, const int imageHeight, const int channelNum) {
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col >= imageWidth || row >= imageHeight)
return;
const int idx = (row * imageWidth + col) * channelNum;
pixelBuffer[idx] = static_cast<float>(row) / static_cast<float>(imageHeight - 1);
pixelBuffer[idx + 1] = static_cast<float>(col) / static_cast<float>(imageWidth - 1);
pixelBuffer[idx + 2] = 0.25f;
}
int main() {
/* image config */
constexpr int imageWidth = 400;
constexpr int imageHeight = 250;
/* image output file */
const std::string fileName("output.png");
/* thread block config */
constexpr int threadBlockWidth = 8;
constexpr int threadBlockHeight = 8;
// preparation
constexpr int channelNum = 3; // rgb
constexpr int pixelNum = imageWidth * imageHeight;
const size_t pixelBufferBytes = pixelNum * channelNum * sizeof(float);
float* pixelBuffer;
checkCudaErrors(cudaMallocManaged(&pixelBuffer, pixelBufferBytes));
// start timer
const clock_t start = clock();
dim3 blockDim(imageWidth / threadBlockWidth + 1, imageHeight / threadBlockHeight + 1);
dim3 threadDim(threadBlockWidth, threadBlockHeight);
// render the image into buffer
render<<<blockDim, threadDim>>>(pixelBuffer, imageWidth, imageHeight, channelNum);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// stop timer
const clock_t stop = clock();
// measure rendering time
const auto renderingMillisecond = (stop - start) * 1000 / CLOCKS_PER_SEC; // convert clock ticks to milliseconds
// other image writer arguments
constexpr int imageSize = pixelNum * channelNum;
constexpr size_t strideBytes = imageWidth * channelNum * sizeof(unsigned char);
const std::unique_ptr<unsigned char[]> pixelDataPtr(new unsigned char[imageSize]);
// store the pixel data into writing buffer as 8bit color
for (int idx = 0; idx < imageSize; ++idx) {
const auto value = pixelBuffer[idx];
pixelDataPtr[idx] = static_cast<unsigned char>(255 * value);
}
// print rendering time
std::cout << "Complete!\n" << "The rendering took " << renderingMillisecond << "ms" << std::endl;
// write pixel data to output file
stbi_write_png(fileName.c_str(), imageWidth, imageHeight, channelNum, pixelDataPtr.get(), strideBytes);
// clean
checkCudaErrors(cudaFree(pixelBuffer));
return 0;
} |
e367b7a9d31f3460d9ee611c8a4bedb7a12f76d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Unittests for the boundary conditions
* @file TestGpuStream.cu
* @author Adam Koleszar ([email protected])
*/
#include "CuTest.h"
#include "GpuFunctions.h"
#include "ShellFunctions.h"
#include "ComputeResiduals.h"
#include "FloatType.h"
#include "TestUtils.h"
#include "ArrayUtils.h"
#include "GpuConstants.h"
#include "CellFunctions.h"
/**
* @brief Test to compare results from #gpuStreaming and #gpu_streaming
*
* @param tc test case
* @test
* - Prepare boundary conditions for the lid driven cavity
* - Run the function
* - Check the sum of all the arrays between the two algorithms
*/
void testCompareGpuStream(CuTest *tc)
{
printBanner("Test compare streaming");
M = 1281;
N = 1282;
hipMemcpyToSymbol(width_d, &M, sizeof(int));
hipMemcpyToSymbol(height_d, &N, sizeof(int));
dim3 tpb(THREADS);
dim3 bpg1((int)(M*N/THREADS)+1);
dim3 bpg9((int)(9*M*N/THREADS)+1);
AIH = createHostArrayInt(M*N); //fluid
createLidBcFluid(AIH, M, N);
BIH = createHostArrayInt(9*M*N, ARRAY_FILL, 1); //stream9
CIH = createHostArrayInt(8*M*N, ARRAY_FILL, 1); //stream8
AFH = createHostArrayFlt(9*M*N, ARRAY_ZERO); //f1
BFH = createHostArrayFlt(9*M*N, ARRAY_ZERO); //f2
CFH = createHostArrayFlt(9*M*N, ARRAY_FILL, 0.0002); //fColl2
AID = createGpuArrayInt(M*N, ARRAY_COPY, 0, AIH);
BID = createGpuArrayInt(9*M*N, ARRAY_COPY, 0, BIH);
CID = createGpuArrayInt(8*M*N, ARRAY_COPY, 0, CIH);
AFD = createGpuArrayFlt(9*M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(9*M*N, ARRAY_COPY, 0, BFH);
CFD = createGpuArrayFlt(9*M*N, ARRAY_COPY, 0, CFH);
DFD = createGpuArrayFlt(9*M*N);
int sumAIa = sumHostInt(AIH, M*N);
int sumBIa = sumHostInt(BIH, 9*M*N);
int sumCIa = sumHostInt(CIH, 8*M*N);
FLOAT_TYPE sumAFa = sumHostFlt(AFH, 9*M*N);
FLOAT_TYPE sumBFa = sumHostFlt(BFH, 9*M*N);
FLOAT_TYPE sumCFa = sumHostFlt(CFH, 9*M*N);
hipLaunchKernelGGL(( gpu_streaming), dim3(bpg9),dim3(tpb), 0, 0, AID, BID, AFD, CFD);
hipLaunchKernelGGL(( gpuStreaming), dim3(bpg1),dim3(tpb), 0, 0, AID, CID, BFD, CFD);
hipMemcpy(AIH, AID, SIZEINT(N*M), hipMemcpyDeviceToHost);
hipMemcpy(BIH, BID, SIZEINT(9*N*M), hipMemcpyDeviceToHost);
hipMemcpy(CIH, CID, SIZEINT(8*N*M), hipMemcpyDeviceToHost);
hipMemcpy(AFH, AFD, SIZEFLT(9*N*M), hipMemcpyDeviceToHost);
hipMemcpy(BFH, BFD, SIZEFLT(9*N*M), hipMemcpyDeviceToHost);
hipMemcpy(CFH, CFD, SIZEFLT(9*N*M), hipMemcpyDeviceToHost);
int sumAIb = sumHostInt(AIH, M*N);
int sumBIb = sumHostInt(BIH, 9*M*N);
int sumCIb = sumHostInt(CIH, 8*M*N);
FLOAT_TYPE sumAFb = sumHostFlt(AFH, 9*M*N);
FLOAT_TYPE sumBFb = sumHostFlt(BFH, 9*M*N);
FLOAT_TYPE sumCFb = sumHostFlt(CFH, 9*M*N);
CuAssertIntEquals_Msg(tc, "fluid", sumAIa, sumAIb);
CuAssertIntEquals_Msg(tc, "stream9", sumBIa, sumBIb);
CuAssertIntEquals_Msg(tc, "stream8", sumCIa, sumCIb);
CuAssertDblEquals_Msg(tc, "f", sumAFb, sumBFb, 0.00001);
FLOAT_TYPE val = computeResidual(AFD, BFD, CFD, DFD, M, N);
CuAssertDblEquals_Msg(tc, "fNorm", val, 0, 0.00001);
CuAssertDblEquals_Msg(tc, "fColl", sumCFa, sumCFb, 0.00001);
}
///Clean up after test case
void cleanupTestCompareGpuStream()
{
free(AIH);free(BIH);free(CIH);free(AFH);free(BFH);free(CFH);
hipFree(AID);hipFree(BID);hipFree(CID);hipFree(AFD);hipFree(BFD);hipFree(CFD);hipFree(DFD);
}
CuSuite* gpuStreamGetSuite()
{
CuSuite* suite = CuSuiteNew();
SUITE_ADD_TCLN(suite, testCompareGpuStream, cleanupTestCompareGpuStream);
return suite;
} | e367b7a9d31f3460d9ee611c8a4bedb7a12f76d7.cu | /**
* Unittests for the boundary conditions
* @file TestGpuStream.cu
* @author Adam Koleszar ([email protected])
*/
#include "CuTest.h"
#include "GpuFunctions.h"
#include "ShellFunctions.h"
#include "ComputeResiduals.h"
#include "FloatType.h"
#include "TestUtils.h"
#include "ArrayUtils.h"
#include "GpuConstants.h"
#include "CellFunctions.h"
/**
* @brief Test to compare results from #gpuStreaming and #gpu_streaming
*
* @param tc test case
* @test
* - Prepare boundary conditions for the lid driven cavity
* - Run the function
* - Check the sum of all the arrays between the two algorithms
*/
void testCompareGpuStream(CuTest *tc)
{
printBanner("Test compare streaming");
M = 1281;
N = 1282;
cudaMemcpyToSymbol(width_d, &M, sizeof(int));
cudaMemcpyToSymbol(height_d, &N, sizeof(int));
dim3 tpb(THREADS);
dim3 bpg1((int)(M*N/THREADS)+1);
dim3 bpg9((int)(9*M*N/THREADS)+1);
AIH = createHostArrayInt(M*N); //fluid
createLidBcFluid(AIH, M, N);
BIH = createHostArrayInt(9*M*N, ARRAY_FILL, 1); //stream9
CIH = createHostArrayInt(8*M*N, ARRAY_FILL, 1); //stream8
AFH = createHostArrayFlt(9*M*N, ARRAY_ZERO); //f1
BFH = createHostArrayFlt(9*M*N, ARRAY_ZERO); //f2
CFH = createHostArrayFlt(9*M*N, ARRAY_FILL, 0.0002); //fColl2
AID = createGpuArrayInt(M*N, ARRAY_COPY, 0, AIH);
BID = createGpuArrayInt(9*M*N, ARRAY_COPY, 0, BIH);
CID = createGpuArrayInt(8*M*N, ARRAY_COPY, 0, CIH);
AFD = createGpuArrayFlt(9*M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(9*M*N, ARRAY_COPY, 0, BFH);
CFD = createGpuArrayFlt(9*M*N, ARRAY_COPY, 0, CFH);
DFD = createGpuArrayFlt(9*M*N);
int sumAIa = sumHostInt(AIH, M*N);
int sumBIa = sumHostInt(BIH, 9*M*N);
int sumCIa = sumHostInt(CIH, 8*M*N);
FLOAT_TYPE sumAFa = sumHostFlt(AFH, 9*M*N);
FLOAT_TYPE sumBFa = sumHostFlt(BFH, 9*M*N);
FLOAT_TYPE sumCFa = sumHostFlt(CFH, 9*M*N);
gpu_streaming<<<bpg9,tpb>>>(AID, BID, AFD, CFD);
gpuStreaming<<<bpg1,tpb>>>(AID, CID, BFD, CFD);
cudaMemcpy(AIH, AID, SIZEINT(N*M), cudaMemcpyDeviceToHost);
cudaMemcpy(BIH, BID, SIZEINT(9*N*M), cudaMemcpyDeviceToHost);
cudaMemcpy(CIH, CID, SIZEINT(8*N*M), cudaMemcpyDeviceToHost);
cudaMemcpy(AFH, AFD, SIZEFLT(9*N*M), cudaMemcpyDeviceToHost);
cudaMemcpy(BFH, BFD, SIZEFLT(9*N*M), cudaMemcpyDeviceToHost);
cudaMemcpy(CFH, CFD, SIZEFLT(9*N*M), cudaMemcpyDeviceToHost);
int sumAIb = sumHostInt(AIH, M*N);
int sumBIb = sumHostInt(BIH, 9*M*N);
int sumCIb = sumHostInt(CIH, 8*M*N);
FLOAT_TYPE sumAFb = sumHostFlt(AFH, 9*M*N);
FLOAT_TYPE sumBFb = sumHostFlt(BFH, 9*M*N);
FLOAT_TYPE sumCFb = sumHostFlt(CFH, 9*M*N);
CuAssertIntEquals_Msg(tc, "fluid", sumAIa, sumAIb);
CuAssertIntEquals_Msg(tc, "stream9", sumBIa, sumBIb);
CuAssertIntEquals_Msg(tc, "stream8", sumCIa, sumCIb);
CuAssertDblEquals_Msg(tc, "f", sumAFb, sumBFb, 0.00001);
FLOAT_TYPE val = computeResidual(AFD, BFD, CFD, DFD, M, N);
CuAssertDblEquals_Msg(tc, "fNorm", val, 0, 0.00001);
CuAssertDblEquals_Msg(tc, "fColl", sumCFa, sumCFb, 0.00001);
}
///Clean up after test case
void cleanupTestCompareGpuStream()
{
free(AIH);free(BIH);free(CIH);free(AFH);free(BFH);free(CFH);
cudaFree(AID);cudaFree(BID);cudaFree(CID);cudaFree(AFD);cudaFree(BFD);cudaFree(CFD);cudaFree(DFD);
}
CuSuite* gpuStreamGetSuite()
{
CuSuite* suite = CuSuiteNew();
SUITE_ADD_TCLN(suite, testCompareGpuStream, cleanupTestCompareGpuStream);
return suite;
} |
fc837445645096598392042ae26755315d9239f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<cmath>
#define TILE_SIZE 4 // Tile size and block size, both are taken as 4
__device__ void store_full_row(float*,float*,int,int, int, int);
__device__ void load_full_row(float*,float*,int,int, int, int);
__device__ void store_full(float*,float*,int,int,int, int, int);
__device__ void load_full(float*,float*,int,int,int, int, int);
__device__ void store_lower(float*,float*,int,int,int, int, int);
__device__ void load_lower(float*,float*,int,int,int, int, int);
__device__ void potrf_tile(float*);
__device__ void trsm_tile(float*,int,int,int);
__device__ void syrk_tile(float*,float*,int,int,int);
__global__ void right_looking_launch_kernel(float*,int,int,int,int);
__device__ void store_zeros(float*,int,int);
__device__ void store_full_row(float* read_data,float* write_data,int i,int N, int M, int shared_size_single_matrix)
{
int global_y;
int global_x = i*blockDim.y + threadIdx.y;
for(int j=0;j<N/TILE_SIZE;j++)
{
global_y = j*blockDim.z + threadIdx.z;
write_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x] = read_data[threadIdx.y + (TILE_SIZE+1)*global_y + threadIdx.x*shared_size_single_matrix];
}
__syncthreads();
}
__device__ void load_full_row(float* read_data,float* write_data,int i,int N, int M, int shared_size_single_matrix)
{
int global_y;
int global_x = i*blockDim.y + threadIdx.y;
for(int j=0;j<N/TILE_SIZE;j++)
{
global_y = j*blockDim.z + threadIdx.z;
write_data[threadIdx.y + (TILE_SIZE+1)*global_y + threadIdx.x*shared_size_single_matrix] = read_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x];
// printf("%d, %d\n", threadIdx.y + (TILE_SIZE+1)*global_y + threadIdx.x*shared_size_single_matrix, global_y*N*M + global_x*M + threadIdx.x);
}
__syncthreads();
}
__device__ void store_full(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
write_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x] = read_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix];
__syncthreads();
}
__device__ void load_full(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
write_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix] = read_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x];
__syncthreads();
}
__device__ void store_lower(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
// printf("%f is at %d\n", read_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix], threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix);
if(threadIdx.z >= threadIdx.y)
write_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x] = read_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix];
else
write_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x] = 0.0;
__syncthreads();
}
__device__ void load_lower(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
if(threadIdx.z >= threadIdx.y)
write_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix] = read_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x];
else
write_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix] = 0.0;
__syncthreads();
}
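// Unblocked, in-place Cholesky factorization of a single TILE_SIZE x TILE_SIZE tile kept in
// shared memory; on exit the lower triangle of t_A holds the tile's Cholesky factor.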
__device__ void potrf_tile(float* t_A)
{
int t_x = threadIdx.y;
int t_y = threadIdx.z;
__shared__ float temp2;
for(int k=0;k<TILE_SIZE;k++)
{
if(t_x==t_y && t_x==k)
{
t_A[k*(TILE_SIZE+1) + k] = sqrtf(t_A[k*(TILE_SIZE+1) + k]);
temp2 = t_A[k*(TILE_SIZE+1) + k];
}
__syncthreads();
if(t_x<t_y && t_x == k)
{
t_A[t_y*(TILE_SIZE+1) + k]/= temp2;
}
__syncthreads();
if(k<t_y && k<t_x && t_x<=t_y)
{
t_A[t_y*(TILE_SIZE+1) + t_x]-= t_A[t_x*(TILE_SIZE+1) + k]*t_A[t_y*(TILE_SIZE+1) + k];
}
__syncthreads();
}
}
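// Triangular solve (TRSM step): updates tile j of the current block row, held in row_data,
// against the already factored diagonal tile i, so that tile j receives its part of the
// Cholesky factor via column-wise forward substitution.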
__device__ void trsm_tile(float *row_data,int i,int j,int N)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
int t_x = threadIdx.y;
int t_y = threadIdx.z;
for(int s=0;s<TILE_SIZE;s++)
{
if(t_x==s)
{
row_data[global_y*(TILE_SIZE+1) + t_x]/= row_data[global_x*(TILE_SIZE+1) + t_x];
}
__syncthreads();
if(t_x > s)
{
row_data[global_y*(TILE_SIZE+1) + t_x]-= row_data[global_x*(TILE_SIZE+1) + s]*row_data[global_y*(TILE_SIZE+1) + s];
}
__syncthreads();
}
}
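// Trailing update (SYRK/GEMM step): accumulates the product of the block-row tiles i and j
// from row_data and subtracts it from the trailing tile held in edit_data.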
__device__ void syrk_tile(float* row_data,float* edit_data,int i,int j,int N)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
int t_y = threadIdx.z;
int t_x = threadIdx.y;
float valueToSubtract = 0.0;
for(int r=0;r<TILE_SIZE;r++)
{
valueToSubtract+= row_data[r + global_y*(TILE_SIZE+1)]*row_data[r + global_x*(TILE_SIZE+1)];
}
edit_data[t_y*(TILE_SIZE+1) + t_x]-= valueToSubtract;
__syncthreads();
}
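// Clears the off-diagonal tiles on the unused triangular half of every matrix in global
// memory so that only the computed triangular factor remains.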
__device__ void store_zeros(float* write_data,int N, int M)
{
int t_y = threadIdx.z;
int t_x = threadIdx.y;
int i,j;
for(i=0;i<N/TILE_SIZE-1;i++)
{
for(j=i+1;j<N/TILE_SIZE;j++)
{
int global_x = j*blockDim.z + threadIdx.z;
int global_y = i*blockDim.y + threadIdx.y;
write_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x +threadIdx.x] = 0.0;
}
// A[j*blockDim.x + t_x + (i*blockDim.y + t_y)*N] = 0.0;
}
__syncthreads();
}
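// Batched right-looking blocked Cholesky: threadIdx.x selects the matrix handled by this
// thread, while threadIdx.y/z address the elements of a tile. For every block row (panel)
// the kernel factors the diagonal tile, applies the triangular solve to the remaining tiles
// of that row, performs the rank-k updates on the trailing tiles, and finally zeroes the
// unused triangular half of each matrix.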
__global__ void right_looking_launch_kernel(float* read_data,int N, int M , int num_of_matrices_per_block, int shared_size_single_matrix) // N -> dim, M -> num of matrices per block
{
int no_of_tiles = (N / TILE_SIZE) + (N % TILE_SIZE != 0);
int tx = threadIdx.x;
float *rA1 = NULL;
extern __shared__ float row_data[];
// __shared__ float tile_data[TILE_SIZE*(TILE_SIZE+1)]; // Using TILE_SIZE+1 to avoid bank conflicts in Shared Memory
int tile_data_index = M * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1);
// __shared__ float* tile_data = &row_data[M * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1)];
int shared_size_single_matrix_tile_data = TILE_SIZE * (TILE_SIZE + 1);
int i,j,k;
for(i=0;i<N/TILE_SIZE;i++)
{
load_lower(read_data,&row_data[tile_data_index],i,i,N, M, shared_size_single_matrix_tile_data);
// printf("%d \n", tile_data_index + shared_size_single_matrix_tile_data * M);
// if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
// for (int z = tile_data_index; z < tile_data_index + shared_size_single_matrix_tile_data * M; z++) {
// printf("%f is at %d\n", row_data[z], z);
// }
// }
rA1 = &row_data[tile_data_index + tx*shared_size_single_matrix_tile_data];
// printf("%d\n", tx*shared_size_single_matrix_tile_data);
// potrf_tile(tile_data);
potrf_tile(rA1);
store_lower(&row_data[tile_data_index],read_data,i,i,N, M, shared_size_single_matrix_tile_data);
load_full_row(read_data,row_data,i,N, M, shared_size_single_matrix);
for(j=i+1;j<N/TILE_SIZE;j++)
{
trsm_tile(&row_data[tx*shared_size_single_matrix],i,j,N);
for(k=i+1;k<j;k++)
{
load_full(read_data,&row_data[tile_data_index],k,j,N, M, shared_size_single_matrix_tile_data);
rA1 = &row_data[tile_data_index + tx*shared_size_single_matrix_tile_data];
// syrk_tile(row_data,tile_data,k,j,N);
syrk_tile(&row_data[tx*shared_size_single_matrix],rA1,k,j,N);
store_full(&row_data[tile_data_index],read_data,k,j,N, M, shared_size_single_matrix_tile_data);
}
load_full(read_data,&row_data[tile_data_index],k,j,N, M, shared_size_single_matrix_tile_data);
syrk_tile(&row_data[tx*shared_size_single_matrix],&row_data[tile_data_index + tx*shared_size_single_matrix_tile_data],k,j,N);
store_full(&row_data[tile_data_index],read_data,k,j,N, M, shared_size_single_matrix_tile_data);
}
store_full_row(row_data,read_data,i,N, M, shared_size_single_matrix);
}
store_zeros(read_data,N,M);
}
int main()
{
// int n,N;
// printf("Enter dimension (N) : ");
// scanf("%d",&n);
// if((n%TILE_SIZE)==0)
// N = n;
// else
// N = (((int) (n/TILE_SIZE)) + 1)*TILE_SIZE;
// size_t size = N*N*sizeof(float);
// float *M = (float *)malloc(size);
// if(M == NULL)
// {
// fprintf(stderr,"Failed to allocate host vectors!\n");
// exit(EXIT_FAILURE);
// }
// int i,j;
// printf("Enter input matrix: \n");
// for(i=0;i<N;i++)
// {
// for(j=0;j<N;j++)
// {
// if(i>=n || j>=n)
// M[i*N + j] = 1; //Padding the matrix with 1
// else
// scanf("%f",&M[i*N + j]);
// }
// }
FILE *fptr;
fptr = fopen("./dataset/size4_256matrices.txt", "r");
int num_of_matrices, dim_of_matrix;
fscanf(fptr, "%d", &num_of_matrices);
fscanf(fptr, "%d", &dim_of_matrix);
float read_element;
float* h_A = NULL;
int numElements = num_of_matrices * dim_of_matrix * dim_of_matrix;
size_t size = numElements * sizeof(float);
hipDeviceProp_t devp;
hipGetDeviceProperties(&devp, 0);
h_A = (float *)malloc(size);
int global_id = 0;
for (int matrix_index = 0; matrix_index < num_of_matrices; matrix_index++)
{
for (int row = 0; row < dim_of_matrix; row++)
{
for (int column = 0; column < dim_of_matrix; column++)
{
fscanf(fptr, "%f", &read_element);
global_id = row * dim_of_matrix * num_of_matrices + column * num_of_matrices + matrix_index;
h_A[global_id] = read_element;
// printf("At pos %d we get %0.2f\n", global_id, h_A[global_id]);
// printf("%0.2f \n ", h_A[global_id]);
}
}
}
printf("\nRead from the input file successfully!\n");
fclose(fptr);
printf("\nPrinting the host-side input array read from the input file:\n");
for (int i = 0; i < numElements; i++) {
printf("%f ", h_A[i]);
}
printf("\n\n");
// hipError_t err = hipSuccess;
// float *read_data = NULL;
// err = hipMalloc((void **)&read_data,N*N*sizeof(float));
// if(err != hipSuccess)
// {
// fprintf(stderr,"Failed to allocate matrix on the CUDA device! (error code %s)\n",hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// printf("Coping the matrix from host memory to device memory\n");
// err = hipMemcpy(read_data,M,size,hipMemcpyHostToDevice);
// if(err != hipSuccess)
// {
// fprintf(stderr,"Failed to copy matrix from host to device (error code %s)\n",hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// printf("Testing for matrix M [%dx%d]\n",N,N);
hipError_t err = hipSuccess;
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
else {
printf("Copied the h_A to device side successfully!\n\n");
}
// dim3 grid(1,1,1);
// dim3 block(TILE_SIZE,TILE_SIZE,1);
// size_t shared_size = (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1)*sizeof(float);
//hipLaunchKernelGGL(( right_looking_launch_kernel), dim3(grid),dim3(block),shared_size, 0, read_data,N);
// err = hipMemcpy(M,read_data,size,hipMemcpyDeviceToHost);
// if(err != hipSuccess)
// {
// fprintf(stderr, "Failed to copy the output matrix M from device to Host (error code %s)\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// int num_of_matrices_per_block = num_of_matrices;
int num_of_matrices_per_block = min(128/(TILE_SIZE * TILE_SIZE) , num_of_matrices);
dim3 grid((num_of_matrices) / num_of_matrices_per_block , 1, 1);
dim3 block(num_of_matrices_per_block, TILE_SIZE, TILE_SIZE);
// dim3 grid(1, 1, 1);
// dim3 block(num_of_matrices, TILE_SIZE, TILE_SIZE);
// no of tiles in a column
// int INPUT_SIZE = dim_of_matrix;
// int no_of_tiles = (INPUT_SIZE / TILE_SIZE) + (INPUT_SIZE % TILE_SIZE != 0); // ceil of (INPUT_SIZE / TILE_SIZE)
int N = dim_of_matrix;
size_t shared_size = num_of_matrices * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1)*sizeof(float) + num_of_matrices_per_block * TILE_SIZE*(TILE_SIZE+1) * sizeof(float);
hipLaunchKernelGGL(( right_looking_launch_kernel), dim3(grid),dim3(block),shared_size, 0, d_A, dim_of_matrix, num_of_matrices, num_of_matrices ,(num_of_matrices * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1))/num_of_matrices);
//left_looking_kernel<<<grid, block, num_of_matrices_per_block * 1 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,1 * TILE_SIZE * TILE_SIZE);
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess) {
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
}
// if(TILE_SIZE == INPUT_SIZE)
// {
// // printf("The if statement works.\n");
// left_looking_kernel<<<grid, block, num_of_matrices * 1 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,1 * TILE_SIZE * TILE_SIZE);
// }
// else if((no_of_tiles + 2) * TILE_SIZE * TILE_SIZE * sizeof(float) < devp.sharedMemPerBlock)
// {
// //printf("The if statement works.\n");
// left_looking_kernel_less_mem<<<grid, block, num_of_matrices * 4 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,4 * TILE_SIZE * TILE_SIZE);
// // left_looking_kernel<<<grid, block,num_of_matrices * (no_of_tiles + 2) * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,(no_of_tiles + 2) * TILE_SIZE * TILE_SIZE);
// }
// else
// {
// left_looking_kernel_less_mem<<<grid, block, num_of_matrices * 4 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,4 * TILE_SIZE * TILE_SIZE);
// }
// printf("Printing output matrix\n");
// for(i=0;i<n;i++)
// {
// for(j=0;j<n;j++)
// {
// if(j<=i)
// printf("%f\t",M[i*N + j]);
// else
// printf("%f\t",0.0);
// }
// printf("\n");
// }
// err = hipFree(read_data);
// if(err != hipSuccess)
// {
// fprintf(stderr, "Failed to free device matrix M (error code %s)\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// err = hipDeviceReset();
// if(err != hipSuccess)
// {
// fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", hipGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// free(M);
// printf("DONE!\n");
err = hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
else {
printf("\nCopied d_A to host side successfully!\n");
}
printf("\nPrinting the output of cudememcopyDeviceToHost, i.e. the host-side array returned from device side:\n");
for (int i = 0; i < numElements; i++) {
printf("%f ", h_A[i]);
}
err = hipFree(d_A);
if(err != hipSuccess)
{
fprintf(stderr, "\nFailed to free device matrix M (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipDeviceReset();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
FILE *fptr1;
fptr1 = fopen("./output_r.txt", "w+");
float write_element;
fprintf(fptr1, "%d\n", num_of_matrices);
fprintf(fptr1, "%d\n", dim_of_matrix);
for (int matrix_index = 0; matrix_index < num_of_matrices; matrix_index++)
{
for (int row = 0; row < dim_of_matrix; row++)
{
for (int column = 0; column < dim_of_matrix; column++)
{
//write_element = h_A[matrix_index * dim_of_matrix * dim_of_matrix + row * dim_of_matrix + column];
global_id = row * dim_of_matrix * num_of_matrices + column * num_of_matrices + matrix_index;
write_element = h_A[global_id] ;
fprintf(fptr1, "%0.2f ", write_element);
}
fprintf(fptr1,"\n");
}
fprintf(fptr1,"\n");
}
fclose(fptr1);
free(h_A);
printf("\n\nAll tasks completed successfully!\n\n");
return 0;
}
| fc837445645096598392042ae26755315d9239f5.cu | #include<cuda.h>
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<cmath>
#define TILE_SIZE 4 // Tile size and block size, both are taken as 4
__device__ void store_full_row(float*,float*,int,int, int, int);
__device__ void load_full_row(float*,float*,int,int, int, int);
__device__ void store_full(float*,float*,int,int,int, int, int);
__device__ void load_full(float*,float*,int,int,int, int, int);
__device__ void store_lower(float*,float*,int,int,int, int, int);
__device__ void load_lower(float*,float*,int,int,int, int, int);
__device__ void potrf_tile(float*);
__device__ void trsm_tile(float*,int,int,int);
__device__ void syrk_tile(float*,float*,int,int,int);
__global__ void right_looking_launch_kernel(float*,int,int,int,int);
__device__ void store_zeros(float*,int,int);
__device__ void store_full_row(float* read_data,float* write_data,int i,int N, int M, int shared_size_single_matrix)
{
int global_y;
int global_x = i*blockDim.y + threadIdx.y;
for(int j=0;j<N/TILE_SIZE;j++)
{
global_y = j*blockDim.z + threadIdx.z;
write_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x] = read_data[threadIdx.y + (TILE_SIZE+1)*global_y + threadIdx.x*shared_size_single_matrix];
}
__syncthreads();
}
__device__ void load_full_row(float* read_data,float* write_data,int i,int N, int M, int shared_size_single_matrix)
{
int global_y;
int global_x = i*blockDim.y + threadIdx.y;
for(int j=0;j<N/TILE_SIZE;j++)
{
global_y = j*blockDim.z + threadIdx.z;
write_data[threadIdx.y + (TILE_SIZE+1)*global_y + threadIdx.x*shared_size_single_matrix] = read_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x];
// printf("%d, %d\n", threadIdx.y + (TILE_SIZE+1)*global_y + threadIdx.x*shared_size_single_matrix, global_y*N*M + global_x*M + threadIdx.x);
}
__syncthreads();
}
__device__ void store_full(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
write_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x] = read_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix];
__syncthreads();
}
__device__ void load_full(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
write_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix] = read_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x];
__syncthreads();
}
__device__ void store_lower(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
// printf("%f is at %d\n", read_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix], threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix);
if(threadIdx.z >= threadIdx.y)
write_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x] = read_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix];
else
write_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x] = 0.0;
__syncthreads();
}
__device__ void load_lower(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
if(threadIdx.z >= threadIdx.y)
write_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix] = read_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x + threadIdx.x];
else
write_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix] = 0.0;
__syncthreads();
}
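// Unblocked, in-place Cholesky factorization of a single TILE_SIZE x TILE_SIZE tile kept in
// shared memory; on exit the lower triangle of t_A holds the tile's Cholesky factor.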
__device__ void potrf_tile(float* t_A)
{
int t_x = threadIdx.y;
int t_y = threadIdx.z;
__shared__ float temp2;
for(int k=0;k<TILE_SIZE;k++)
{
if(t_x==t_y && t_x==k)
{
t_A[k*(TILE_SIZE+1) + k] = sqrtf(t_A[k*(TILE_SIZE+1) + k]);
temp2 = t_A[k*(TILE_SIZE+1) + k];
}
__syncthreads();
if(t_x<t_y && t_x == k)
{
t_A[t_y*(TILE_SIZE+1) + k]/= temp2;
}
__syncthreads();
if(k<t_y && k<t_x && t_x<=t_y)
{
t_A[t_y*(TILE_SIZE+1) + t_x]-= t_A[t_x*(TILE_SIZE+1) + k]*t_A[t_y*(TILE_SIZE+1) + k];
}
__syncthreads();
}
}
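// Triangular solve (TRSM step): updates tile j of the current block row, held in row_data,
// against the already factored diagonal tile i, so that tile j receives its part of the
// Cholesky factor via column-wise forward substitution.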
__device__ void trsm_tile(float *row_data,int i,int j,int N)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
int t_x = threadIdx.y;
int t_y = threadIdx.z;
for(int s=0;s<TILE_SIZE;s++)
{
if(t_x==s)
{
row_data[global_y*(TILE_SIZE+1) + t_x]/= row_data[global_x*(TILE_SIZE+1) + t_x];
}
__syncthreads();
if(t_x > s)
{
row_data[global_y*(TILE_SIZE+1) + t_x]-= row_data[global_x*(TILE_SIZE+1) + s]*row_data[global_y*(TILE_SIZE+1) + s];
}
__syncthreads();
}
}
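// Trailing update (SYRK/GEMM step): accumulates the product of the block-row tiles i and j
// from row_data and subtracts it from the trailing tile held in edit_data.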
__device__ void syrk_tile(float* row_data,float* edit_data,int i,int j,int N)
{
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
int t_y = threadIdx.z;
int t_x = threadIdx.y;
float valueToSubtract = 0.0;
for(int r=0;r<TILE_SIZE;r++)
{
valueToSubtract+= row_data[r + global_y*(TILE_SIZE+1)]*row_data[r + global_x*(TILE_SIZE+1)];
}
edit_data[t_y*(TILE_SIZE+1) + t_x]-= valueToSubtract;
__syncthreads();
}
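// Clears the off-diagonal tiles on the unused triangular half of every matrix in global
// memory so that only the computed triangular factor remains.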
__device__ void store_zeros(float* write_data,int N, int M)
{
int t_y = threadIdx.z;
int t_x = threadIdx.y;
int i,j;
for(i=0;i<N/TILE_SIZE-1;i++)
{
for(j=i+1;j<N/TILE_SIZE;j++)
{
int global_x = j*blockDim.z + threadIdx.z;
int global_y = i*blockDim.y + threadIdx.y;
write_data[global_y*N*M + global_x*M + blockIdx.x * blockDim.x +threadIdx.x] = 0.0;
}
// A[j*blockDim.x + t_x + (i*blockDim.y + t_y)*N] = 0.0;
}
__syncthreads();
}
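// Batched right-looking blocked Cholesky: threadIdx.x selects the matrix handled by this
// thread, while threadIdx.y/z address the elements of a tile. For every block row (panel)
// the kernel factors the diagonal tile, applies the triangular solve to the remaining tiles
// of that row, performs the rank-k updates on the trailing tiles, and finally zeroes the
// unused triangular half of each matrix.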
__global__ void right_looking_launch_kernel(float* read_data,int N, int M , int num_of_matrices_per_block, int shared_size_single_matrix) // N -> dim, M -> num of matrices per block
{
int no_of_tiles = (N / TILE_SIZE) + (N % TILE_SIZE != 0);
int tx = threadIdx.x;
float *rA1 = NULL;
extern __shared__ float row_data[];
// __shared__ float tile_data[TILE_SIZE*(TILE_SIZE+1)]; // Using TILE_SIZE+1 to avoid bank conflicts in Shared Memory
int tile_data_index = M * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1);
// __shared__ float* tile_data = &row_data[M * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1)];
int shared_size_single_matrix_tile_data = TILE_SIZE * (TILE_SIZE + 1);
int i,j,k;
for(i=0;i<N/TILE_SIZE;i++)
{
load_lower(read_data,&row_data[tile_data_index],i,i,N, M, shared_size_single_matrix_tile_data);
// printf("%d \n", tile_data_index + shared_size_single_matrix_tile_data * M);
// if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
// for (int z = tile_data_index; z < tile_data_index + shared_size_single_matrix_tile_data * M; z++) {
// printf("%f is at %d\n", row_data[z], z);
// }
// }
rA1 = &row_data[tile_data_index + tx*shared_size_single_matrix_tile_data];
// printf("%d\n", tx*shared_size_single_matrix_tile_data);
// potrf_tile(tile_data);
potrf_tile(rA1);
store_lower(&row_data[tile_data_index],read_data,i,i,N, M, shared_size_single_matrix_tile_data);
load_full_row(read_data,row_data,i,N, M, shared_size_single_matrix);
for(j=i+1;j<N/TILE_SIZE;j++)
{
trsm_tile(&row_data[tx*shared_size_single_matrix],i,j,N);
for(k=i+1;k<j;k++)
{
load_full(read_data,&row_data[tile_data_index],k,j,N, M, shared_size_single_matrix_tile_data);
rA1 = &row_data[tile_data_index + tx*shared_size_single_matrix_tile_data];
// syrk_tile(row_data,tile_data,k,j,N);
syrk_tile(&row_data[tx*shared_size_single_matrix],rA1,k,j,N);
store_full(&row_data[tile_data_index],read_data,k,j,N, M, shared_size_single_matrix_tile_data);
}
load_full(read_data,&row_data[tile_data_index],k,j,N, M, shared_size_single_matrix_tile_data);
syrk_tile(&row_data[tx*shared_size_single_matrix],&row_data[tile_data_index + tx*shared_size_single_matrix_tile_data],k,j,N);
store_full(&row_data[tile_data_index],read_data,k,j,N, M, shared_size_single_matrix_tile_data);
}
store_full_row(row_data,read_data,i,N, M, shared_size_single_matrix);
}
store_zeros(read_data,N,M);
}
int main()
{
// int n,N;
// printf("Enter dimension (N) : ");
// scanf("%d",&n);
// if((n%TILE_SIZE)==0)
// N = n;
// else
// N = (((int) (n/TILE_SIZE)) + 1)*TILE_SIZE;
// size_t size = N*N*sizeof(float);
// float *M = (float *)malloc(size);
// if(M == NULL)
// {
// fprintf(stderr,"Failed to allocate host vectors!\n");
// exit(EXIT_FAILURE);
// }
// int i,j;
// printf("Enter input matrix: \n");
// for(i=0;i<N;i++)
// {
// for(j=0;j<N;j++)
// {
// if(i>=n || j>=n)
// M[i*N + j] = 1; //Padding the matrix with 1
// else
// scanf("%f",&M[i*N + j]);
// }
// }
FILE *fptr;
fptr = fopen("./dataset/size4_256matrices.txt", "r");
int num_of_matrices, dim_of_matrix;
fscanf(fptr, "%d", &num_of_matrices);
fscanf(fptr, "%d", &dim_of_matrix);
float read_element;
float* h_A = NULL;
int numElements = num_of_matrices * dim_of_matrix * dim_of_matrix;
size_t size = numElements * sizeof(float);
cudaDeviceProp devp;
cudaGetDeviceProperties(&devp, 0);
h_A = (float *)malloc(size);
int global_id = 0;
for (int matrix_index = 0; matrix_index < num_of_matrices; matrix_index++)
{
for (int row = 0; row < dim_of_matrix; row++)
{
for (int column = 0; column < dim_of_matrix; column++)
{
fscanf(fptr, "%f", &read_element);
global_id = row * dim_of_matrix * num_of_matrices + column * num_of_matrices + matrix_index;
h_A[global_id] = read_element;
// printf("At pos %d we get %0.2f\n", global_id, h_A[global_id]);
// printf("%0.2f \n ", h_A[global_id]);
}
}
}
printf("\nRead from the input file successfully!\n");
fclose(fptr);
printf("\nPrinting the host-side input array read from the input file:\n");
for (int i = 0; i < numElements; i++) {
printf("%f ", h_A[i]);
}
printf("\n\n");
// cudaError_t err = cudaSuccess;
// float *read_data = NULL;
// err = cudaMalloc((void **)&read_data,N*N*sizeof(float));
// if(err != cudaSuccess)
// {
// fprintf(stderr,"Failed to allocate matrix on the CUDA device! (error code %s)\n",cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// printf("Coping the matrix from host memory to device memory\n");
// err = cudaMemcpy(read_data,M,size,cudaMemcpyHostToDevice);
// if(err != cudaSuccess)
// {
// fprintf(stderr,"Failed to copy matrix from host to device (error code %s)\n",cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// printf("Testing for matrix M [%dx%d]\n",N,N);
cudaError_t err = cudaSuccess;
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
else {
printf("Copied the h_A to device side successfully!\n\n");
}
// dim3 grid(1,1,1);
// dim3 block(TILE_SIZE,TILE_SIZE,1);
// size_t shared_size = (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1)*sizeof(float);
// right_looking_launch_kernel<<<grid,block,shared_size>>>(read_data,N);
// err = cudaMemcpy(M,read_data,size,cudaMemcpyDeviceToHost);
// if(err != cudaSuccess)
// {
// fprintf(stderr, "Failed to copy the output matrix M from device to Host (error code %s)\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// int num_of_matrices_per_block = num_of_matrices;
int num_of_matrices_per_block = min(128/(TILE_SIZE * TILE_SIZE) , num_of_matrices);
dim3 grid((num_of_matrices) / num_of_matrices_per_block , 1, 1);
dim3 block(num_of_matrices_per_block, TILE_SIZE, TILE_SIZE);
// dim3 grid(1, 1, 1);
// dim3 block(num_of_matrices, TILE_SIZE, TILE_SIZE);
// no of tiles in a column
// int INPUT_SIZE = dim_of_matrix;
// int no_of_tiles = (INPUT_SIZE / TILE_SIZE) + (INPUT_SIZE % TILE_SIZE != 0); // ceil of (INPUT_SIZE / TILE_SIZE)
int N = dim_of_matrix;
size_t shared_size = num_of_matrices * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1)*sizeof(float) + num_of_matrices_per_block * TILE_SIZE*(TILE_SIZE+1) * sizeof(float);
right_looking_launch_kernel<<<grid,block,shared_size>>>(d_A, dim_of_matrix, num_of_matrices, num_of_matrices ,(num_of_matrices * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1))/num_of_matrices);
//left_looking_kernel<<<grid, block, num_of_matrices_per_block * 1 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,1 * TILE_SIZE * TILE_SIZE);
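// cudaDeviceSynchronize() blocks until the kernel finishes and returns any launch or
// execution error, which the check below reports.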
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess) {
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
}
// if(TILE_SIZE == INPUT_SIZE)
// {
// // printf("The if statement works.\n");
// left_looking_kernel<<<grid, block, num_of_matrices * 1 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,1 * TILE_SIZE * TILE_SIZE);
// }
// else if((no_of_tiles + 2) * TILE_SIZE * TILE_SIZE * sizeof(float) < devp.sharedMemPerBlock)
// {
// //printf("The if statement works.\n");
// left_looking_kernel_less_mem<<<grid, block, num_of_matrices * 4 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,4 * TILE_SIZE * TILE_SIZE);
// // left_looking_kernel<<<grid, block,num_of_matrices * (no_of_tiles + 2) * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,(no_of_tiles + 2) * TILE_SIZE * TILE_SIZE);
// }
// else
// {
// left_looking_kernel_less_mem<<<grid, block, num_of_matrices * 4 * TILE_SIZE * TILE_SIZE * sizeof(float)>>>(d_A, dim_of_matrix, num_of_matrices ,4 * TILE_SIZE * TILE_SIZE);
// }
// printf("Printing output matrix\n");
// for(i=0;i<n;i++)
// {
// for(j=0;j<n;j++)
// {
// if(j<=i)
// printf("%f\t",M[i*N + j]);
// else
// printf("%f\t",0.0);
// }
// printf("\n");
// }
// err = cudaFree(read_data);
// if(err != cudaSuccess)
// {
// fprintf(stderr, "Failed to free device matrix M (error code %s)\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// err = cudaDeviceReset();
// if(err != cudaSuccess)
// {
// fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", cudaGetErrorString(err));
// exit(EXIT_FAILURE);
// }
// free(M);
// printf("DONE!\n");
err = cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
else {
printf("\nCopied d_A to host side successfully!\n");
}
printf("\nPrinting the output of cudememcopyDeviceToHost, i.e. the host-side array returned from device side:\n");
for (int i = 0; i < numElements; i++) {
printf("%f ", h_A[i]);
}
err = cudaFree(d_A);
if(err != cudaSuccess)
{
fprintf(stderr, "\nFailed to free device matrix M (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaDeviceReset();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
FILE *fptr1;
fptr1 = fopen("./output_r.txt", "w+");
float write_element;
fprintf(fptr1, "%d\n", num_of_matrices);
fprintf(fptr1, "%d\n", dim_of_matrix);
for (int matrix_index = 0; matrix_index < num_of_matrices; matrix_index++)
{
for (int row = 0; row < dim_of_matrix; row++)
{
for (int column = 0; column < dim_of_matrix; column++)
{
//write_element = h_A[matrix_index * dim_of_matrix * dim_of_matrix + row * dim_of_matrix + column];
global_id = row * dim_of_matrix * num_of_matrices + column * num_of_matrices + matrix_index;
write_element = h_A[global_id] ;
fprintf(fptr1, "%0.2f ", write_element);
}
fprintf(fptr1,"\n");
}
fprintf(fptr1,"\n");
}
fclose(fptr1);
free(h_A);
printf("\n\nAll tasks completed successfully!\n\n");
return 0;
}
|
b7ef1324542929d61f54d2999610e84ea30aab89.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO Avro reader class implementation
**/
#include "reader_impl.hpp"
#include <io/comp/gpuinflate.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
namespace cudf {
namespace io {
namespace detail {
namespace avro {
// Import functionality that's independent of legacy code
using namespace cudf::io::avro;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates Avro data kind to cuDF type enum
**/
type_id to_type_id(const avro::schema_entry *col)
{
switch (col->kind) {
case avro::type_boolean: return type_id::BOOL8;
case avro::type_int: return type_id::INT32;
case avro::type_long: return type_id::INT64;
case avro::type_float: return type_id::FLOAT32;
case avro::type_double: return type_id::FLOAT64;
case avro::type_bytes:
case avro::type_string: return type_id::STRING;
case avro::type_enum: return (!col->symbols.empty()) ? type_id::STRING : type_id::INT32;
default: return type_id::EMPTY;
}
}
} // namespace
/**
* @brief A helper wrapper for Avro file metadata. Provides some additional
* convenience methods for initializing and accessing the metadata and schema
**/
class metadata : public file_metadata {
public:
explicit metadata(datasource *const src) : source(src) {}
/**
* @brief Initializes the parser and filters down to a subset of rows
*
* @param[in,out] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
**/
void init_and_select_rows(int &row_start, int &row_count)
{
const auto buffer = source->host_read(0, source->size());
avro::container pod(buffer->data(), buffer->size());
CUDF_EXPECTS(pod.parse(this, row_count, row_start), "Cannot parse metadata");
row_start = skip_rows;
row_count = num_rows;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
*
* @return List of pairs of column indexes and names
**/
auto select_columns(std::vector<std::string> use_names)
{
std::vector<std::pair<int, std::string>> selection;
const auto num_avro_columns = static_cast<int>(columns.size());
if (!use_names.empty()) {
int index = 0;
for (const auto &use_name : use_names) {
for (int i = 0; i < num_avro_columns; ++i, ++index) {
if (index >= num_avro_columns) { index = 0; }
if (columns[index].name == use_name &&
type_id::EMPTY != to_type_id(&schema[columns[index].schema_data_idx])) {
selection.emplace_back(index, columns[index].name);
index++;
break;
}
}
}
} else {
for (int i = 0; i < num_avro_columns; ++i) {
// Exclude array columns (unsupported)
bool column_in_array = false;
for (int parent_idx = schema[columns[i].schema_data_idx].parent_idx; parent_idx > 0;
parent_idx = schema[parent_idx].parent_idx) {
if (schema[parent_idx].kind == avro::type_array) {
column_in_array = true;
break;
}
}
if (!column_in_array) {
auto col_type = to_type_id(&schema[columns[i].schema_data_idx]);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type");
selection.emplace_back(i, columns[i].name);
}
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
return selection;
}
private:
datasource *const source;
};
rmm::device_buffer reader::impl::decompress_data(const rmm::device_buffer &comp_block_data,
hipStream_t stream)
{
size_t uncompressed_data_size = 0;
hostdevice_vector<gpu_inflate_input_s> inflate_in(_metadata->block_list.size());
hostdevice_vector<gpu_inflate_status_s> inflate_out(_metadata->block_list.size());
if (_metadata->codec == "deflate") {
// Guess an initial maximum uncompressed block size
uint32_t initial_blk_len = (_metadata->max_block_size * 2 + 0xfff) & ~0xfff;
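// i.e. twice the maximum compressed block size, rounded up to the next 4 KiB boundary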
uncompressed_data_size = initial_blk_len * _metadata->block_list.size();
for (size_t i = 0; i < inflate_in.size(); ++i) { inflate_in[i].dstSize = initial_blk_len; }
} else if (_metadata->codec == "snappy") {
// Extract the uncompressed length from the snappy stream
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
const auto buffer = _source->host_read(_metadata->block_list[i].offset, 4);
const uint8_t *blk = buffer->data();
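// A snappy stream is prefixed with its uncompressed length as a little-endian base-128
// varint; the code below decodes up to four bytes of it.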
uint32_t blk_len = blk[0];
if (blk_len > 0x7f) {
blk_len = (blk_len & 0x7f) | (blk[1] << 7);
if (blk_len > 0x3fff) {
blk_len = (blk_len & 0x3fff) | (blk[2] << 14);
if (blk_len > 0x1fffff) { blk_len = (blk_len & 0x1fffff) | (blk[3] << 21); }
}
}
inflate_in[i].dstSize = blk_len;
uncompressed_data_size += blk_len;
}
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
rmm::device_buffer decomp_block_data(uncompressed_data_size, stream);
const auto base_offset = _metadata->block_list[0].offset;
for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) {
const auto src_pos = _metadata->block_list[i].offset - base_offset;
inflate_in[i].srcDevice = static_cast<const uint8_t *>(comp_block_data.data()) + src_pos;
inflate_in[i].srcSize = _metadata->block_list[i].size;
inflate_in[i].dstDevice = static_cast<uint8_t *>(decomp_block_data.data()) + dst_pos;
// Update blocks offsets & sizes to refer to uncompressed data
_metadata->block_list[i].offset = dst_pos;
_metadata->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += _metadata->block_list[i].size;
}
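// Run decompression at most twice: the first pass uses the guessed output sizes; if a
// deflate block reports its buffer was too small, the buffers are resized to the actual
// sizes and the pass is repeated.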
for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) {
CUDA_TRY(hipMemcpyAsync(inflate_in.device_ptr(),
inflate_in.host_ptr(),
inflate_in.memory_size(),
hipMemcpyHostToDevice,
stream));
CUDA_TRY(hipMemsetAsync(inflate_out.device_ptr(), 0, inflate_out.memory_size(), stream));
if (_metadata->codec == "deflate") {
CUDA_TRY(gpuinflate(
inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), 0, stream));
} else if (_metadata->codec == "snappy") {
CUDA_TRY(
gpu_unsnap(inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), stream));
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
CUDA_TRY(hipMemcpyAsync(inflate_out.host_ptr(),
inflate_out.device_ptr(),
inflate_out.memory_size(),
hipMemcpyDeviceToHost,
stream));
CUDA_TRY(hipStreamSynchronize(stream));
// Check if larger output is required, as it's not known ahead of time
if (_metadata->codec == "deflate" && !loop_cnt) {
size_t actual_uncompressed_size = 0;
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
// If the error status is 1 (buffer too small), the `bytes_written` field
// actually contains the uncompressed data size
if (inflate_out[i].status == 1 && inflate_out[i].bytes_written > inflate_in[i].dstSize) {
inflate_in[i].dstSize = inflate_out[i].bytes_written;
}
actual_uncompressed_size += inflate_in[i].dstSize;
}
if (actual_uncompressed_size > uncompressed_data_size) {
decomp_block_data.resize(actual_uncompressed_size);
for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) {
auto dst_base = static_cast<uint8_t *>(decomp_block_data.data());
inflate_in[i].dstDevice = dst_base + dst_pos;
_metadata->block_list[i].offset = dst_pos;
_metadata->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += _metadata->block_list[i].size;
}
} else {
break;
}
} else {
break;
}
}
return decomp_block_data;
}
void reader::impl::decode_data(const rmm::device_buffer &block_data,
const std::vector<std::pair<uint32_t, uint32_t>> &dict,
hostdevice_vector<uint8_t> &global_dictionary,
size_t total_dictionary_entries,
size_t num_rows,
std::vector<std::pair<int, std::string>> selection,
std::vector<column_buffer> &out_buffers,
hipStream_t stream)
{
// Build gpu schema
hostdevice_vector<gpu::schemadesc_s> schema_desc(_metadata->schema.size());
uint32_t min_row_data_size = 0;
int skip_field_cnt = 0;
for (size_t i = 0; i < _metadata->schema.size(); i++) {
type_kind_e kind = _metadata->schema[i].kind;
if (skip_field_cnt != 0) {
// Exclude union and array members from min_row_data_size
skip_field_cnt += _metadata->schema[i].num_children - 1;
} else {
switch (kind) {
case type_union:
case type_array:
skip_field_cnt = _metadata->schema[i].num_children;
// fall through
case type_boolean:
case type_int:
case type_long:
case type_bytes:
case type_string:
case type_enum: min_row_data_size += 1; break;
case type_float: min_row_data_size += 4; break;
case type_double: min_row_data_size += 8; break;
default: break;
}
}
if (kind == type_enum && !_metadata->schema[i].symbols.size()) { kind = type_int; }
schema_desc[i].kind = kind;
schema_desc[i].count = (kind == type_enum) ? 0 : (uint32_t)_metadata->schema[i].num_children;
schema_desc[i].dataptr = nullptr;
CUDF_EXPECTS(
kind != type_union || _metadata->schema[i].num_children < 2 ||
(_metadata->schema[i].num_children == 2 && (_metadata->schema[i + 1].kind == type_null ||
_metadata->schema[i + 2].kind == type_null)),
"Union with non-null type not currently supported");
}
std::vector<void *> valid_alias(out_buffers.size(), nullptr);
for (size_t i = 0; i < out_buffers.size(); i++) {
const auto col_idx = selection[i].first;
int schema_data_idx = _metadata->columns[col_idx].schema_data_idx;
int schema_null_idx = _metadata->columns[col_idx].schema_null_idx;
schema_desc[schema_data_idx].dataptr = out_buffers[i].data();
if (schema_null_idx >= 0) {
if (!schema_desc[schema_null_idx].dataptr) {
schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask();
} else {
valid_alias[i] = schema_desc[schema_null_idx].dataptr;
}
}
if (_metadata->schema[schema_data_idx].kind == type_enum) {
schema_desc[schema_data_idx].count = dict[i].first;
}
if (out_buffers[i].null_mask_size()) {
set_null_mask(out_buffers[i].null_mask(), 0, num_rows, true, stream);
}
}
rmm::device_buffer block_list(
_metadata->block_list.data(), _metadata->block_list.size() * sizeof(block_desc_s), stream);
CUDA_TRY(hipMemcpyAsync(schema_desc.device_ptr(),
schema_desc.host_ptr(),
schema_desc.memory_size(),
hipMemcpyHostToDevice,
stream));
CUDA_TRY(
gpu::DecodeAvroColumnData(static_cast<block_desc_s *>(block_list.data()),
schema_desc.device_ptr(),
reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.device_ptr()),
static_cast<const uint8_t *>(block_data.data()),
static_cast<uint32_t>(_metadata->block_list.size()),
static_cast<uint32_t>(schema_desc.size()),
static_cast<uint32_t>(total_dictionary_entries),
_metadata->num_rows,
_metadata->skip_rows,
min_row_data_size,
stream));
// Copy valid bits that are shared between columns
for (size_t i = 0; i < out_buffers.size(); i++) {
if (valid_alias[i] != nullptr) {
CUDA_TRY(hipMemcpyAsync(out_buffers[i].null_mask(),
valid_alias[i],
out_buffers[i].null_mask_size(),
hipMemcpyHostToDevice,
stream));
}
}
CUDA_TRY(hipMemcpyAsync(schema_desc.host_ptr(),
schema_desc.device_ptr(),
schema_desc.memory_size(),
hipMemcpyDeviceToHost,
stream));
CUDA_TRY(hipStreamSynchronize(stream));
for (size_t i = 0; i < out_buffers.size(); i++) {
const auto col_idx = selection[i].first;
const auto schema_null_idx = _metadata->columns[col_idx].schema_null_idx;
out_buffers[i].null_count() = (schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0;
}
}
reader::impl::impl(std::unique_ptr<datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _source(std::move(source)), _mr(mr), _columns(options.columns)
{
// Open the source Avro dataset metadata
_metadata = std::make_unique<metadata>(_source.get());
}
table_with_metadata reader::impl::read(int skip_rows, int num_rows, hipStream_t stream)
{
std::vector<std::unique_ptr<column>> out_columns;
table_metadata metadata_out;
// Select and read partial metadata / schema within the subset of rows
_metadata->init_and_select_rows(skip_rows, num_rows);
// Select only columns required by the options
auto selected_columns = _metadata->select_columns(_columns);
if (selected_columns.size() != 0) {
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : selected_columns) {
auto &col_schema = _metadata->schema[_metadata->columns[col.first].schema_data_idx];
auto col_type = to_type_id(&col_schema);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
if (_metadata->total_data_size > 0) {
const auto buffer =
_source->host_read(_metadata->block_list[0].offset, _metadata->total_data_size);
rmm::device_buffer block_data(buffer->data(), buffer->size(), stream);
if (_metadata->codec != "" && _metadata->codec != "null") {
auto decomp_block_data = decompress_data(block_data, stream);
block_data = std::move(decomp_block_data);
} else {
auto dst_ofs = _metadata->block_list[0].offset;
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
_metadata->block_list[i].offset -= dst_ofs;
}
}
size_t total_dictionary_entries = 0;
size_t dictionary_data_size = 0;
std::vector<std::pair<uint32_t, uint32_t>> dict(column_types.size());
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto &col_schema = _metadata->schema[_metadata->columns[col_idx].schema_data_idx];
dict[i].first = static_cast<uint32_t>(total_dictionary_entries);
dict[i].second = static_cast<uint32_t>(col_schema.symbols.size());
total_dictionary_entries += dict[i].second;
for (const auto &sym : col_schema.symbols) { dictionary_data_size += sym.length(); }
}
hostdevice_vector<uint8_t> global_dictionary(
total_dictionary_entries * sizeof(gpu::nvstrdesc_s) + dictionary_data_size);
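// Buffer layout: an array of nvstrdesc_s descriptors followed by the packed symbol
// characters; the descriptor ptr fields are filled with device addresses so the dictionary
// is usable after a single host-to-device copy.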
if (total_dictionary_entries > 0) {
size_t dict_pos = total_dictionary_entries * sizeof(gpu::nvstrdesc_s);
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto &col_schema = _metadata->schema[_metadata->columns[col_idx].schema_data_idx];
auto index =
&(reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.host_ptr()))[dict[i].first];
for (size_t j = 0; j < dict[i].second; j++) {
size_t len = col_schema.symbols[j].length();
char *ptr = reinterpret_cast<char *>(global_dictionary.device_ptr() + dict_pos);
index[j].ptr = ptr;
index[j].count = len;
memcpy(global_dictionary.host_ptr() + dict_pos, col_schema.symbols[j].c_str(), len);
dict_pos += len;
}
}
CUDA_TRY(hipMemcpyAsync(global_dictionary.device_ptr(),
global_dictionary.host_ptr(),
global_dictionary.memory_size(),
hipMemcpyHostToDevice,
stream));
}
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
bool is_nullable = (_metadata->columns[col_idx].schema_null_idx >= 0);
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr);
}
decode_data(block_data,
dict,
global_dictionary,
total_dictionary_entries,
num_rows,
selected_columns,
out_buffers,
stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(out_buffers[i], stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
metadata_out.column_names.resize(selected_columns.size());
for (size_t i = 0; i < selected_columns.size(); i++) {
metadata_out.column_names[i] = selected_columns[i].second;
}
// Return user metadata
metadata_out.user_data = _metadata->user_data;
return {std::make_unique<table>(std::move(out_columns)), std::move(metadata_out)};
}
// Forward to implementation
reader::reader(std::vector<std::string> const &filepaths,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(datasource::create(filepaths[0]), options, mr);
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(std::move(sources[0]), options, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read_all(hipStream_t stream) { return _impl->read(0, -1, stream); }
// Forward to implementation
table_with_metadata reader::read_rows(size_type skip_rows, size_type num_rows, hipStream_t stream)
{
return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, stream);
}
} // namespace avro
} // namespace detail
} // namespace io
} // namespace cudf
| b7ef1324542929d61f54d2999610e84ea30aab89.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO Avro reader class implementation
**/
#include "reader_impl.hpp"
#include <io/comp/gpuinflate.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
namespace cudf {
namespace io {
namespace detail {
namespace avro {
// Import functionality that's independent of legacy code
using namespace cudf::io::avro;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates Avro data kind to cuDF type enum
**/
type_id to_type_id(const avro::schema_entry *col)
{
switch (col->kind) {
case avro::type_boolean: return type_id::BOOL8;
case avro::type_int: return type_id::INT32;
case avro::type_long: return type_id::INT64;
case avro::type_float: return type_id::FLOAT32;
case avro::type_double: return type_id::FLOAT64;
case avro::type_bytes:
case avro::type_string: return type_id::STRING;
case avro::type_enum: return (!col->symbols.empty()) ? type_id::STRING : type_id::INT32;
default: return type_id::EMPTY;
}
}
} // namespace
/**
* @brief A helper wrapper for Avro file metadata. Provides some additional
* convenience methods for initializing and accessing the metadata and schema
**/
class metadata : public file_metadata {
public:
explicit metadata(datasource *const src) : source(src) {}
/**
* @brief Initializes the parser and filters down to a subset of rows
*
* @param[in,out] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
**/
void init_and_select_rows(int &row_start, int &row_count)
{
const auto buffer = source->host_read(0, source->size());
avro::container pod(buffer->data(), buffer->size());
CUDF_EXPECTS(pod.parse(this, row_count, row_start), "Cannot parse metadata");
row_start = skip_rows;
row_count = num_rows;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
*
* @return List of pairs of column indexes and names
**/
auto select_columns(std::vector<std::string> use_names)
{
std::vector<std::pair<int, std::string>> selection;
const auto num_avro_columns = static_cast<int>(columns.size());
if (!use_names.empty()) {
int index = 0;
for (const auto &use_name : use_names) {
for (int i = 0; i < num_avro_columns; ++i, ++index) {
if (index >= num_avro_columns) { index = 0; }
if (columns[index].name == use_name &&
type_id::EMPTY != to_type_id(&schema[columns[index].schema_data_idx])) {
selection.emplace_back(index, columns[index].name);
index++;
break;
}
}
}
} else {
for (int i = 0; i < num_avro_columns; ++i) {
// Exclude array columns (unsupported)
bool column_in_array = false;
for (int parent_idx = schema[columns[i].schema_data_idx].parent_idx; parent_idx > 0;
parent_idx = schema[parent_idx].parent_idx) {
if (schema[parent_idx].kind == avro::type_array) {
column_in_array = true;
break;
}
}
if (!column_in_array) {
auto col_type = to_type_id(&schema[columns[i].schema_data_idx]);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type");
selection.emplace_back(i, columns[i].name);
}
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
return selection;
}
private:
datasource *const source;
};
rmm::device_buffer reader::impl::decompress_data(const rmm::device_buffer &comp_block_data,
cudaStream_t stream)
{
size_t uncompressed_data_size = 0;
hostdevice_vector<gpu_inflate_input_s> inflate_in(_metadata->block_list.size());
hostdevice_vector<gpu_inflate_status_s> inflate_out(_metadata->block_list.size());
if (_metadata->codec == "deflate") {
// Guess an initial maximum uncompressed block size
uint32_t initial_blk_len = (_metadata->max_block_size * 2 + 0xfff) & ~0xfff;
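// i.e. twice the maximum compressed block size, rounded up to the next 4 KiB boundary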
uncompressed_data_size = initial_blk_len * _metadata->block_list.size();
for (size_t i = 0; i < inflate_in.size(); ++i) { inflate_in[i].dstSize = initial_blk_len; }
} else if (_metadata->codec == "snappy") {
// Extract the uncompressed length from the snappy stream
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
const auto buffer = _source->host_read(_metadata->block_list[i].offset, 4);
const uint8_t *blk = buffer->data();
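// A snappy stream is prefixed with its uncompressed length as a little-endian base-128
// varint; the code below decodes up to four bytes of it.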
uint32_t blk_len = blk[0];
if (blk_len > 0x7f) {
blk_len = (blk_len & 0x7f) | (blk[1] << 7);
if (blk_len > 0x3fff) {
blk_len = (blk_len & 0x3fff) | (blk[2] << 14);
if (blk_len > 0x1fffff) { blk_len = (blk_len & 0x1fffff) | (blk[3] << 21); }
}
}
inflate_in[i].dstSize = blk_len;
uncompressed_data_size += blk_len;
}
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
rmm::device_buffer decomp_block_data(uncompressed_data_size, stream);
const auto base_offset = _metadata->block_list[0].offset;
for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) {
const auto src_pos = _metadata->block_list[i].offset - base_offset;
inflate_in[i].srcDevice = static_cast<const uint8_t *>(comp_block_data.data()) + src_pos;
inflate_in[i].srcSize = _metadata->block_list[i].size;
inflate_in[i].dstDevice = static_cast<uint8_t *>(decomp_block_data.data()) + dst_pos;
// Update blocks offsets & sizes to refer to uncompressed data
_metadata->block_list[i].offset = dst_pos;
_metadata->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += _metadata->block_list[i].size;
}
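// Run decompression at most twice: the first pass uses the guessed output sizes; if a
// deflate block reports its buffer was too small, the buffers are resized to the actual
// sizes and the pass is repeated.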
for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) {
CUDA_TRY(cudaMemcpyAsync(inflate_in.device_ptr(),
inflate_in.host_ptr(),
inflate_in.memory_size(),
cudaMemcpyHostToDevice,
stream));
CUDA_TRY(cudaMemsetAsync(inflate_out.device_ptr(), 0, inflate_out.memory_size(), stream));
if (_metadata->codec == "deflate") {
CUDA_TRY(gpuinflate(
inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), 0, stream));
} else if (_metadata->codec == "snappy") {
CUDA_TRY(
gpu_unsnap(inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), stream));
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
CUDA_TRY(cudaMemcpyAsync(inflate_out.host_ptr(),
inflate_out.device_ptr(),
inflate_out.memory_size(),
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
// Check if larger output is required, as it's not known ahead of time
if (_metadata->codec == "deflate" && !loop_cnt) {
size_t actual_uncompressed_size = 0;
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
// If the error status is 1 (buffer too small), the `bytes_written` field
// actually contains the uncompressed data size
if (inflate_out[i].status == 1 && inflate_out[i].bytes_written > inflate_in[i].dstSize) {
inflate_in[i].dstSize = inflate_out[i].bytes_written;
}
actual_uncompressed_size += inflate_in[i].dstSize;
}
if (actual_uncompressed_size > uncompressed_data_size) {
decomp_block_data.resize(actual_uncompressed_size);
for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) {
auto dst_base = static_cast<uint8_t *>(decomp_block_data.data());
inflate_in[i].dstDevice = dst_base + dst_pos;
_metadata->block_list[i].offset = dst_pos;
_metadata->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += _metadata->block_list[i].size;
}
} else {
break;
}
} else {
break;
}
}
return decomp_block_data;
}
void reader::impl::decode_data(const rmm::device_buffer &block_data,
const std::vector<std::pair<uint32_t, uint32_t>> &dict,
hostdevice_vector<uint8_t> &global_dictionary,
size_t total_dictionary_entries,
size_t num_rows,
std::vector<std::pair<int, std::string>> selection,
std::vector<column_buffer> &out_buffers,
cudaStream_t stream)
{
// Build gpu schema
hostdevice_vector<gpu::schemadesc_s> schema_desc(_metadata->schema.size());
uint32_t min_row_data_size = 0;
int skip_field_cnt = 0;
for (size_t i = 0; i < _metadata->schema.size(); i++) {
type_kind_e kind = _metadata->schema[i].kind;
if (skip_field_cnt != 0) {
// Exclude union and array members from min_row_data_size
skip_field_cnt += _metadata->schema[i].num_children - 1;
} else {
switch (kind) {
case type_union:
case type_array:
skip_field_cnt = _metadata->schema[i].num_children;
// fall through
case type_boolean:
case type_int:
case type_long:
case type_bytes:
case type_string:
case type_enum: min_row_data_size += 1; break;
case type_float: min_row_data_size += 4; break;
case type_double: min_row_data_size += 8; break;
default: break;
}
}
if (kind == type_enum && !_metadata->schema[i].symbols.size()) { kind = type_int; }
schema_desc[i].kind = kind;
schema_desc[i].count = (kind == type_enum) ? 0 : (uint32_t)_metadata->schema[i].num_children;
schema_desc[i].dataptr = nullptr;
CUDF_EXPECTS(
kind != type_union || _metadata->schema[i].num_children < 2 ||
(_metadata->schema[i].num_children == 2 && (_metadata->schema[i + 1].kind == type_null ||
_metadata->schema[i + 2].kind == type_null)),
"Union with non-null type not currently supported");
}
std::vector<void *> valid_alias(out_buffers.size(), nullptr);
for (size_t i = 0; i < out_buffers.size(); i++) {
const auto col_idx = selection[i].first;
int schema_data_idx = _metadata->columns[col_idx].schema_data_idx;
int schema_null_idx = _metadata->columns[col_idx].schema_null_idx;
schema_desc[schema_data_idx].dataptr = out_buffers[i].data();
if (schema_null_idx >= 0) {
if (!schema_desc[schema_null_idx].dataptr) {
schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask();
} else {
valid_alias[i] = schema_desc[schema_null_idx].dataptr;
}
}
if (_metadata->schema[schema_data_idx].kind == type_enum) {
schema_desc[schema_data_idx].count = dict[i].first;
}
if (out_buffers[i].null_mask_size()) {
set_null_mask(out_buffers[i].null_mask(), 0, num_rows, true, stream);
}
}
rmm::device_buffer block_list(
_metadata->block_list.data(), _metadata->block_list.size() * sizeof(block_desc_s), stream);
CUDA_TRY(cudaMemcpyAsync(schema_desc.device_ptr(),
schema_desc.host_ptr(),
schema_desc.memory_size(),
cudaMemcpyHostToDevice,
stream));
CUDA_TRY(
gpu::DecodeAvroColumnData(static_cast<block_desc_s *>(block_list.data()),
schema_desc.device_ptr(),
reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.device_ptr()),
static_cast<const uint8_t *>(block_data.data()),
static_cast<uint32_t>(_metadata->block_list.size()),
static_cast<uint32_t>(schema_desc.size()),
static_cast<uint32_t>(total_dictionary_entries),
_metadata->num_rows,
_metadata->skip_rows,
min_row_data_size,
stream));
// Copy valid bits that are shared between columns
for (size_t i = 0; i < out_buffers.size(); i++) {
if (valid_alias[i] != nullptr) {
CUDA_TRY(cudaMemcpyAsync(out_buffers[i].null_mask(),
valid_alias[i],
out_buffers[i].null_mask_size(),
cudaMemcpyHostToDevice,
stream));
}
}
CUDA_TRY(cudaMemcpyAsync(schema_desc.host_ptr(),
schema_desc.device_ptr(),
schema_desc.memory_size(),
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
for (size_t i = 0; i < out_buffers.size(); i++) {
const auto col_idx = selection[i].first;
const auto schema_null_idx = _metadata->columns[col_idx].schema_null_idx;
out_buffers[i].null_count() = (schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0;
}
}
reader::impl::impl(std::unique_ptr<datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _source(std::move(source)), _mr(mr), _columns(options.columns)
{
// Open the source Avro dataset metadata
_metadata = std::make_unique<metadata>(_source.get());
}
table_with_metadata reader::impl::read(int skip_rows, int num_rows, cudaStream_t stream)
{
std::vector<std::unique_ptr<column>> out_columns;
table_metadata metadata_out;
// Select and read partial metadata / schema within the subset of rows
_metadata->init_and_select_rows(skip_rows, num_rows);
// Select only columns required by the options
auto selected_columns = _metadata->select_columns(_columns);
if (selected_columns.size() != 0) {
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : selected_columns) {
auto &col_schema = _metadata->schema[_metadata->columns[col.first].schema_data_idx];
auto col_type = to_type_id(&col_schema);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
if (_metadata->total_data_size > 0) {
const auto buffer =
_source->host_read(_metadata->block_list[0].offset, _metadata->total_data_size);
rmm::device_buffer block_data(buffer->data(), buffer->size(), stream);
if (_metadata->codec != "" && _metadata->codec != "null") {
auto decomp_block_data = decompress_data(block_data, stream);
block_data = std::move(decomp_block_data);
} else {
auto dst_ofs = _metadata->block_list[0].offset;
for (size_t i = 0; i < _metadata->block_list.size(); i++) {
_metadata->block_list[i].offset -= dst_ofs;
}
}
size_t total_dictionary_entries = 0;
size_t dictionary_data_size = 0;
std::vector<std::pair<uint32_t, uint32_t>> dict(column_types.size());
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto &col_schema = _metadata->schema[_metadata->columns[col_idx].schema_data_idx];
dict[i].first = static_cast<uint32_t>(total_dictionary_entries);
dict[i].second = static_cast<uint32_t>(col_schema.symbols.size());
total_dictionary_entries += dict[i].second;
for (const auto &sym : col_schema.symbols) { dictionary_data_size += sym.length(); }
}
hostdevice_vector<uint8_t> global_dictionary(
total_dictionary_entries * sizeof(gpu::nvstrdesc_s) + dictionary_data_size);
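// Buffer layout: an array of nvstrdesc_s descriptors followed by the packed symbol
// characters; the descriptor ptr fields are filled with device addresses so the dictionary
// is usable after a single host-to-device copy.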
if (total_dictionary_entries > 0) {
size_t dict_pos = total_dictionary_entries * sizeof(gpu::nvstrdesc_s);
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto &col_schema = _metadata->schema[_metadata->columns[col_idx].schema_data_idx];
auto index =
&(reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.host_ptr()))[dict[i].first];
for (size_t j = 0; j < dict[i].second; j++) {
size_t len = col_schema.symbols[j].length();
char *ptr = reinterpret_cast<char *>(global_dictionary.device_ptr() + dict_pos);
index[j].ptr = ptr;
index[j].count = len;
memcpy(global_dictionary.host_ptr() + dict_pos, col_schema.symbols[j].c_str(), len);
dict_pos += len;
}
}
CUDA_TRY(cudaMemcpyAsync(global_dictionary.device_ptr(),
global_dictionary.host_ptr(),
global_dictionary.memory_size(),
cudaMemcpyHostToDevice,
stream));
}
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
bool is_nullable = (_metadata->columns[col_idx].schema_null_idx >= 0);
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr);
}
decode_data(block_data,
dict,
global_dictionary,
total_dictionary_entries,
num_rows,
selected_columns,
out_buffers,
stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(out_buffers[i], stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
metadata_out.column_names.resize(selected_columns.size());
for (size_t i = 0; i < selected_columns.size(); i++) {
metadata_out.column_names[i] = selected_columns[i].second;
}
// Return user metadata
metadata_out.user_data = _metadata->user_data;
return {std::make_unique<table>(std::move(out_columns)), std::move(metadata_out)};
}
// Forward to implementation
reader::reader(std::vector<std::string> const &filepaths,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(datasource::create(filepaths[0]), options, mr);
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(std::move(sources[0]), options, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read_all(cudaStream_t stream) { return _impl->read(0, -1, stream); }
// Forward to implementation
table_with_metadata reader::read_rows(size_type skip_rows, size_type num_rows, cudaStream_t stream)
{
return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, stream);
}
} // namespace avro
} // namespace detail
} // namespace io
} // namespace cudf
|
3aba78bf97d7ab179b5f4dfbf13c4dd7b17836c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernal.cuh"
#include <cmath>
#include <assert.h>
//TODO CHANGE EVERYTHING TO POINTERS, WHY DIDNT I DO THAT IN THE FIRST PLACE
//TODO GET RID OF ALL THE VECTOR3 TO FLOAT3 TRANSLATIONS
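// Axis-aligned overlap test: true when nPos lies within aggroRange of the box centred at
// pos with half-extents halfDim on every axis (the aggro range is treated as a cube rather
// than a sphere).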
__device__ bool CheckBounding(float3 nPos, float aggroRange, float3 pos, float3 halfDim)
{
float dist = abs(pos.x - nPos.x);
float sum = halfDim.x + aggroRange;
if(dist <= sum) {
dist = abs(pos.y - nPos.y);
sum = halfDim.y + aggroRange;
if(dist <= sum) {
dist = abs(pos.z - nPos.z);
sum = halfDim.z + aggroRange;
if(dist <= sum) {
//if there is collision data storage
return true;
}
}
}
return false;
}
#pragma region Together States
__device__ void cudaPatrol(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
float MAXSPEED = 0.5F;
//at target
float diffX = coreData->myAgents.patrolLocation[a].loc[coreData->myAgents.targetLocation[a]].x - coreData->myAgents.x[a];
float diffZ = coreData->myAgents.patrolLocation[a].loc[coreData->myAgents.targetLocation[a]].z - coreData->myAgents.z[a];
float absX = abs(diffX);
float absZ = abs(diffZ);
//check its close enough to the point
if (absX < 0.1f && absZ < 0.1f)
{
//get new target
coreData->myAgents.targetLocation[a]++;
coreData->myAgents.targetLocation[a] = coreData->myAgents.targetLocation[a] % 2; //need to fix this
}
else
{
//move to target
float dis = absX + absZ;
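// The speed is split between the axes using the Manhattan distance (|dx| + |dz|), so
// diagonal movement ends up slower than the straight-line MAXSPEED.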
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
//find how much it needs to move
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
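// ((0 < diff) - (diff < 0)) below is a branchless sign(): -1, 0 or +1.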
coreData->myAgents.x[a] += moveX * ((float(0) < diffX) - (diffX < float(0)));
coreData->myAgents.z[a] += moveZ * ((float(0) < diffZ) - (diffZ < float(0)));
}
//state transition
int i = 0;
while (i < 8 && agentsPartitions[((a)*8) + i] != -1)
{
int j = 0;
int part = agentsPartitions[(a*8) + i];
int partPlayer = (part*coreData->myPlayers.MAXPLAYERS);
while (j < coreData->myPlayers.MAXPLAYERS && partitionsPlayers[partPlayer+j] != -1)
{
//the player
short p = partitionsPlayers[partPlayer+j];
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if player close transition state to stare at player
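// The aggro radius scales with the agent/player level ratio and is capped at the base
// AGGRORANGE, so higher-level players are noticed from a shorter distance.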
float aggroRange = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]));
if (dist < aggroRange && !updateData->playerIsDead[p])
{
coreData->myAgents.state[a] = STARE_AT_PLAYER; //change state
coreData->myAgents.patrolLocation[a].loc[2].x = coreData->myAgents.x[a];
coreData->myAgents.patrolLocation[a].loc[2].y = coreData->myAgents.y[a];
coreData->myAgents.patrolLocation[a].loc[2].z = coreData->myAgents.z[a]; //set position it left patrol
coreData->myAgents.targetPlayer[a] = p; // player that is being stared at
i = coreData->myPlayers.MAXPLAYERS; // stop scanning further partitions (note: this only ends the outer loop, and only if MAXPLAYERS >= 8)
}
++j;
}
++i;
}
}
__device__ void cudaStareAtPlayer(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
int p = coreData->myAgents.targetPlayer[a]; // target player
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//the range of aggro, and pull, to the player
float aggroRange = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]));
float pullRange = (aggroRange * 0.75f) * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]);
coreData->myAgents.waitedTime[a] += msec;
if ((dist < pullRange || coreData->myAgents.waitedTime[a] > 8000.0f ) && !updateData->playerIsDead[p]) // if the player is in pull range
{
coreData->myAgents.state[a] = CHASE_PLAYER;
coreData->myAgents.waitedTime[a] = 0.0f;
}
else
{
// if the player isnt in pull range check if there are any players closer
bool playerClose = false;
int i = 0;
while (i < 8 && agentsPartitions[((a)*8) + i] != -1)
{
int j = 0;
int part = agentsPartitions[(a*8) + i];
int partPlayer = (part*coreData->myPlayers.MAXPLAYERS);
while (j < coreData->myPlayers.MAXPLAYERS && partitionsPlayers[partPlayer+j] != -1)
{
//the player
short p2 = partitionsPlayers[partPlayer+j];
//calculate distance to player
float3 diffNew = float3();
diffNew.x = coreData->myPlayers.x[p2] - coreData->myAgents.x[a];
diffNew.y = coreData->myPlayers.y[p2] - coreData->myAgents.y[a];
diffNew.z = coreData->myPlayers.z[p2] - coreData->myAgents.z[a];
float distNew = sqrtf((diffNew.x*diffNew.x)+(diffNew.y*diffNew.y)+(diffNew.z*diffNew.z));
// if the new distance is less, switch target
if (distNew <= dist && !updateData->playerIsDead[p2])
{
coreData->myAgents.targetPlayer[a] = p2;
dist = distNew;
float aggroRangeNew = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * (coreData->myAgents.level[a] / coreData->myPlayers.level[p2]));
if (dist < aggroRangeNew)
{
playerClose = true;
}
}
++j;
}
++i;
}
// if there are no close players at all
if (!playerClose)
{
coreData->myAgents.waitedTime[a] = 0.0f;
coreData->myAgents.state[a] = PATROL;
coreData->myAgents.targetPlayer[a] = -1;
}
}
}
__device__ void cudaChasePlayer(CopyOnce* coreData, CopyEachFrame* updateData, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
float LEASHRANGE = 3200.0f;
float ATTACKRANGE = 75.0f;
float MAXSPEED = 0.5F;
int p = coreData->myAgents.targetPlayer[a];
//calculate distance to leash spot
float3 diff = float3();
diff.x = coreData->myAgents.patrolLocation[a].loc[2].x - coreData->myAgents.x[a];
diff.y = coreData->myAgents.patrolLocation[a].loc[2].y - coreData->myAgents.y[a];
diff.z = coreData->myAgents.patrolLocation[a].loc[2].z - coreData->myAgents.z[a];
float leashDist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
// if its too far away or if the player died leash back
if (leashDist > LEASHRANGE || updateData->playerIsDead[p] || coreData->myAgents.targetPlayer[a] == -1)
{
coreData->myAgents.state[a] = LEASH;
coreData->myAgents.targetPlayer[a] = -1;
}
else
{
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if close to player switch state to useability
if (dist < ATTACKRANGE)
{
coreData->myAgents.state[a] = USE_ABILITY;
}
//move towards players location
float absX = abs(diff.x);
float absZ = abs(diff.z);
//move to target
float dis = absX + absZ;
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
coreData->myAgents.x[a] += moveX * ((float(0) < diff.x) - (diff.x < float(0)));
coreData->myAgents.z[a] += moveZ * ((float(0) < diff.z) - (diff.z < float(0)));
}
}
__device__ void cudaLeashBack(CopyOnce* coreData, CopyEachFrame* updateData, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
float MAXSPEED = 0.5F;
//calculate distance to leash spot
float diffX = coreData->myAgents.patrolLocation[a].loc[2].x - coreData->myAgents.x[a];
float diffZ = coreData->myAgents.patrolLocation[a].loc[2].z - coreData->myAgents.z[a];
float absX = abs(diffX);
float absZ = abs(diffZ);
//check its close enough to the point
if (absX < 0.1f && absZ < 0.1f)
{
//change back to patrol
coreData->myAgents.state[a] = PATROL;
}
else
{
//move to target
float dis = absX + absZ;
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
coreData->myAgents.x[a] += moveX * ((float(0) < diffX) - (diffX < float(0)));
coreData->myAgents.z[a] += moveZ * ((float(0) < diffZ) - (diffZ < float(0)));
}
}
__device__ void cudaReduceCooldowns(CopyOnce* coreData, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0 ; i < coreData->myAgents.myAbilities->MAXABILITIES; ++i)
{
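// Branchless update: only cooldowns that are still positive are ticked down, presumably
// to avoid divergent branches within a warp.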
int check = coreData->myAgents.myAbilities[a].abil[i].cooldown > 0;
coreData->myAgents.myAbilities[a].abil[i].cooldown -= msec * check;
}
}
__device__ void cudaUseAbility(CopyOnce* coreData, CopyEachFrame* updateData, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
float ATTACKRANGE = 75.0f;
int p = coreData->myAgents.targetPlayer[a];
if (updateData->playerIsDead[p]) // if the player is dead
{
coreData->myAgents.state[a] = LEASH; //leash back
coreData->myAgents.targetPlayer[a] = -1; // set the target player to null
}
else
{
//TODO ADD ABILITIES BACK
//look through abilities via priority until one is found not on cooldown
int i = 0;
while (i < coreData->myAgents.myAbilities->MAXABILITIES && coreData->myAgents.myAbilities[a].abil[i].cooldown > 0.001f) {
i++;
}
//cast ability
if (i < coreData->myAgents.myAbilities->MAXABILITIES && coreData->myAgents.myAbilities[a].abil[i].cooldown < 0.001f)
{
coreData->myAgents.myAbilities[a].abil[i].cooldown = coreData->myAgents.myAbilities[a].abil[i].maxCooldown;
coreData->myPlayers.hp[coreData->myAgents.targetPlayer[a]] -= coreData->myAgents.myAbilities[a].abil[i].damage;
}
//if the player goes out of range, change state to chase
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if the player has moved out of attack range, go back to chasing
if (dist > (ATTACKRANGE))
{
coreData->myAgents.state[a] = CHASE_PLAYER;
}
}
}
__global__ void cudaFSM(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
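// Agents in the same warp that are in different states serialise through this switch; the
// per-state kernels in the 'Separated States' region below appear to exist to avoid that
// divergence.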
switch (coreData->myAgents.state[a]) {
case PATROL: cudaPatrol(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
break;
case STARE_AT_PLAYER: cudaStareAtPlayer(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
break;
case CHASE_PLAYER: cudaChasePlayer(coreData, updateData, msec);
break;
case LEASH: cudaLeashBack(coreData, updateData, msec);
break;
case USE_ABILITY: cudaUseAbility(coreData, updateData, msec);
break;
};
cudaReduceCooldowns(coreData, msec);
}
#pragma endregion
#pragma region Separated States
__device__ void cudaPatrolState(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec, int sCount = 0)
{
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
float MAXSPEED = 0.5F;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < coreData->myAgents.stateCount[0]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
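// Without SPLIT_GPU / SPLIT_GPU_BROAD, threads beyond this state's agent count still
// execute but with mod == 0, so their writes below are multiplied away (predication
// instead of an early return).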
//at target
float diffX = coreData->myAgents.patrolLocation[a].loc[coreData->myAgents.targetLocation[a]].x - coreData->myAgents.x[a];
float diffZ = coreData->myAgents.patrolLocation[a].loc[coreData->myAgents.targetLocation[a]].z - coreData->myAgents.z[a];
float absX = abs(diffX);
float absZ = abs(diffZ);
//check its close enough to the point
if (absX < 0.1f && absZ < 0.1f && mod)
{
//get new target
coreData->myAgents.targetLocation[a]++;
coreData->myAgents.targetLocation[a] = coreData->myAgents.targetLocation[a] % 2; //need to fix this
}
else
{
//move to target
float dis = absX + absZ;
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
//find how much it needs to move
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
coreData->myAgents.x[a] += (moveX * ((float(0) < diffX) - (diffX < float(0)))) * mod;
coreData->myAgents.z[a] += (moveZ * ((float(0) < diffZ) - (diffZ < float(0)))) * mod;
}
}
__device__ void cudaPatrolTransitions(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec, int sCount = 0) {
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < coreData->myAgents.stateCount[0]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//state transition
int i = 0;
while (i < 8 && agentsPartitions[((a)*8) + i] != -1)
{
int j = 0;
int part = agentsPartitions[(a*8) + i];
int partPlayer = (part*coreData->myPlayers.MAXPLAYERS);
while (j < coreData->myPlayers.MAXPLAYERS && partitionsPlayers[partPlayer+j] != -1)
{
//the player
short p = partitionsPlayers[partPlayer+j];
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if player close transition state to stare at player
float aggroRange = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]));
if (dist < aggroRange && !updateData->playerIsDead[p] && mod)
{
coreData->myAgents.state[a] = STARE_AT_PLAYER; //change state
coreData->myAgents.patrolLocation[a].loc[2].x = coreData->myAgents.x[a];
coreData->myAgents.patrolLocation[a].loc[2].y = coreData->myAgents.y[a];
coreData->myAgents.patrolLocation[a].loc[2].z = coreData->myAgents.z[a]; //set position it left patrol
coreData->myAgents.targetPlayer[a] = p; // player that is being stared at
i = coreData->myPlayers.MAXPLAYERS; // stop scanning further partitions (note: this only ends the outer loop, and only if MAXPLAYERS >= 8)
}
++j;
}
++i;
}
}
__device__ void cudaStareAtPlayerState(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec, int sCount = 0)
{
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[1]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
coreData->myAgents.waitedTime[a] += msec * mod;
}
__device__ void cudaStareAtPlayerTransitions(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec, int sCount = 0)
{
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
int p = coreData->myAgents.targetPlayer[a]; // target player
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[1]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//the range of aggro, and pull, to the player
float aggroRange = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]));
float pullRange = (aggroRange * 0.75f) * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]);
if ((dist < pullRange || coreData->myAgents.waitedTime[a] > 8000.0f ) && !updateData->playerIsDead[p] && mod) // if the player is in pull range
{
coreData->myAgents.state[a] = CHASE_PLAYER;
coreData->myAgents.waitedTime[a] = 0.0f;
coreData->myAgents.y[a] += 5.0f;
}
else
{
// if the player isnt in pull range check if there are any players closer
bool playerClose = false;
int i = 0;
while (i < 8 && agentsPartitions[((a)*8) + i] != -1)
{
int j = 0;
int part = agentsPartitions[(a*8) + i];
int partPlayer = (part*coreData->myPlayers.MAXPLAYERS);
while (j < coreData->myPlayers.MAXPLAYERS && partitionsPlayers[partPlayer+j] != -1)
{
//the player
short p2 = partitionsPlayers[partPlayer+j];
//calculate distance to player
float3 diffNew = float3();
diffNew.x = coreData->myPlayers.x[p2] - coreData->myAgents.x[a];
diffNew.y = coreData->myPlayers.y[p2] - coreData->myAgents.y[a];
diffNew.z = coreData->myPlayers.z[p2] - coreData->myAgents.z[a];
float distNew = sqrtf((diffNew.x*diffNew.x)+(diffNew.y*diffNew.y)+(diffNew.z*diffNew.z));
// if the new distance is less, switch target
if (distNew <= dist && !updateData->playerIsDead[p2] && mod)
{
coreData->myAgents.targetPlayer[a] = p2;
dist = distNew;
float aggroRangeNew = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * (coreData->myAgents.level[a] / coreData->myPlayers.level[p2]));
if (dist < aggroRangeNew)
{
playerClose = true;
}
}
++j;
}
++i;
}
// if there are no close players at all
if (!playerClose && mod)
{
coreData->myAgents.waitedTime[a] = 0.0f;
coreData->myAgents.state[a] = PATROL;
coreData->myAgents.targetPlayer[a] = -1;
}
}
}
__device__ void cudaChasePlayerState(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0)
{
float MAXSPEED = 0.5F;
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
int p = coreData->myAgents.targetPlayer[a];
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[2]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
//move towards players location
float absX = abs(diff.x);
float absZ = abs(diff.z);
//move to target
float dis = absX + absZ;
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
coreData->myAgents.x[a] += (moveX * ((float(0) < diff.x) - (diff.x < float(0)))) * mod;
coreData->myAgents.z[a] += (moveZ * ((float(0) < diff.z) - (diff.z < float(0)))) * mod;
}
__device__ void cudaChasePlayerTransitions(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0) {
float LEASHRANGE = 3200.0f;
float ATTACKRANGE = 75.0f;
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
int p = coreData->myAgents.targetPlayer[a];
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[2]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//calculate distance to leash spot
float3 diff = float3();
diff.x = coreData->myAgents.patrolLocation[a].loc[2].x - coreData->myAgents.x[a];
diff.y = coreData->myAgents.patrolLocation[a].loc[2].y - coreData->myAgents.y[a];
diff.z = coreData->myAgents.patrolLocation[a].loc[2].z - coreData->myAgents.z[a];
float leashDist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
// if its too far away or if the player died leash back
if ((leashDist > LEASHRANGE || updateData->playerIsDead[p]) && mod)
{
coreData->myAgents.state[a] = LEASH;
coreData->myAgents.targetPlayer[a] = -1;
}
else
{
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if close enough to the player, switch state to use an ability
if (dist < ATTACKRANGE && mod)
{
coreData->myAgents.state[a] = USE_ABILITY;
}
}
}
__device__ void cudaUseAbilityState(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0)
{
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
int p = coreData->myAgents.targetPlayer[a];
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[3]; // 1 if this thread maps to an agent in this state, 0 otherwise
a = a % coreData->myAgents.MAXAGENTS;
#endif
//step through the abilities in priority order until one is found that is off cooldown
int i = 0;
while (i < coreData->myAgents.myAbilities->MAXABILITIES && coreData->myAgents.myAbilities[a].abil[i].cooldown > 0.001f) {
++i;
}
//cast ability
if (i < coreData->myAgents.myAbilities->MAXABILITIES && coreData->myAgents.myAbilities[a].abil[i].cooldown < 0.001f && mod)
{
coreData->myAgents.myAbilities[a].abil[i].cooldown = coreData->myAgents.myAbilities[a].abil[i].maxCooldown;
coreData->myPlayers.hp[p] -= coreData->myAgents.myAbilities[a].abil[i].damage;
}
}
__device__ void cudaUseAbilityTransitions(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0) {
int a = blockIdx.x * blockDim.x + threadIdx.x + sCount;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[3]; // 1 if this thread maps to an agent in this state, 0 otherwise
a = a % coreData->myAgents.MAXAGENTS;
#endif
float ATTACKRANGE = 75.0f;
int p = coreData->myAgents.targetPlayer[a];
if (updateData->playerIsDead[p] && mod) // if the player is dead
{
coreData->myAgents.state[a] = LEASH; //leash back
coreData->myAgents.targetPlayer[a] = -1; // set the target player to null
}
else
{
//if the player goes out of range, change state to chase
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if the player is out of attack range, switch back to chasing
if (dist > (ATTACKRANGE) && mod)
{
coreData->myAgents.state[a] = CHASE_PLAYER;
}
}
}
__device__ void cudaLeashBackState(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0)
{
int a = blockIdx.x * blockDim.x + threadIdx.x + sCount;
float MAXSPEED = 0.5F;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[4]; // 1 if this thread maps to an agent in this state, 0 otherwise
a = a % coreData->myAgents.MAXAGENTS;
#endif
//calculate distance to leash spot
float diffX = coreData->myAgents.patrolLocation[a].loc[2].x - coreData->myAgents.x[a];
float diffZ = coreData->myAgents.patrolLocation[a].loc[2].z - coreData->myAgents.z[a];
float absX = abs(diffX);
float absZ = abs(diffZ);
//move to target
float dis = absX + absZ;
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
coreData->myAgents.x[a] += (moveX * ((float(0) < diffX) - (diffX < float(0)))) * mod;
coreData->myAgents.z[a] += (moveZ * ((float(0) < diffZ) - (diffZ < float(0)))) * mod;
}
__device__ void cudaLeashBackTransitions(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0) {
int a = blockIdx.x * blockDim.x + threadIdx.x + sCount;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[4]; // 1 if this thread maps to an agent in this state, 0 otherwise
a = a % coreData->myAgents.MAXAGENTS;
#endif
//calculate distance to leash spot
float diffX = coreData->myAgents.patrolLocation[a].loc[2].x - coreData->myAgents.x[a];
float diffZ = coreData->myAgents.patrolLocation[a].loc[2].z - coreData->myAgents.z[a];
float absX = abs(diffX);
float absZ = abs(diffZ);
//check it's close enough to the point
if (absX < 0.1f && absZ < 0.1f && mod)
{
//change back to patrol
coreData->myAgents.state[a] = PATROL;
}
}
#pragma region Globals
__global__ void cudaFSMSplit(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
switch (coreData->myAgents.state[a]) {
case PATROL: cudaPatrolState(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
cudaPatrolTransitions(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
break;
case STARE_AT_PLAYER: cudaStareAtPlayerState(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
cudaStareAtPlayerTransitions(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
break;
case CHASE_PLAYER: cudaChasePlayerState(coreData, updateData, msec);
cudaChasePlayerTransitions(coreData, updateData, msec);
break;
case LEASH: cudaLeashBackState(coreData, updateData, msec);
cudaLeashBackTransitions(coreData, updateData, msec);
break;
case USE_ABILITY: cudaUseAbilityState(coreData, updateData, msec);
cudaUseAbilityTransitions(coreData, updateData, msec);
break;
};
cudaReduceCooldowns(coreData, msec);
}
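// The cudaRun* kernels below rely on the agents having been sorted by state on the host
// (see cudaGPUSort), so each state occupies one contiguous index range and the launch
// offset is a running sum of stateCount:
//   PATROL          -> [0, c0)                    sCount = 0
//   STARE_AT_PLAYER -> [c0, c0 + c1)              sCount = c0
//   CHASE_PLAYER    -> [c0 + c1, c0 + c1 + c2)    sCount = c0 + c1
// and so on, where ck = stateCount[k].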
__global__ void cudaRunPatrol(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec) {
int a = blockIdx.x * blockDim.x + threadIdx.x;
cudaPatrolState(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
cudaPatrolTransitions(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
}
__global__ void cudaRunStare(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec) {
int count = coreData->myAgents.stateCount[0];
cudaStareAtPlayerState(coreData, updateData, agentsPartitions, partitionsPlayers, msec, count);
cudaStareAtPlayerTransitions(coreData, updateData, agentsPartitions, partitionsPlayers, msec, count);
}
__global__ void cudaRunChase(CopyOnce* coreData, CopyEachFrame* updateData, float msec) {
int count = coreData->myAgents.stateCount[0] + coreData->myAgents.stateCount[1];
cudaChasePlayerState(coreData, updateData, msec, count);
cudaChasePlayerTransitions(coreData, updateData, msec, count);
}
__global__ void cudaRunAbility(CopyOnce* coreData, CopyEachFrame* updateData, float msec) {
int count = coreData->myAgents.stateCount[0] + coreData->myAgents.stateCount[1] + coreData->myAgents.stateCount[2];
cudaUseAbilityState(coreData, updateData, msec, count);
cudaUseAbilityTransitions(coreData, updateData, msec, count);
}
__global__ void cudaRunLeash(CopyOnce* coreData, CopyEachFrame* updateData, float msec) {
int count = coreData->myAgents.stateCount[0] + coreData->myAgents.stateCount[1] + coreData->myAgents.stateCount[2] + coreData->myAgents.stateCount[3];
cudaLeashBackState(coreData, updateData, msec, count);
cudaLeashBackTransitions(coreData, updateData, msec, count);
}
__global__ void cudaRunReduce(CopyOnce* coreData, CopyEachFrame* updateData, float msec) {
cudaReduceCooldowns(coreData, msec);
}
#pragma endregion
#pragma endregion
#pragma region Broadphase
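// Broadphase output layout (flattened, -1 terminated):
//   agentsPartitions[a * 8 + i]              - up to 8 partitions overlapping agent a's aggro range
//   partitionsPlayers[part * MAXPLAYERS + j] - players currently inside partition part
// These are the tables the FSM kernels walk when searching for nearby players.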
__global__ void cudaBroadphasePlayers(CopyOnce* coreData, CopyEachFrame* updateData, short* partitionsPlayers)
{
int pa = blockIdx.x * blockDim.x + threadIdx.x;
//position of this partition
float3 pos = float3();
pos.x = coreData->myPartitions.pos[pa].x;
pos.y = coreData->myPartitions.pos[pa].y;
pos.z = coreData->myPartitions.pos[pa].z;
if (pos.x != 0 && pos.y != 0 && pos.z != 0)
{
updateData->playerCount[pa] = 0;
// half dimensions of the partitions
float3 halfDim = float3();
halfDim.x = coreData->myPartitions.halfDim.x;
halfDim.y = coreData->myPartitions.halfDim.y;
halfDim.z = coreData->myPartitions.halfDim.z;
//loop through the players
for (int j = 0; j < coreData->myPlayers.MAXPLAYERS; ++j) {
//check the player exists
if(!updateData->playerIsDead[j] && coreData->myPlayers.maxHP[j] > 0)
{
//players position
float3 playerPos = float3();
playerPos.x = coreData->myPlayers.x[j];
playerPos.y = coreData->myPlayers.y[j];
playerPos.z = coreData->myPlayers.z[j];
//check if its in the partition
if (CheckBounding(playerPos, 0, pos, halfDim))
{
//add to the partitions players
partitionsPlayers[(pa*coreData->myPlayers.MAXPLAYERS) + updateData->playerCount[pa]] = j;
++updateData->playerCount[pa];
}
}
}
}
}
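// Variant that gives each (partition, player) pair its own thread: thread t handles
// partition t % partitionCount and player t / partitionCount, compacting matches into
// partitionsPlayers with an atomicAdd on playerCount.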
__global__ void cudaBroadphasePlayers2(CopyOnce* coreData, CopyEachFrame* updateData, short* partitionsPlayers, const int partitionCount)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
int part = t % partitionCount;
int p = t / partitionCount;
if (p == 0) updateData->playerCount[part] = 0;
__syncthreads();
if(!updateData->playerIsDead[p] && coreData->myPlayers.maxHP[p] > 0) {
//players position
float3 playerPos = float3();
playerPos.x = coreData->myPlayers.x[p];
playerPos.y = coreData->myPlayers.y[p];
playerPos.z = coreData->myPlayers.z[p];
// half dimensions of the partitions
float3 halfDim = float3();
halfDim.x = coreData->myPartitions.halfDim.x;
halfDim.y = coreData->myPartitions.halfDim.y;
halfDim.z = coreData->myPartitions.halfDim.z;
//position of this partition
float3 partPos = float3();
partPos.x = coreData->myPartitions.pos[part].x;
partPos.y = coreData->myPartitions.pos[part].y;
partPos.z = coreData->myPartitions.pos[part].z;
//check if its in the partition
if (CheckBounding(playerPos, 0, partPos, halfDim))
{
//add to the partitions players
int t = (part*coreData->myPlayers.MAXPLAYERS) + atomicAdd(&updateData->playerCount[part], 1);
__syncthreads();
partitionsPlayers[t] = p;
}
}
}
__global__ void cudaBroadphasePlayersCondence(CopyOnce* coreData, CopyEachFrame* updateData, short* partitionsPlayers, int partitionCount)
{
int part = blockIdx.x * blockDim.x + threadIdx.x;
updateData->playerCount[part] = 0;
for (int i = 0; i < coreData->myPlayers.MAXPLAYERS; ++i)
{
if (partitionsPlayers[(part*coreData->myPlayers.MAXPLAYERS) + i] != -1)
{
partitionsPlayers[(part*coreData->myPlayers.MAXPLAYERS) + updateData->playerCount[part]] = partitionsPlayers[(part*coreData->myPlayers.MAXPLAYERS) + i];
++updateData->playerCount[part];
}
}
}
__global__ void cudaBroadphaseAgents(CopyOnce* coreData, CopyEachFrame* updateData,short* agentsPartitions, const int partitionCount)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
float3 agentPos = float3();
agentPos.x = coreData->myAgents.x[a];
agentPos.y = coreData->myAgents.y[a];
agentPos.z = coreData->myAgents.z[a];
// half dimensions of the partitions
float3 halfDim = float3();
halfDim.x = coreData->myPartitions.halfDim.x;
halfDim.y = coreData->myPartitions.halfDim.y;
halfDim.z = coreData->myPartitions.halfDim.z;
int p = 0;
//loop through the world partitions
for (int j = 0; j < partitionCount; ++j) {
//position of this partition
float3 pos = float3();
pos.x = coreData->myPartitions.pos[j].x;
pos.y = coreData->myPartitions.pos[j].y;
pos.z = coreData->myPartitions.pos[j].z;
//check if the agent is in this partition
if (pos.x != 0 && pos.y != 0 && pos.z != 0){
if (CheckBounding(agentPos, coreData->myAgents.AGGRORANGE, pos, halfDim))
{
agentsPartitions[(a*8) + p] = j;
++p;
}
}
}
}
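// Same flattening for agents: thread t handles partition t % partitionCount and agent
// t / partitionCount, appending matching partitions to the agent's slot list via an
// atomicAdd on partCount.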
__global__ void cudaBroadphaseAgents2(CopyOnce* coreData, CopyEachFrame* updateData,short* agentsPartitions, const int partitionCount)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
int part = t % partitionCount;
int a = t / partitionCount;
if (part == 0) coreData->myAgents.partCount[a] = 0;
__syncthreads();
float3 agentPos = float3();
agentPos.x = coreData->myAgents.x[a];
agentPos.y = coreData->myAgents.y[a];
agentPos.z = coreData->myAgents.z[a];
// half dimensions of the partitions
float3 halfDim = float3();
halfDim.x = coreData->myPartitions.halfDim.x;
halfDim.y = coreData->myPartitions.halfDim.y;
halfDim.z = coreData->myPartitions.halfDim.z;
//position of this partition
float3 pos = float3();
pos.x = coreData->myPartitions.pos[part].x;
pos.y = coreData->myPartitions.pos[part].y;
pos.z = coreData->myPartitions.pos[part].z;
//check if the agent is in this partition
if (pos.x != 0 && pos.y != 0 && pos.z != 0){
if (CheckBounding(agentPos, coreData->myAgents.AGGRORANGE, pos, halfDim))
{
int v = (a*8) + atomicAdd(&coreData->myAgents.partCount[a], 1);
__syncthreads();
agentsPartitions[v] = part;
}
}
}
#pragma endregion
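// Host-side entry points. The CopyOnce block is uploaded a single time by cudaCopyCore and
// stays resident as AIManager's d_coreData; the CopyEachFrame block is re-uploaded by the
// cudaGPU* functions every frame and read back with copyDataFromGPU.
// Assumed calling pattern (sketch only - the real ordering lives in AIManager):
//   cudaCopyCore(coreData);                // once, at start-up
//   cudaGPUCopyOnce(core, update, ...);    // each frame: broadphase + FSM
//   copyDataFromGPU(core, update, ...);    // each frame: read results, free d_updateData
//   clearCoreData();                       // once, at shutdown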
hipError_t cudaCopyCore(CopyOnce* coreData)
{
hipError_t cudaStatus;
//COPY DATA TO THE GPU
//////////////////////
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
return cudaStatus;
}
// Allocate GPU buffers for the data
// CoreData
cudaStatus = hipMalloc((void**)&AIManager::GetInstance()->d_coreData, sizeof(CopyOnce));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
// Copy data to the gpu.
cudaStatus = hipMemcpy(AIManager::GetInstance()->d_coreData, coreData, sizeof(CopyOnce), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
return cudaStatus;
}
hipError_t cudaGPUBasic(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec)
{
//COPY DATA TO THE GPU
//////////////////////
CopyOnce* d_coreData = 0;
CopyEachFrame* d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
hipError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the data
// Agents
cudaStatus = hipMalloc((void**)&d_coreData, sizeof(CopyOnce));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Players
cudaStatus = hipMalloc((void**)&d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// the pointer members inside the structs need their own device allocations and copies
cudaStatus = hipMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(d_coreData, coreData, sizeof(CopyOnce), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_updateData, updateData, sizeof(CopyEachFrame), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
//RUN THE KERNELS ON THE GPU
////////////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSM, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( cudaFSM), dim3(gridSize), dim3(blockSize), 0, 0, d_coreData, d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//COPY THE DATA BACK OFF OF THE GPU
///////////////////////////////////
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(coreData, d_coreData, sizeof(CopyOnce), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed CoreData!");
goto Error;
}
cudaStatus = hipMemcpy(updateData, d_updateData, sizeof(CopyEachFrame), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed UpdateData!");
goto Error;
}
Error:
hipFree(d_coreData);
hipFree(d_updateData);
hipFree(d_agentPartitions);
hipFree(d_partitionPlayers);
return cudaStatus;
}
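// cudaGPUBasic above uploads and downloads both structs on every call. The variants below
// assume cudaCopyCore has already left d_coreData resident on the device, so each frame
// they only upload the CopyEachFrame block plus the two partition tables; results are
// fetched later by copyDataFromGPU.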
hipError_t cudaGPUCopyOnce(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad)
{
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
hipError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
// Update Data
cudaStatus = hipMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = hipMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = hipMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = hipMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//RUN THE KERNELS ON THE GPU
////////////////////////////
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers, 0, partitionCount);
// Round up according to array size
gridSize = (partitionCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphasePlayers), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "1st hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphaseAgents), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "2nd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
}
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSM, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( cudaFSM), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "3rd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//clear partition data as we don't need to copy it back //POTENTIAL PROBLEM HERE
hipFree(d_agentPartitions);
hipFree(d_partitionPlayers);
return cudaStatus;
}
hipError_t cudaGPUBroad(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad)
{
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
hipError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
// Update Data
cudaStatus = hipMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = hipMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = hipMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = hipMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers2, 0, partitionCount*coreData->myPlayers.MAXPLAYERS);
// Round up according to array size
gridSize = (partitionCount*coreData->myPlayers.MAXPLAYERS + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphasePlayers2), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers, partitionCount);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "1st hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents, 0, agentCount);
// Round up according to array size
//gridSize = (agentCount*partitionCount + blockSize - 1) / blockSize;
gridSize = (agentCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphaseAgents), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "2nd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
}
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSM, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( cudaFSM), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "3rd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//clear partition data as we don't need to copy it back
hipFree(d_agentPartitions);
hipFree(d_partitionPlayers);
return cudaStatus;
}
hipError_t cudaGPUBroadAgents(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad) {
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
hipError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
// Update Data
cudaStatus = hipMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = hipMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = hipMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = hipMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers2, 0, partitionCount*coreData->myPlayers.MAXPLAYERS);
// Round up according to array size
gridSize = (partitionCount*coreData->myPlayers.MAXPLAYERS + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphasePlayers2), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers, partitionCount);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "1st hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents2, 0, partitionCount*agentCount);
// Round up according to array size
//gridSize = (agentCount*partitionCount + blockSize - 1) / blockSize;
gridSize = (partitionCount*agentCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphaseAgents2), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "2nd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
}
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSM, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( cudaFSM), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "3rd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//clear partition data as we don't need to copy it back
hipFree(d_agentPartitions);
hipFree(d_partitionPlayers);
return cudaStatus;
}
hipError_t cudaGPUBroadAgents2(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad)
{
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
hipError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
// Update Data
cudaStatus = hipMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = hipMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = hipMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = hipMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers2, 0, partitionCount*coreData->myPlayers.MAXPLAYERS);
// Round up according to array size
gridSize = (partitionCount*coreData->myPlayers.MAXPLAYERS + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphasePlayers2), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers, partitionCount);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "1st hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents, 0, partitionCount*agentCount);
// Round up according to array size
//gridSize = (agentCount*partitionCount + blockSize - 1) / blockSize;
gridSize = (partitionCount*agentCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphaseAgents2), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "2nd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
}
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSM, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( cudaFSM), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "3rd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//clear partition data as we don't need to copy it back
hipFree(d_agentPartitions);
hipFree(d_partitionPlayers);
return cudaStatus;
}
hipError_t cudaGPUSplit(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad)
{
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
hipError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
// Update Data
cudaStatus = hipMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = hipMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = hipMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = hipMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers2, 0, partitionCount*coreData->myPlayers.MAXPLAYERS);
// Round up according to array size
gridSize = (partitionCount*coreData->myPlayers.MAXPLAYERS + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphasePlayers2), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers, partitionCount);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "1st hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents, 0, agentCount);
// Round up according to array size
//gridSize = (agentCount*partitionCount + blockSize - 1) / blockSize;
gridSize = (agentCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphaseAgents), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "2nd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
}
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSMSplit, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( cudaFSMSplit), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "3rd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//clear partition data as we don't need to copy it back
hipFree(d_agentPartitions);
hipFree(d_partitionPlayers);
return cudaStatus;
}
hipError_t cudaGPUSort(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad)
{
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
hipError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//create an index array and temporary storage arrays
static int *index = (int*) malloc(Agents::MAXAGENTS * sizeof(int));
static int *iStore = (int*) malloc(Agents::MAXAGENTS * sizeof(int));
static float *fStore = (float*) malloc(Agents::MAXAGENTS * sizeof(float));
//memset(index, 0, coreData->myAgents.MAXAGENTS * sizeof(int));
static AgentAbilities *aStore = (AgentAbilities*) malloc(Agents::MAXAGENTS * sizeof(AgentAbilities));
static PatrolLocations *vStore = (PatrolLocations*) malloc(Agents::MAXAGENTS * sizeof(PatrolLocations));
memset(aStore, 0, coreData->myAgents.MAXAGENTS * sizeof(AgentAbilities));
memset(vStore, 0, coreData->myAgents.MAXAGENTS * sizeof(PatrolLocations));
//TODO: aStore and vStore are allocated once but never freed; move their initialisation and cleanup outside this function to remove the leak
//free(aStore);
//free(vStore);
//fill index array
for (int i = 0; i < coreData->myAgents.MAXAGENTS; ++i)
{
index[i] = i;
}
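// thrust::sort_by_key sorts the state array and applies the same permutation to index;
// each gather/copy pair below then reorders one SoA field to match. Sketch with three
// agents: state {2,0,1}, index {0,1,2} -> after sort: state {0,1,2}, index {1,2,0};
// gather(index, index + n, x, fStore) gives fStore[i] = x[index[i]], copied back into x.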
//sort data via state
thrust::sort_by_key(coreData->myAgents.state, coreData->myAgents.state + agentCount, index);
cudaStatus = hipDeviceSynchronize();
//sort extra data via index
thrust::gather(index, index + agentCount, coreData->myAgents.x, fStore);
thrust::copy(fStore, fStore + agentCount, coreData->myAgents.x);
cudaStatus = hipDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.y, fStore);
thrust::copy(fStore, fStore + agentCount, coreData->myAgents.y);
cudaStatus = hipDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.z, fStore);
thrust::copy(fStore, fStore + agentCount, coreData->myAgents.z);
cudaStatus = hipDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.level, iStore);
thrust::copy(iStore, iStore + agentCount, coreData->myAgents.level);
cudaStatus = hipDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.targetLocation, iStore);
thrust::copy(iStore, iStore + agentCount, coreData->myAgents.targetLocation);
cudaStatus = hipDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.targetPlayer, iStore);
thrust::copy(iStore, iStore + agentCount, coreData->myAgents.targetPlayer);
cudaStatus = hipDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.waitedTime, fStore);
thrust::copy(fStore, fStore + agentCount, coreData->myAgents.waitedTime);
cudaStatus = hipDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.myAbilities, aStore);
thrust::copy(aStore, aStore + agentCount, coreData->myAgents.myAbilities);
cudaStatus = hipDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.patrolLocation, vStore);
thrust::copy(vStore, vStore + agentCount, coreData->myAgents.patrolLocation);
cudaStatus = hipDeviceSynchronize();
//count how many agents are in each state
for (int i = 0; i < coreData->myAgents.MAXAGENTS; ++i)
{
coreData->myAgents.stateCount[ coreData->myAgents.state[i] ]++;
}
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
//copy Core Data
cudaCopyCore(coreData);
// Update Data
cudaStatus = hipMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = hipMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = hipMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = hipMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = hipMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return cudaStatus;
}
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers2, 0, partitionCount*coreData->myPlayers.MAXPLAYERS);
// Round up according to array size
gridSize = (partitionCount*coreData->myPlayers.MAXPLAYERS + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphasePlayers2), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers, partitionCount);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "1st hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaBroadphaseAgents), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "2nd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
}
//run Patrol agents
if (coreData->myAgents.stateCount[0] != 0)
{
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunPatrol, 0, coreData->myAgents.stateCount[0]);
// Round up according to array size
gridSize = (coreData->myAgents.stateCount[0] + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaRunPatrol), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
}
//run stare agents
if (coreData->myAgents.stateCount[1] != 0)
{
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunStare, 0, coreData->myAgents.stateCount[1]);
// Round up according to array size
gridSize = (coreData->myAgents.stateCount[1] + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaRunStare), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
}
//run chase agents
if (coreData->myAgents.stateCount[2] != 0)
{
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunChase, 0, coreData->myAgents.stateCount[2]);
// Round up according to array size
gridSize = (coreData->myAgents.stateCount[2] + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaRunChase), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, msec);
}
//run attack agents
if (coreData->myAgents.stateCount[3] != 0)
{
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunAbility, 0, coreData->myAgents.stateCount[3]);
// Round up according to array size
gridSize = (coreData->myAgents.stateCount[3] + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaRunAbility), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, msec);
}
//run leash agents
if (coreData->myAgents.stateCount[4] != 0)
{
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunLeash, 0, coreData->myAgents.stateCount[4]);
// Round up according to array size
gridSize = (coreData->myAgents.stateCount[4] + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaRunLeash), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, msec);
}
//get the mingrid and blocksize
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunReduce, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( cudaRunReduce), dim3(gridSize), dim3(blockSize), 0, 0, AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, msec);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cudaStatus;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "3rd hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, hipGetErrorString(cudaStatus));
return cudaStatus;
}
//clear partition data as we don't need to copy it back
hipFree(d_agentPartitions);
hipFree(d_partitionPlayers);
return cudaStatus;
}
void clearCoreData()
{
hipFree(AIManager::GetInstance()->d_coreData);
}
hipError_t copyDataFromGPU(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec)
{
hipError_t cudaStatus;
//COPY THE DATA BACK OFF OF THE GPU
///////////////////////////////////
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(coreData, AIManager::GetInstance()->d_coreData, sizeof(CopyOnce), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed CoreData!");
return cudaStatus;
}
cudaStatus = hipMemcpy(updateData, AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed UpdateData!");
return cudaStatus;
}
//clear updateData so we can send it again
hipFree(AIManager::GetInstance()->d_updateData);
return cudaStatus;
}
3aba78bf97d7ab179b5f4dfbf13c4dd7b17836c7.cu | #include "kernal.cuh"
#include <cmath>
#include <assert.h>
//TODO CHANGE EVERYTHING TO POINTERS, WHY DIDN'T I DO THAT IN THE FIRST PLACE
//TODO GET RID OF ALL THE VECTOR3 TO FLOAT3 TRANSLATIONS
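//CheckBounding: axis-aligned overlap test between a partition (centre pos,
//extents halfDim) and the point nPos inflated by aggroRange - true when
//|pos - nPos| <= halfDim + aggroRange on all three axes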
__device__ bool CheckBounding(float3 nPos, float aggroRange, float3 pos, float3 halfDim)
{
float dist = abs(pos.x - nPos.x);
float sum = halfDim.x + aggroRange;
if(dist <= sum) {
dist = abs(pos.y - nPos.y);
sum = halfDim.y + aggroRange;
if(dist <= sum) {
dist = abs(pos.z - nPos.z);
sum = halfDim.z + aggroRange;
if(dist <= sum) {
//all three axes overlap, so the point is inside the expanded bounds
return true;
}
}
}
return false;
}
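//"together" states: one thread per agent, and cudaFSM runs both the per-frame
//behaviour and the outgoing transition checks of the agent's current state in
//a single kernel launch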
#pragma region Together States
__device__ void cudaPatrol(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
float MAXSPEED = 0.5F;
//at target
float diffX = coreData->myAgents.patrolLocation[a].loc[coreData->myAgents.targetLocation[a]].x - coreData->myAgents.x[a];
float diffZ = coreData->myAgents.patrolLocation[a].loc[coreData->myAgents.targetLocation[a]].z - coreData->myAgents.z[a];
float absX = abs(diffX);
float absZ = abs(diffZ);
//check it's close enough to the point
if (absX < 0.1f && absZ < 0.1f)
{
//get new target
coreData->myAgents.targetLocation[a]++;
coreData->myAgents.targetLocation[a] = coreData->myAgents.targetLocation[a] % 2; //need to fix this
}
else
{
//move to target
float dis = absX + absZ;
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
//find how much it needs to move
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
coreData->myAgents.x[a] += moveX * ((float(0) < diffX) - (diffX < float(0)));
coreData->myAgents.z[a] += moveZ * ((float(0) < diffZ) - (diffZ < float(0)));
}
//state transition
int i = 0;
while (i < 8 && agentsPartitions[((a)*8) + i] != -1)
{
int j = 0;
int part = agentsPartitions[(a*8) + i];
int partPlayer = (part*coreData->myPlayers.MAXPLAYERS);
while (j < coreData->myPlayers.MAXPLAYERS && partitionsPlayers[partPlayer+j] != -1)
{
//the player
short p = partitionsPlayers[partPlayer+j];
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if player close transition state to stare at player
float aggroRange = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]));
if (dist < aggroRange && !updateData->playerIsDead[p])
{
coreData->myAgents.state[a] = STARE_AT_PLAYER; //change state
coreData->myAgents.patrolLocation[a].loc[2].x = coreData->myAgents.x[a];
coreData->myAgents.patrolLocation[a].loc[2].y = coreData->myAgents.y[a];
coreData->myAgents.patrolLocation[a].loc[2].z = coreData->myAgents.z[a]; //set position it left patrol
coreData->myAgents.targetPlayer[a] = p; // player that is being stared at
i = coreData->myPlayers.MAXPLAYERS; // exit the loop
}
++j;
}
++i;
}
}
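//stare state: accumulate waitedTime and pull into CHASE_PLAYER once the target
//is inside the level-scaled pull range (or after 8 seconds); otherwise retarget
//the nearest living player found through the partition lists, or drop back to
//PATROL if nobody is in aggro range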
__device__ void cudaStareAtPlayer(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
int p = coreData->myAgents.targetPlayer[a]; // target player
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//the range of aggro, and pull, to the player
float aggroRange = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]));
float pullRange = (aggroRange * 0.75f) * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]);
coreData->myAgents.waitedTime[a] += msec;
if ((dist < pullRange || coreData->myAgents.waitedTime[a] > 8000.0f ) && !updateData->playerIsDead[p]) // if the player is in pull range
{
coreData->myAgents.state[a] = CHASE_PLAYER;
coreData->myAgents.waitedTime[a] = 0.0f;
}
else
{
// if the player isn't in pull range, check if there are any players closer
bool playerClose = false;
int i = 0;
while (i < 8 && agentsPartitions[((a)*8) + i] != -1)
{
int j = 0;
int part = agentsPartitions[(a*8) + i];
int partPlayer = (part*coreData->myPlayers.MAXPLAYERS);
while (j < coreData->myPlayers.MAXPLAYERS && partitionsPlayers[partPlayer+j] != -1)
{
//the player
short p2 = partitionsPlayers[partPlayer+j];
//calculate distance to player
float3 diffNew = float3();
diffNew.x = coreData->myPlayers.x[p2] - coreData->myAgents.x[a];
diffNew.y = coreData->myPlayers.y[p2] - coreData->myAgents.y[a];
diffNew.z = coreData->myPlayers.z[p2] - coreData->myAgents.z[a];
float distNew = sqrtf((diffNew.x*diffNew.x)+(diffNew.y*diffNew.y)+(diffNew.z*diffNew.z));
// if the new distance is less, switch target
if (distNew <= dist && !updateData->playerIsDead[p2])
{
coreData->myAgents.targetPlayer[a] = p2;
dist = distNew;
float aggroRangeNew = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * (coreData->myAgents.level[a] / coreData->myPlayers.level[p2]));
if (dist < aggroRangeNew)
{
playerClose = true;
}
}
++j;
}
++i;
}
// if there are no close players at all
if (!playerClose)
{
coreData->myAgents.waitedTime[a] = 0.0f;
coreData->myAgents.state[a] = PATROL;
coreData->myAgents.targetPlayer[a] = -1;
}
}
}
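//chase state: leash back if the agent is more than LEASHRANGE from the point
//saved in patrolLocation[2] or the target died; otherwise close on the player
//and switch to USE_ABILITY inside ATTACKRANGE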
__device__ void cudaChasePlayer(CopyOnce* coreData, CopyEachFrame* updateData, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
float LEASHRANGE = 3200.0f;
float ATTACKRANGE = 75.0f;
float MAXSPEED = 0.5F;
int p = coreData->myAgents.targetPlayer[a];
//calculate distance to leash spot
float3 diff = float3();
diff.x = coreData->myAgents.patrolLocation[a].loc[2].x - coreData->myAgents.x[a];
diff.y = coreData->myAgents.patrolLocation[a].loc[2].y - coreData->myAgents.y[a];
diff.z = coreData->myAgents.patrolLocation[a].loc[2].z - coreData->myAgents.z[a];
float leashDist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
// if it's too far away or if the player died, leash back
if (leashDist > LEASHRANGE || updateData->playerIsDead[p] || coreData->myAgents.targetPlayer[a] == -1)
{
coreData->myAgents.state[a] = LEASH;
coreData->myAgents.targetPlayer[a] = -1;
}
else
{
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if close to the player, switch state to use ability
if (dist < ATTACKRANGE)
{
coreData->myAgents.state[a] = USE_ABILITY;
}
//move towards the player's location
float absX = abs(diff.x);
float absZ = abs(diff.z);
//move to target
float dis = absX + absZ;
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
coreData->myAgents.x[a] += moveX * ((float(0) < diff.x) - (diff.x < float(0)));
coreData->myAgents.z[a] += moveZ * ((float(0) < diff.z) - (diff.z < float(0)));
}
}
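//leash state: walk back to the saved leash point and resume PATROL once within
//0.1 units on both the x and z axes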
__device__ void cudaLeashBack(CopyOnce* coreData, CopyEachFrame* updateData, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
float MAXSPEED = 0.5F;
//calculate distance to leash spot
float diffX = coreData->myAgents.patrolLocation[a].loc[2].x - coreData->myAgents.x[a];
float diffZ = coreData->myAgents.patrolLocation[a].loc[2].z - coreData->myAgents.z[a];
float absX = abs(diffX);
float absZ = abs(diffZ);
//check it's close enough to the point
if (absX < 0.1f && absZ < 0.1f)
{
//change back to patrol
coreData->myAgents.state[a] = PATROL;
}
else
{
//move to target
float dis = absX + absZ;
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
coreData->myAgents.x[a] += moveX * ((float(0) < diffX) - (diffX < float(0)));
coreData->myAgents.z[a] += moveZ * ((float(0) < diffZ) - (diffZ < float(0)));
}
}
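//cooldown tick shared by every state: the comparison is folded into an integer
//multiplier so the per-ability decrement stays branchless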
__device__ void cudaReduceCooldowns(CopyOnce* coreData, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0 ; i < coreData->myAgents.myAbilities->MAXABILITIES; ++i)
{
int check = coreData->myAgents.myAbilities[a].abil[i].cooldown > 0;
coreData->myAgents.myAbilities[a].abil[i].cooldown -= msec * check;
}
}
__device__ void cudaUseAbility(CopyOnce* coreData, CopyEachFrame* updateData, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
float ATTACKRANGE = 75.0f;
int p = coreData->myAgents.targetPlayer[a];
if (updateData->playerIsDead[p]) // if the player is dead
{
coreData->myAgents.state[a] = LEASH; //leash back
coreData->myAgents.targetPlayer[a] = -1; // set the target player to null
}
else
{
//TODO ADD ABILITIES BACK
//look through abilities via priority until one is found not on cooldown
int i = 0;
while (i < coreData->myAgents.myAbilities->MAXABILITIES && coreData->myAgents.myAbilities[a].abil[i].cooldown > 0.001f) {
i++;
}
//cast ability
if (i < coreData->myAgents.myAbilities->MAXABILITIES && coreData->myAgents.myAbilities[a].abil[i].cooldown < 0.001f)
{
coreData->myAgents.myAbilities[a].abil[i].cooldown = coreData->myAgents.myAbilities[a].abil[i].maxCooldown;
coreData->myPlayers.hp[coreData->myAgents.targetPlayer[a]] -= coreData->myAgents.myAbilities[a].abil[i].damage;
}
//if the player goes out of range, change state to chase
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if the player is out of attack range, go back to chasing
if (dist > (ATTACKRANGE))
{
coreData->myAgents.state[a] = CHASE_PLAYER;
}
}
}
__global__ void cudaFSM(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
switch (coreData->myAgents.state[a]) {
case PATROL: cudaPatrol(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
break;
case STARE_AT_PLAYER: cudaStareAtPlayer(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
break;
case CHASE_PLAYER: cudaChasePlayer(coreData, updateData, msec);
break;
case LEASH: cudaLeashBack(coreData, updateData, msec);
break;
case USE_ABILITY: cudaUseAbility(coreData, updateData, msec);
break;
};
cudaReduceCooldowns(coreData, msec);
}
#pragma endregion
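//"separated" states: behaviour (...State) and transitions (...Transitions) are
//split so agents can be processed in per-state batches. sCount is the number of
//agents in all earlier states; with SPLIT_GPU / SPLIT_GPU_BROAD defined the
//agent arrays are treated as grouped by state, otherwise the mod flag masks out
//threads that fall outside the state's slice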
#pragma region Separated States
__device__ void cudaPatrolState(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec, int sCount = 0)
{
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
float MAXSPEED = 0.5F;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < coreData->myAgents.stateCount[0]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//at target
float diffX = coreData->myAgents.patrolLocation[a].loc[coreData->myAgents.targetLocation[a]].x - coreData->myAgents.x[a];
float diffZ = coreData->myAgents.patrolLocation[a].loc[coreData->myAgents.targetLocation[a]].z - coreData->myAgents.z[a];
float absX = abs(diffX);
float absZ = abs(diffZ);
//check it's close enough to the point
if (absX < 0.1f && absZ < 0.1f && mod)
{
//get new target
coreData->myAgents.targetLocation[a]++;
coreData->myAgents.targetLocation[a] = coreData->myAgents.targetLocation[a] % 2; //need to fix this
}
else
{
//move to target
float dis = absX + absZ;
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
//find how much it needs to move
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
coreData->myAgents.x[a] += (moveX * ((float(0) < diffX) - (diffX < float(0)))) * mod;
coreData->myAgents.z[a] += (moveZ * ((float(0) < diffZ) - (diffZ < float(0)))) * mod;
}
}
__device__ void cudaPatrolTransitions(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec, int sCount = 0) {
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < coreData->myAgents.stateCount[0]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//state transition
int i = 0;
while (i < 8 && agentsPartitions[((a)*8) + i] != -1)
{
int j = 0;
int part = agentsPartitions[(a*8) + i];
int partPlayer = (part*coreData->myPlayers.MAXPLAYERS);
while (j < coreData->myPlayers.MAXPLAYERS && partitionsPlayers[partPlayer+j] != -1)
{
//the player
short p = partitionsPlayers[partPlayer+j];
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if player close transition state to stare at player
float aggroRange = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]));
if (dist < aggroRange && !updateData->playerIsDead[p] && mod)
{
coreData->myAgents.state[a] = STARE_AT_PLAYER; //change state
coreData->myAgents.patrolLocation[a].loc[2].x = coreData->myAgents.x[a];
coreData->myAgents.patrolLocation[a].loc[2].y = coreData->myAgents.y[a];
coreData->myAgents.patrolLocation[a].loc[2].z = coreData->myAgents.z[a]; //set position it left patrol
coreData->myAgents.targetPlayer[a] = p; // player that is being stared at
i = coreData->myPlayers.MAXPLAYERS; // exit the loop
}
++j;
}
++i;
}
}
__device__ void cudaStareAtPlayerState(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec, int sCount = 0)
{
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[1]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
coreData->myAgents.waitedTime[a] += msec * mod;
}
__device__ void cudaStareAtPlayerTransitions(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec, int sCount = 0)
{
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
int p = coreData->myAgents.targetPlayer[a]; // target player
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[1]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//the range of aggro, and pull, to the player
float aggroRange = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]));
float pullRange = (aggroRange * 0.75f) * ((float)coreData->myAgents.level[a] / (float)coreData->myPlayers.level[p]);
if ((dist < pullRange || coreData->myAgents.waitedTime[a] > 8000.0f ) && !updateData->playerIsDead[p] && mod) // if the player is in pull range
{
coreData->myAgents.state[a] = CHASE_PLAYER;
coreData->myAgents.waitedTime[a] = 0.0f;
coreData->myAgents.y[a] += 5.0f;
}
else
{
// if the player isn't in pull range, check if there are any players closer
bool playerClose = false;
int i = 0;
while (i < 8 && agentsPartitions[((a)*8) + i] != -1)
{
int j = 0;
int part = agentsPartitions[(a*8) + i];
int partPlayer = (part*coreData->myPlayers.MAXPLAYERS);
while (j < coreData->myPlayers.MAXPLAYERS && partitionsPlayers[partPlayer+j] != -1)
{
//the player
short p2 = partitionsPlayers[partPlayer+j];
//calculate distance to player
float3 diffNew = float3();
diffNew.x = coreData->myPlayers.x[p2] - coreData->myAgents.x[a];
diffNew.y = coreData->myPlayers.y[p2] - coreData->myAgents.y[a];
diffNew.z = coreData->myPlayers.z[p2] - coreData->myAgents.z[a];
float distNew = sqrtf((diffNew.x*diffNew.x)+(diffNew.y*diffNew.y)+(diffNew.z*diffNew.z));
// if the new distance is less, switch target
if (distNew <= dist && !updateData->playerIsDead[p2] && mod)
{
coreData->myAgents.targetPlayer[a] = p2;
dist = distNew;
float aggroRangeNew = min(coreData->myAgents.AGGRORANGE, coreData->myAgents.AGGRORANGE * (coreData->myAgents.level[a] / coreData->myPlayers.level[p2]));
if (dist < aggroRangeNew)
{
playerClose = true;
}
}
++j;
}
++i;
}
// if there are no close players at all
if (!playerClose && mod)
{
coreData->myAgents.waitedTime[a] = 0.0f;
coreData->myAgents.state[a] = PATROL;
coreData->myAgents.targetPlayer[a] = -1;
}
}
}
__device__ void cudaChasePlayerState(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0)
{
float MAXSPEED = 0.5F;
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
int p = coreData->myAgents.targetPlayer[a];
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[2]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
//move towards the player's location
float absX = abs(diff.x);
float absZ = abs(diff.z);
//move to target
float dis = absX + absZ;
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
coreData->myAgents.x[a] += (moveX * ((float(0) < diff.x) - (diff.x < float(0)))) * mod;
coreData->myAgents.z[a] += (moveZ * ((float(0) < diff.z) - (diff.z < float(0)))) * mod;
}
__device__ void cudaChasePlayerTransitions(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0) {
float LEASHRANGE = 3200.0f;
float ATTACKRANGE = 75.0f;
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
int p = coreData->myAgents.targetPlayer[a];
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[2]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//calculate distance to leash spot
float3 diff = float3();
diff.x = coreData->myAgents.patrolLocation[a].loc[2].x - coreData->myAgents.x[a];
diff.y = coreData->myAgents.patrolLocation[a].loc[2].y - coreData->myAgents.y[a];
diff.z = coreData->myAgents.patrolLocation[a].loc[2].z - coreData->myAgents.z[a];
float leashDist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
// if it's too far away or if the player died, leash back
if ((leashDist > LEASHRANGE || updateData->playerIsDead[p]) && mod)
{
coreData->myAgents.state[a] = LEASH;
coreData->myAgents.targetPlayer[a] = -1;
}
else
{
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if close to the player, switch state to use ability
if (dist < ATTACKRANGE && mod)
{
coreData->myAgents.state[a] = USE_ABILITY;
}
}
}
__device__ void cudaUseAbilityState(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0)
{
int a = (blockIdx.x * blockDim.x + threadIdx.x) + sCount;
int p = coreData->myAgents.targetPlayer[a];
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[3]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//look through abilities via priority until one is found not on cooldown
int i = 0;
while (i < coreData->myAgents.myAbilities->MAXABILITIES && coreData->myAgents.myAbilities[a].abil[i].cooldown > 0.001f) {
++i;
}
//cast ability
if (i < coreData->myAgents.myAbilities->MAXABILITIES && coreData->myAgents.myAbilities[a].abil[i].cooldown < 0.001f && mod)
{
coreData->myAgents.myAbilities[a].abil[i].cooldown = coreData->myAgents.myAbilities[a].abil[i].maxCooldown;
coreData->myPlayers.hp[p] -= coreData->myAgents.myAbilities[a].abil[i].damage;
}
}
__device__ void cudaUseAbilityTransitions(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0) {
int a = blockIdx.x * blockDim.x + threadIdx.x + sCount;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[3]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
float ATTACKRANGE = 75.0f;
int p = coreData->myAgents.targetPlayer[a];
if (updateData->playerIsDead[p] && mod) // if the player is dead
{
coreData->myAgents.state[a] = LEASH; //leash back
coreData->myAgents.targetPlayer[a] = -1; // set the target player to null
}
else
{
//if the player goes out of range, change state to chase
//calculate distance to player
float3 diff = float3();
diff.x = coreData->myPlayers.x[p] - coreData->myAgents.x[a];
diff.y = coreData->myPlayers.y[p] - coreData->myAgents.y[a];
diff.z = coreData->myPlayers.z[p] - coreData->myAgents.z[a];
float dist = sqrtf((diff.x*diff.x)+(diff.y*diff.y)+(diff.z*diff.z));
//if the player is out of attack range, go back to chasing
if (dist > (ATTACKRANGE) && mod)
{
coreData->myAgents.state[a] = CHASE_PLAYER;
}
}
}
__device__ void cudaLeashBackState(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0)
{
int a = blockIdx.x * blockDim.x + threadIdx.x + sCount;
float MAXSPEED = 0.5F;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[4]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//calculate distance to leash spot
float diffX = coreData->myAgents.patrolLocation[a].loc[2].x - coreData->myAgents.x[a];
float diffZ = coreData->myAgents.patrolLocation[a].loc[2].z - coreData->myAgents.z[a];
float absX = abs(diffX);
float absZ = abs(diffZ);
//move to target
float dis = absX + absZ;
float moveX = ((absX / dis) * MAXSPEED) * msec;
float moveZ = ((absZ / dis) * MAXSPEED) * msec;
moveX = min(moveX, absX);
moveZ = min(moveZ, absZ);
//set new position
coreData->myAgents.x[a] += (moveX * ((float(0) < diffX) - (diffX < float(0)))) * mod;
coreData->myAgents.z[a] += (moveZ * ((float(0) < diffZ) - (diffZ < float(0)))) * mod;
}
__device__ void cudaLeashBackTransitions(CopyOnce* coreData, CopyEachFrame* updateData, float msec, int sCount = 0) {
int a = blockIdx.x * blockDim.x + threadIdx.x + sCount;
#if defined (SPLIT_GPU) || defined (SPLIT_GPU_BROAD)
int mod = 1;
#else
int mod = a < sCount + coreData->myAgents.stateCount[4]; // if this is a correct thread
a = a % coreData->myAgents.MAXAGENTS;
#endif
//calculate distance to leash spot
float diffX = coreData->myAgents.patrolLocation[a].loc[2].x - coreData->myAgents.x[a];
float diffZ = coreData->myAgents.patrolLocation[a].loc[2].z - coreData->myAgents.z[a];
float absX = abs(diffX);
float absZ = abs(diffZ);
//check it's close enough to the point
if (absX < 0.1f && absZ < 0.1f && mod)
{
//change back to patrol
coreData->myAgents.state[a] = PATROL;
}
}
#pragma region Globals
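//cudaFSMSplit keeps one thread per agent and branches on state; the cudaRun*
//kernels below are meant to be launched with one thread per agent in that
//state and derive their slice offset from the cumulative counts of the
//preceding states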
__global__ void cudaFSMSplit(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
switch (coreData->myAgents.state[a]) {
case PATROL: cudaPatrolState(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
cudaPatrolTransitions(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
break;
case STARE_AT_PLAYER: cudaStareAtPlayerState(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
cudaStareAtPlayerTransitions(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
break;
case CHASE_PLAYER: cudaChasePlayerState(coreData, updateData, msec);
cudaChasePlayerTransitions(coreData, updateData, msec);
break;
case LEASH: cudaLeashBackState(coreData, updateData, msec);
cudaLeashBackTransitions(coreData, updateData, msec);
break;
case USE_ABILITY: cudaUseAbilityState(coreData, updateData, msec);
cudaUseAbilityTransitions(coreData, updateData, msec);
break;
};
cudaReduceCooldowns(coreData, msec);
}
__global__ void cudaRunPatrol(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec) {
//the agent index is derived inside the state and transition functions
cudaPatrolState(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
cudaPatrolTransitions(coreData, updateData, agentsPartitions, partitionsPlayers, msec);
}
__global__ void cudaRunStare(CopyOnce* coreData, CopyEachFrame* updateData, short* agentsPartitions, short* partitionsPlayers, float msec) {
int count = coreData->myAgents.stateCount[0];
cudaStareAtPlayerState(coreData, updateData, agentsPartitions, partitionsPlayers, msec, count);
cudaStareAtPlayerTransitions(coreData, updateData, agentsPartitions, partitionsPlayers, msec, count);
}
__global__ void cudaRunChase(CopyOnce* coreData, CopyEachFrame* updateData, float msec) {
int count = coreData->myAgents.stateCount[0] + coreData->myAgents.stateCount[1];
cudaChasePlayerState(coreData, updateData, msec, count);
cudaChasePlayerTransitions(coreData, updateData, msec, count);
}
__global__ void cudaRunAbility(CopyOnce* coreData, CopyEachFrame* updateData, float msec) {
int count = coreData->myAgents.stateCount[0] + coreData->myAgents.stateCount[1] + coreData->myAgents.stateCount[2];
cudaUseAbilityState(coreData, updateData, msec, count);
cudaUseAbilityTransitions(coreData, updateData, msec, count);
}
__global__ void cudaRunLeash(CopyOnce* coreData, CopyEachFrame* updateData, float msec) {
int count = coreData->myAgents.stateCount[0] + coreData->myAgents.stateCount[1] + coreData->myAgents.stateCount[2] + coreData->myAgents.stateCount[3];
cudaLeashBackState(coreData, updateData, msec, count);
cudaLeashBackTransitions(coreData, updateData, msec, count);
}
__global__ void cudaRunReduce(CopyOnce* coreData, CopyEachFrame* updateData, float msec) {
cudaReduceCooldowns(coreData, msec);
}
#pragma endregion
#pragma endregion
#pragma region Broadphase
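//broadphase: players are binned into the world partitions they occupy
//(partitionsPlayers / playerCount) and each agent records up to 8 partitions
//overlapping its aggro range (agentsPartitions), so the FSM only has to test
//the players listed for those partitions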
__global__ void cudaBroadphasePlayers(CopyOnce* coreData, CopyEachFrame* updateData, short* partitionsPlayers)
{
int pa = blockIdx.x * blockDim.x + threadIdx.x;
//position of this partition
float3 pos = float3();
pos.x = coreData->myPartitions.pos[pa].x;
pos.y = coreData->myPartitions.pos[pa].y;
pos.z = coreData->myPartitions.pos[pa].z;
if (pos.x != 0 && pos.y != 0 && pos.z != 0)
{
updateData->playerCount[pa] = 0;
// half dimensions of the partitions
float3 halfDim = float3();
halfDim.x = coreData->myPartitions.halfDim.x;
halfDim.y = coreData->myPartitions.halfDim.y;
halfDim.z = coreData->myPartitions.halfDim.z;
//loop through the players
for (int j = 0; j < coreData->myPlayers.MAXPLAYERS; ++j) {
//check the player exists
if(!updateData->playerIsDead[j] && coreData->myPlayers.maxHP[j] > 0)
{
//players position
float3 playerPos = float3();
playerPos.x = coreData->myPlayers.x[j];
playerPos.y = coreData->myPlayers.y[j];
playerPos.z = coreData->myPlayers.z[j];
//check if it's in the partition
if (CheckBounding(playerPos, 0, pos, halfDim))
{
//add to the partitions players
partitionsPlayers[(pa*coreData->myPlayers.MAXPLAYERS) + updateData->playerCount[pa]] = j;
++updateData->playerCount[pa];
}
}
}
}
}
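//variant 2: one thread per (partition, player) pair instead of one per
//partition; the first player thread of each partition resets the count and
//slots are claimed with atomicAdd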
__global__ void cudaBroadphasePlayers2(CopyOnce* coreData, CopyEachFrame* updateData, short* partitionsPlayers, const int partitionCount)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
int part = t % partitionCount;
int p = t / partitionCount;
if (p == 0) updateData->playerCount[part] = 0;
__syncthreads();
if(!updateData->playerIsDead[p] && coreData->myPlayers.maxHP[p] > 0) {
//players position
float3 playerPos = float3();
playerPos.x = coreData->myPlayers.x[p];
playerPos.y = coreData->myPlayers.y[p];
playerPos.z = coreData->myPlayers.z[p];
// half dimensions of the partitions
float3 halfDim = float3();
halfDim.x = coreData->myPartitions.halfDim.x;
halfDim.y = coreData->myPartitions.halfDim.y;
halfDim.z = coreData->myPartitions.halfDim.z;
//position of this partition
float3 partPos = float3();
partPos.x = coreData->myPartitions.pos[part].x;
partPos.y = coreData->myPartitions.pos[part].y;
partPos.z = coreData->myPartitions.pos[part].z;
//check if it's in the partition
if (CheckBounding(playerPos, 0, partPos, halfDim))
{
//add to the partitions players
int slot = (part*coreData->myPlayers.MAXPLAYERS) + atomicAdd(&updateData->playerCount[part], 1);
__syncthreads();
partitionsPlayers[slot] = p;
}
}
}
__global__ void cudaBroadphasePlayersCondence(CopyOnce* coreData, CopyEachFrame* updateData, short* partitionsPlayers, int partitionCount)
{
int part = blockIdx.x * blockDim.x + threadIdx.x;
updateData->playerCount[part] = 0;
for (int i = 0; i < coreData->myPlayers.MAXPLAYERS; ++i)
{
if (partitionsPlayers[(part*coreData->myPlayers.MAXPLAYERS) + i] != -1)
{
partitionsPlayers[(part*coreData->myPlayers.MAXPLAYERS) + updateData->playerCount[part]] = partitionsPlayers[(part*coreData->myPlayers.MAXPLAYERS) + i];
++updateData->playerCount[part];
}
}
}
__global__ void cudaBroadphaseAgents(CopyOnce* coreData, CopyEachFrame* updateData,short* agentsPartitions, const int partitionCount)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
float3 agentPos = float3();
agentPos.x = coreData->myAgents.x[a];
agentPos.y = coreData->myAgents.y[a];
agentPos.z = coreData->myAgents.z[a];
// half dimensions of the partitions
float3 halfDim = float3();
halfDim.x = coreData->myPartitions.halfDim.x;
halfDim.y = coreData->myPartitions.halfDim.y;
halfDim.z = coreData->myPartitions.halfDim.z;
int p = 0;
//loop through the world partitions
for (int j = 0; j < partitionCount; ++j) {
//position of this partition
float3 pos = float3();
pos.x = coreData->myPartitions.pos[j].x;
pos.y = coreData->myPartitions.pos[j].y;
pos.z = coreData->myPartitions.pos[j].z;
//check if the agent is in this partition
if (pos.x != 0 && pos.y != 0 && pos.z != 0){
if (CheckBounding(agentPos, coreData->myAgents.AGGRORANGE, pos, halfDim))
{
agentsPartitions[(a*8) + p] = j;
++p;
}
}
}
}
__global__ void cudaBroadphaseAgents2(CopyOnce* coreData, CopyEachFrame* updateData,short* agentsPartitions, const int partitionCount)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
int part = t % partitionCount;
int a = t / partitionCount;
if (part == 0) coreData->myAgents.partCount[a] = 0;
__syncthreads();
float3 agentPos = float3();
agentPos.x = coreData->myAgents.x[a];
agentPos.y = coreData->myAgents.y[a];
agentPos.z = coreData->myAgents.z[a];
// half dimensions of the partitions
float3 halfDim = float3();
halfDim.x = coreData->myPartitions.halfDim.x;
halfDim.y = coreData->myPartitions.halfDim.y;
halfDim.z = coreData->myPartitions.halfDim.z;
//position of this partition
float3 pos = float3();
pos.x = coreData->myPartitions.pos[part].x;
pos.y = coreData->myPartitions.pos[part].y;
pos.z = coreData->myPartitions.pos[part].z;
//check if the agent is in this partition
if (pos.x != 0 && pos.y != 0 && pos.z != 0){
if (CheckBounding(agentPos, coreData->myAgents.AGGRORANGE, pos, halfDim))
{
int v = (a*8) + atomicAdd(&coreData->myAgents.partCount[a], 1);
__syncthreads();
agentsPartitions[v] = part;
}
}
}
#pragma endregion
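//host-side entry points: cudaCopyCore uploads the persistent CopyOnce block
//once; cudaGPUBasic is the self-contained version (upload, run cudaFSM, copy
//back), while the cudaGPUCopyOnce/Broad/BroadAgents variants reuse the core
//data already resident on the device, optionally rebuild the broadphase there,
//run cudaFSM and leave the results on the GPU to be copied back separately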
cudaError_t cudaCopyCore(CopyOnce* coreData)
{
cudaError_t cudaStatus;
//COPY DATA TO THE GPU
//////////////////////
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
return cudaStatus;
}
// Allocate GPU buffers for the data
// CoreData
cudaStatus = cudaMalloc((void**)&AIManager::GetInstance()->d_coreData, sizeof(CopyOnce));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
fprintf(stderr, "%s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// Copy data to the gpu.
cudaStatus = cudaMemcpy(AIManager::GetInstance()->d_coreData, coreData, sizeof(CopyOnce), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
return cudaStatus;
}
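// Minimal host-side usage sketch (illustration only, not part of this file's
// API): cudaCopyCore runs once, then one of the cudaGPU* update functions runs
// per frame. The copy-back/cleanup helpers named here (copyDataFromGPU,
// clearCoreData) are the ones used elsewhere in this project and are assumed
// to be declared in kernal.cuh.
//
//   CopyOnce core; CopyEachFrame frame;  // filled in by the game each frame
//   cudaCopyCore(&core);                 // one-off upload of persistent data
//   while (gameRunning) {
//       cudaGPUCopyOnce(&core, &frame, agentCount, partitionCount, msec, true);
//       copyDataFromGPU(&core, &frame, agentCount, partitionCount, msec);
//   }
//   clearCoreData();                     // release the persistent device buffer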
cudaError_t cudaGPUBasic(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec)
{
//COPY DATA TO THE GPU
//////////////////////
CopyOnce* d_coreData = 0;
CopyEachFrame* d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
cudaError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the data
// Agents
cudaStatus = cudaMalloc((void**)&d_coreData, sizeof(CopyOnce));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Players
cudaStatus = cudaMalloc((void**)&d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// allocate separate device buffers for the arrays the structs reference through pointers
cudaStatus = cudaMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(d_coreData, coreData, sizeof(CopyOnce), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_updateData, updateData, sizeof(CopyEachFrame), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
//RUN THE KERNELS ON THE GPU
////////////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSM, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
cudaFSM<<<gridSize, blockSize>>>(d_coreData, d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//COPY THE DATA BACK OFF OF THE GPU
///////////////////////////////////
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(coreData, d_coreData, sizeof(CopyOnce), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed CoreData!");
goto Error;
}
cudaStatus = cudaMemcpy(updateData, d_updateData, sizeof(CopyEachFrame), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed UpdateData!");
goto Error;
}
Error:
cudaFree(d_coreData);
cudaFree(d_updateData);
cudaFree(d_agentPartitions);
cudaFree(d_partitionPlayers);
return cudaStatus;
}
cudaError_t cudaGPUCopyOnce(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad)
{
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
cudaError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
// Update Data
cudaStatus = cudaMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = cudaMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = cudaMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = cudaMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//RUN THE KERNELS ON THE GPU
////////////////////////////
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers, 0, partitionCount);
// Round up according to array size
gridSize = (partitionCount + blockSize - 1) / blockSize;
cudaBroadphasePlayers<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "1st cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
cudaBroadphaseAgents<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "2nd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
}
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSM, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
cudaFSM<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "3rd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//clear partition data as we don't need to copy it back //POTENTIAL PROBLEM HERE
cudaFree(d_agentPartitions);
cudaFree(d_partitionPlayers);
return cudaStatus;
}
cudaError_t cudaGPUBroad(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad)
{
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
cudaError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
// Update Data
cudaStatus = cudaMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = cudaMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = cudaMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = cudaMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers2, 0, partitionCount*coreData->myPlayers.MAXPLAYERS);
// Round up according to array size
gridSize = (partitionCount*coreData->myPlayers.MAXPLAYERS + blockSize - 1) / blockSize;
cudaBroadphasePlayers2<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers, partitionCount);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "1st cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents, 0, agentCount);
// Round up according to array size
//gridSize = (agentCount*partitionCount + blockSize - 1) / blockSize;
gridSize = (agentCount + blockSize - 1) / blockSize;
cudaBroadphaseAgents<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "2nd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
}
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSM, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
cudaFSM<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "3rd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//clear partition data as we don't need to copy it back
cudaFree(d_agentPartitions);
cudaFree(d_partitionPlayers);
return cudaStatus;
}
cudaError_t cudaGPUBroadAgents(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad) {
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
cudaError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
// Update Data
cudaStatus = cudaMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = cudaMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = cudaMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = cudaMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers2, 0, partitionCount*coreData->myPlayers.MAXPLAYERS);
// Round up according to array size
gridSize = (partitionCount*coreData->myPlayers.MAXPLAYERS + blockSize - 1) / blockSize;
cudaBroadphasePlayers2<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers, partitionCount);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "1st cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents2, 0, partitionCount*agentCount);
// Round up according to array size
//gridSize = (agentCount*partitionCount + blockSize - 1) / blockSize;
gridSize = (partitionCount*agentCount + blockSize - 1) / blockSize;
cudaBroadphaseAgents2<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "2nd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
}
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSM, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
cudaFSM<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "3rd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//clear partition data as we don't need to copy it back
cudaFree(d_agentPartitions);
cudaFree(d_partitionPlayers);
return cudaStatus;
}
cudaError_t cudaGPUBroadAgents2(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad)
{
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
cudaError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
// Update Data
cudaStatus = cudaMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = cudaMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = cudaMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = cudaMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers2, 0, partitionCount*coreData->myPlayers.MAXPLAYERS);
// Round up according to array size
gridSize = (partitionCount*coreData->myPlayers.MAXPLAYERS + blockSize - 1) / blockSize;
cudaBroadphasePlayers2<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers, partitionCount);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "1st cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents2, 0, partitionCount*agentCount);
// Round up according to array size
//gridSize = (agentCount*partitionCount + blockSize - 1) / blockSize;
gridSize = (partitionCount*agentCount + blockSize - 1) / blockSize;
cudaBroadphaseAgents2<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "2nd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
}
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSM, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
cudaFSM<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "3rd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//free the partition index buffers as we don't need to copy them back
cudaFree(d_agentPartitions);
cudaFree(d_partitionPlayers);
return cudaStatus;
}
cudaError_t cudaGPUSplit(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad)
{
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
cudaError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
// Update Data
cudaStatus = cudaMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = cudaMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = cudaMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = cudaMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers2, 0, partitionCount*coreData->myPlayers.MAXPLAYERS);
// Round up according to array size
gridSize = (partitionCount*coreData->myPlayers.MAXPLAYERS + blockSize - 1) / blockSize;
cudaBroadphasePlayers2<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers, partitionCount);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "1st cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents, 0, agentCount);
// Round up according to array size
//gridSize = (agentCount*partitionCount + blockSize - 1) / blockSize;
gridSize = (agentCount + blockSize - 1) / blockSize;
cudaBroadphaseAgents<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "2nd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
}
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaFSMSplit, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
cudaFSMSplit<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "3rd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//free the partition index buffers as we don't need to copy them back
cudaFree(d_agentPartitions);
cudaFree(d_partitionPlayers);
return cudaStatus;
}
cudaError_t cudaGPUSort(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec, bool runBroad)
{
AIManager::GetInstance()->d_updateData = 0;
short* d_agentPartitions = 0;
short* d_partitionPlayers = 0;
cudaError_t cudaStatus;
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
//create an index array and storage arrays
static int *index = (int*) malloc(Agents::MAXAGENTS * sizeof(int));
static int *iStore = (int*) malloc(Agents::MAXAGENTS * sizeof(int));
static float *fStore = (float*) malloc(Agents::MAXAGENTS * sizeof(float));
//memset(index, 0, coreData->myAgents.MAXAGENTS * sizeof(int));
static AgentAbilities *aStore = (AgentAbilities*) malloc(Agents::MAXAGENTS * sizeof(AgentAbilities));
static PatrolLocations *vStore = (PatrolLocations*) malloc(Agents::MAXAGENTS * sizeof(PatrolLocations));
memset(aStore, 0, coreData->myAgents.MAXAGENTS * sizeof(AgentAbilities));
memset(vStore, 0, coreData->myAgents.MAXAGENTS * sizeof(PatrolLocations));
//TODO: aStore/vStore are static and deliberately allocated only once; move their initialisation (and the matching frees below) outside this function to remove the leak.
//free(aStore);
//free(vStore);
//fill index array
for (int i = 0; i < coreData->myAgents.MAXAGENTS; ++i)
{
index[i] = i;
}
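// index now holds the identity permutation 0..MAXAGENTS-1; the key sort below reorders
// it so that index[j] is the original slot of the agent that ends up in position j.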
//sort data via state
thrust::sort_by_key(coreData->myAgents.state, coreData->myAgents.state + agentCount, index);
cudaStatus = cudaDeviceSynchronize();
//sort extra data via index
thrust::gather(index, index + agentCount, coreData->myAgents.x, fStore);
thrust::copy(fStore, fStore + agentCount, coreData->myAgents.x);
cudaStatus = cudaDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.y, fStore);
thrust::copy(fStore, fStore + agentCount, coreData->myAgents.y);
cudaStatus = cudaDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.z, fStore);
thrust::copy(fStore, fStore + agentCount, coreData->myAgents.z);
cudaStatus = cudaDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.level, iStore);
thrust::copy(iStore, iStore + agentCount, coreData->myAgents.level);
cudaStatus = cudaDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.targetLocation, iStore);
thrust::copy(iStore, iStore + agentCount, coreData->myAgents.targetLocation);
cudaStatus = cudaDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.targetPlayer, iStore);
thrust::copy(iStore, iStore + agentCount, coreData->myAgents.targetPlayer);
cudaStatus = cudaDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.waitedTime, fStore);
thrust::copy(fStore, fStore + agentCount, coreData->myAgents.waitedTime);
cudaStatus = cudaDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.myAbilities, aStore);
thrust::copy(aStore, aStore + agentCount, coreData->myAgents.myAbilities);
cudaStatus = cudaDeviceSynchronize();
thrust::gather(index, index + agentCount, coreData->myAgents.patrolLocation, vStore);
thrust::copy(vStore, vStore + agentCount, coreData->myAgents.patrolLocation);
cudaStatus = cudaDeviceSynchronize();
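// Each gather/copy pair above applies that same permutation to one more per-agent
// attribute, keeping every array consistent with the sorted state array. These
// raw-pointer thrust calls dispatch to the host backend, so the interleaved
// cudaDeviceSynchronize calls are most likely redundant.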
//count how many agents are in each state
for (int i = 0; i < coreData->myAgents.MAXAGENTS; ++i)
{
coreData->myAgents.stateCount[ coreData->myAgents.state[i] ]++;
}
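// stateCount[s] now holds how many agents are in state s. Because the agents were just
// sorted by state, each state occupies one contiguous block, which the per-state kernel
// launches below rely on (stateCount is assumed to be zeroed before this function runs).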
//COPY THE NEW DATA TO THE GPU
//////////////////////////////
//copy Core Data
cudaCopyCore(coreData);
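// The sort above reordered the host-side agent arrays, so the persistent core data is
// re-uploaded here, unlike in cudaGPU/cudaGPUSplit which reuse what is already on the device.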
// Update Data
cudaStatus = cudaMalloc((void**)&AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
// Pointer Data
cudaStatus = cudaMalloc((void**)&d_partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
return cudaStatus;
}
//Pass pointers their data
//Update Data
cudaStatus = cudaMemcpy(AIManager::GetInstance()->d_updateData, updateData, sizeof(CopyEachFrame), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Agent's Partitions
cudaStatus = cudaMemcpy(d_agentPartitions, updateData->agentPartitions, (coreData->myAgents.MAXAGENTS*8) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//Partition's Players
cudaStatus = cudaMemcpy(d_partitionPlayers, updateData->partitionPlayers, (coreData->myPartitions.MAXPARTITIONS*coreData->myPlayers.MAXPLAYERS) * sizeof(short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//run the broadphase on the gpu
if (runBroad)
{
//BROADPHASE FOR PLAYERS
////////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphasePlayers2, 0, partitionCount*coreData->myPlayers.MAXPLAYERS);
// Round up according to array size
gridSize = (partitionCount*coreData->myPlayers.MAXPLAYERS + blockSize - 1) / blockSize;
cudaBroadphasePlayers2<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_partitionPlayers, partitionCount);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "1st cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//BROADPHASE FOR AGENTS
///////////////////////
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaBroadphaseAgents, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
cudaBroadphaseAgents<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, partitionCount);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "2nd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
}
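//run one kernel per behaviour state over the sorted, contiguous agent ranges, so that
//threads within a warp largely follow the same code path (the point of the sort above)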
//run Patrol agents
if (coreData->myAgents.stateCount[0] != 0)
{
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunPatrol, 0, coreData->myAgents.stateCount[0]);
// Round up according to array size
gridSize = (coreData->myAgents.stateCount[0] + blockSize - 1) / blockSize;
cudaRunPatrol<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
}
//run stare agents
if (coreData->myAgents.stateCount[1] != 0)
{
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunStare, 0, coreData->myAgents.stateCount[1]);
// Round up according to array size
gridSize = (coreData->myAgents.stateCount[1] + blockSize - 1) / blockSize;
cudaRunStare<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, d_agentPartitions, d_partitionPlayers, msec);
}
//run chase agents
if (coreData->myAgents.stateCount[2] != 0)
{
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunChase, 0, coreData->myAgents.stateCount[2]);
// Round up according to array size
gridSize = (coreData->myAgents.stateCount[2] + blockSize - 1) / blockSize;
cudaRunChase<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, msec);
}
//run attack agents
if (coreData->myAgents.stateCount[3] != 0)
{
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunAbility, 0, coreData->myAgents.stateCount[3]);
// Round up according to array size
gridSize = (coreData->myAgents.stateCount[3] + blockSize - 1) / blockSize;
cudaRunAbility<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, msec);
}
//run leash agents
if (coreData->myAgents.stateCount[4] != 0)
{
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunLeash, 0, coreData->myAgents.stateCount[4]);
// Round up according to array size
gridSize = (coreData->myAgents.stateCount[4] + blockSize - 1) / blockSize;
cudaRunLeash<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, msec);
}
//get the mingrid and blocksize
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cudaRunReduce, 0, agentCount);
// Round up according to array size
gridSize = (agentCount + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
cudaRunReduce<<<gridSize, blockSize>>>(AIManager::GetInstance()->d_coreData, AIManager::GetInstance()->d_updateData, msec);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "3rd cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
fprintf(stderr, cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//free the partition index buffers as we don't need to copy them back
cudaFree(d_agentPartitions);
cudaFree(d_partitionPlayers);
return cudaStatus;
}
void clearCoreData()
{
cudaFree(AIManager::GetInstance()->d_coreData);
}
cudaError_t copyDataFromGPU(CopyOnce* coreData, CopyEachFrame* updateData, const unsigned int agentCount, const unsigned int partitionCount, float msec)
{
cudaError_t cudaStatus;
//COPY THE DATA BACK OFF OF THE GPU
///////////////////////////////////
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(coreData, AIManager::GetInstance()->d_coreData, sizeof(CopyOnce), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed CoreData!");
return cudaStatus;
}
cudaStatus = cudaMemcpy(updateData, AIManager::GetInstance()->d_updateData, sizeof(CopyEachFrame), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed UpdateData!");
return cudaStatus;
}
//clear updateData so we can send it again
cudaFree(AIManager::GetInstance()->d_updateData);
return cudaStatus;
} |
06aa2a7445da3387a5a981a827d46eb6c282ef3e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main( void ) {
hipDeviceProp_t prop;
int count;
hipError_t err = hipGetDeviceCount( &count );
if (err != hipSuccess) {
printf("Error: %s", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
for (int i=0; i< count; i++) {
err = hipGetDeviceProperties( &prop, i );
if (err != hipSuccess) {printf("Error: %s", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
| 06aa2a7445da3387a5a981a827d46eb6c282ef3e.cu | #include <stdio.h>
int main( void ) {
cudaDeviceProp prop;
int count;
cudaError_t err = cudaGetDeviceCount( &count );
if (err != cudaSuccess) {
printf("Error: %s", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
for (int i=0; i< count; i++) {
err = cudaGetDeviceProperties( &prop, i );
if (err != cudaSuccess) {printf("Error: %s", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
|
1223b9764ca46182dd3612af40fb907bd6fbeb48.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/lstm_unit_layer.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype cuda_sigmoid(Dtype x) {
return 1. / (1. + exp(-x));
}
template <typename Dtype>
__device__ Dtype cuda_sigmoid_diff(Dtype x) {
return x * (1. - x);
}
template <typename Dtype>
__device__ Dtype cuda_tanh(Dtype x) {
Dtype exp2x = exp(2 * x);
return fabs(x) < Dtype(5) ? ((exp2x - Dtype(1)) / (exp2x + Dtype(1))) :
(x > 0 ? Dtype(1) : Dtype(-1));
}
template <typename Dtype>
__device__ Dtype cuda_tanh_diff(Dtype x) {
return (1. - x * x);
}
template <typename Dtype>
__global__ void ForwardCombineGates(
int n,
const Dtype* prev_state_data,
Dtype* input_gates,
Dtype* forget_gates,
Dtype* output_gates,
Dtype* input_values,
Dtype* tanh_next_memory_state,
Dtype* next_memory_state,
Dtype* next_hidden_state) {
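// Standard LSTM cell update, applied element-wise:
//   i = sigmoid(i_raw), f = sigmoid(f_raw), o = sigmoid(o_raw), g = tanh(g_raw)
//   c_t = f * c_{t-1} + i * g
//   h_t = o * tanh(c_t)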
CUDA_KERNEL_LOOP(idx, n) {
input_gates[idx] = cuda_sigmoid(input_gates[idx]);
forget_gates[idx] = cuda_sigmoid(forget_gates[idx]);
output_gates[idx] = cuda_sigmoid(output_gates[idx]);
input_values[idx] = cuda_tanh(input_values[idx]);
next_memory_state[idx] = prev_state_data[idx] * forget_gates[idx] +
input_gates[idx] * input_values[idx];
tanh_next_memory_state[idx] = cuda_tanh(next_memory_state[idx]);
next_hidden_state[idx] = tanh_next_memory_state[idx] * output_gates[idx];
}
}
template <typename Dtype>
__global__ void BackwardGates(
int n,
const Dtype* input_gates,
const Dtype* forget_gates,
const Dtype* output_gates,
const Dtype* input_values,
const Dtype* tanh_next_memory_state,
Dtype* input_gates_diff,
Dtype* forget_gates_diff,
Dtype* output_gates_diff,
Dtype* input_values_diff,
Dtype* tanh_next_memory_diff) {
CUDA_KERNEL_LOOP(idx, n) {
input_gates_diff[idx] = cuda_sigmoid_diff(input_gates[idx]);
forget_gates_diff[idx] = cuda_sigmoid_diff(forget_gates[idx]);
output_gates_diff[idx] = cuda_sigmoid_diff(output_gates[idx]);
input_values_diff[idx] = cuda_tanh_diff(input_values[idx]);
tanh_next_memory_diff[idx] = cuda_tanh_diff(tanh_next_memory_state[idx]);
}
}
template <typename Dtype>
void LstmUnitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
concat_top_vec_.clear();
concat_bottom_vec_.clear();
concat_bottom_vec_.push_back(bottom[0]);
concat_bottom_vec_.push_back(bottom[1]);
concat_top_vec_.push_back(concated_data_.get());
concat_layer_->Forward(concat_bottom_vec_, concat_top_vec_);
const Dtype* input_data = concated_data_->gpu_data();
const Dtype* prev_state_data = bottom[2]->gpu_data();
const Dtype* input_weight = this->blobs_[0]->gpu_data();
const Dtype* input_gate_weight = this->blobs_[1]->gpu_data();
const Dtype* forget_gate_weight = this->blobs_[2]->gpu_data();
const Dtype* output_gate_weight = this->blobs_[3]->gpu_data();
Dtype* next_hidden_state = top[0]->mutable_gpu_data();
Dtype* next_memory_state = top[1]->mutable_gpu_data();
Dtype* input_gates = input_gates_data_buffer_->mutable_gpu_data();
Dtype* forget_gates = forget_gates_data_buffer_->mutable_gpu_data();
Dtype* output_gates = output_gates_data_buffer_->mutable_gpu_data();
Dtype* input_values = input_values_data_buffer_->mutable_gpu_data();
Dtype* tanh_next_memory_state = tanh_mem_buffer_->mutable_gpu_data();
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, input_weight,
(Dtype)0., input_values);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, input_gate_weight,
(Dtype)0., input_gates);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, forget_gate_weight,
(Dtype)0., forget_gates);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, output_gate_weight,
(Dtype)0., output_gates);
const int count = num_ * channels_;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ForwardCombineGates<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count,
prev_state_data,
input_gates,
forget_gates,
output_gates,
input_values,
tanh_next_memory_state,
next_memory_state,
next_hidden_state);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void LstmUnitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
caffe_gpu_set(this->concated_data_->count(), Dtype(0),
this->concated_data_->mutable_gpu_diff());
const Dtype* input_data = concated_data_->gpu_data();
const Dtype* prev_state_data = bottom[2]->gpu_data();
const Dtype* input_weight = this->blobs_[0]->gpu_data();
const Dtype* input_gate_weight = this->blobs_[1]->gpu_data();
const Dtype* forget_gate_weight = this->blobs_[2]->gpu_data();
const Dtype* output_gate_weight = this->blobs_[3]->gpu_data();
const Dtype* input_gates = input_gates_data_buffer_->gpu_data();
const Dtype* forget_gates = forget_gates_data_buffer_->gpu_data();
const Dtype* output_gates = output_gates_data_buffer_->gpu_data();
const Dtype* input_values = input_values_data_buffer_->gpu_data();
const Dtype* tanh_next_memory_state = tanh_mem_buffer_->gpu_data();
Dtype* gates_diff = gates_diff_buffer_->mutable_gpu_data();
Dtype* input_gates_diff = gates_diff + channels_ * num_ * 0;
Dtype* forget_gates_diff = gates_diff + channels_ * num_ * 1;
Dtype* output_gates_diff = gates_diff + channels_ * num_ * 2;
Dtype* input_values_diff = gates_diff + channels_ * num_ * 3;
Dtype* tanh_next_memory_diff = tanh_mem_buffer_->mutable_gpu_diff();
const int count = num_ * channels_;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BackwardGates<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count,
input_gates,
forget_gates,
output_gates,
input_values,
tanh_next_memory_state,
input_gates_diff,
forget_gates_diff,
output_gates_diff,
input_values_diff,
tanh_next_memory_diff);
CUDA_POST_KERNEL_CHECK;
Dtype* input_weight_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* input_gate_weight_diff = this->blobs_[1]->mutable_gpu_diff();
Dtype* forget_gate_weight_diff = this->blobs_[2]->mutable_gpu_diff();
Dtype* output_gate_weight_diff = this->blobs_[3]->mutable_gpu_diff();
Dtype* input_diff = concated_data_->mutable_gpu_diff();
Dtype* prev_state_diff = bottom[2]->mutable_gpu_diff();
const Dtype* next_hidden_state_diff = top[0]->gpu_diff();
const Dtype* next_memory_state_diff = top[1]->gpu_diff();
Dtype* next_state_tot_diff = next_state_tot_diff_buffer_->mutable_gpu_data();
caffe_gpu_mul(num_ * channels_, output_gates,
next_hidden_state_diff, next_state_tot_diff);
caffe_gpu_mul(num_ * channels_, tanh_next_memory_diff,
next_state_tot_diff, next_state_tot_diff);
caffe_gpu_add(num_ * channels_, next_memory_state_diff,
next_state_tot_diff, next_state_tot_diff);
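// next_state_tot_diff now holds dL/dc_t: the gradient arriving through the hidden
// state (dL/dh_t * o_t * tanh'(c_t)) plus the gradient passed in on the memory state.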
// caffe_gpu_axpby(num_ * channels_, Dtype(0), next_state_tot_diff,
// Dtype(0.5), next_state_tot_diff);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff,
forget_gates, prev_state_diff);
Dtype* dldg_data = dldg_buffer_->mutable_gpu_data();
caffe_gpu_mul(num_ * channels_, input_gates, input_values_diff, dldg_data);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_dim_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., input_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_dim_, channels_,
(Dtype)1., dldg_data, input_weight,
(Dtype)1., input_diff);
caffe_gpu_mul(num_ * channels_, input_gates_diff, input_values, dldg_data);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_dim_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., input_gate_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_dim_, channels_,
(Dtype)1., dldg_data, input_gate_weight,
(Dtype)1., input_diff);
caffe_gpu_mul(num_ * channels_, forget_gates_diff, prev_state_data, dldg_data);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_dim_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., forget_gate_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_dim_, channels_,
(Dtype)1., dldg_data, forget_gate_weight,
(Dtype)1., input_diff);
caffe_gpu_mul(num_ * channels_, output_gates_diff, tanh_next_memory_state,
dldg_data);
caffe_gpu_mul(num_ * channels_, next_hidden_state_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_dim_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., output_gate_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_dim_, channels_,
(Dtype)1., dldg_data, output_gate_weight,
(Dtype)1., input_diff);
// caffe_gpu_axpby(num_ * input_data_dim_, Dtype(0), input_diff,
// Dtype(0.25), input_diff);
vector<bool> concat_propagate_down(2, true);
concat_layer_->Backward(concat_top_vec_, concat_propagate_down, concat_bottom_vec_);
}
INSTANTIATE_LAYER_GPU_FUNCS(LstmUnitLayer);
} // namespace caffe
| 1223b9764ca46182dd3612af40fb907bd6fbeb48.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/lstm_unit_layer.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype cuda_sigmoid(Dtype x) {
return 1. / (1. + exp(-x));
}
template <typename Dtype>
__device__ Dtype cuda_sigmoid_diff(Dtype x) {
return x * (1. - x);
}
template <typename Dtype>
__device__ Dtype cuda_tanh(Dtype x) {
Dtype exp2x = exp(2 * x);
return fabs(x) < Dtype(5) ? ((exp2x - Dtype(1)) / (exp2x + Dtype(1))) :
(x > 0 ? Dtype(1) : Dtype(-1));
}
template <typename Dtype>
__device__ Dtype cuda_tanh_diff(Dtype x) {
return (1. - x * x);
}
template <typename Dtype>
__global__ void ForwardCombineGates(
int n,
const Dtype* prev_state_data,
Dtype* input_gates,
Dtype* forget_gates,
Dtype* output_gates,
Dtype* input_values,
Dtype* tanh_next_memory_state,
Dtype* next_memory_state,
Dtype* next_hidden_state) {
CUDA_KERNEL_LOOP(idx, n) {
input_gates[idx] = cuda_sigmoid(input_gates[idx]);
forget_gates[idx] = cuda_sigmoid(forget_gates[idx]);
output_gates[idx] = cuda_sigmoid(output_gates[idx]);
input_values[idx] = cuda_tanh(input_values[idx]);
next_memory_state[idx] = prev_state_data[idx] * forget_gates[idx] +
input_gates[idx] * input_values[idx];
tanh_next_memory_state[idx] = cuda_tanh(next_memory_state[idx]);
next_hidden_state[idx] = tanh_next_memory_state[idx] * output_gates[idx];
}
}
template <typename Dtype>
__global__ void BackwardGates(
int n,
const Dtype* input_gates,
const Dtype* forget_gates,
const Dtype* output_gates,
const Dtype* input_values,
const Dtype* tanh_next_memory_state,
Dtype* input_gates_diff,
Dtype* forget_gates_diff,
Dtype* output_gates_diff,
Dtype* input_values_diff,
Dtype* tanh_next_memory_diff) {
CUDA_KERNEL_LOOP(idx, n) {
input_gates_diff[idx] = cuda_sigmoid_diff(input_gates[idx]);
forget_gates_diff[idx] = cuda_sigmoid_diff(forget_gates[idx]);
output_gates_diff[idx] = cuda_sigmoid_diff(output_gates[idx]);
input_values_diff[idx] = cuda_tanh_diff(input_values[idx]);
tanh_next_memory_diff[idx] = cuda_tanh_diff(tanh_next_memory_state[idx]);
}
}
template <typename Dtype>
void LstmUnitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
concat_top_vec_.clear();
concat_bottom_vec_.clear();
concat_bottom_vec_.push_back(bottom[0]);
concat_bottom_vec_.push_back(bottom[1]);
concat_top_vec_.push_back(concated_data_.get());
concat_layer_->Forward(concat_bottom_vec_, concat_top_vec_);
const Dtype* input_data = concated_data_->gpu_data();
const Dtype* prev_state_data = bottom[2]->gpu_data();
const Dtype* input_weight = this->blobs_[0]->gpu_data();
const Dtype* input_gate_weight = this->blobs_[1]->gpu_data();
const Dtype* forget_gate_weight = this->blobs_[2]->gpu_data();
const Dtype* output_gate_weight = this->blobs_[3]->gpu_data();
Dtype* next_hidden_state = top[0]->mutable_gpu_data();
Dtype* next_memory_state = top[1]->mutable_gpu_data();
Dtype* input_gates = input_gates_data_buffer_->mutable_gpu_data();
Dtype* forget_gates = forget_gates_data_buffer_->mutable_gpu_data();
Dtype* output_gates = output_gates_data_buffer_->mutable_gpu_data();
Dtype* input_values = input_values_data_buffer_->mutable_gpu_data();
Dtype* tanh_next_memory_state = tanh_mem_buffer_->mutable_gpu_data();
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, input_weight,
(Dtype)0., input_values);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, input_gate_weight,
(Dtype)0., input_gates);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, forget_gate_weight,
(Dtype)0., forget_gates);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_,
(Dtype)1., input_data, output_gate_weight,
(Dtype)0., output_gates);
const int count = num_ * channels_;
// NOLINT_NEXT_LINE(whitespace/operators)
ForwardCombineGates<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count,
prev_state_data,
input_gates,
forget_gates,
output_gates,
input_values,
tanh_next_memory_state,
next_memory_state,
next_hidden_state);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void LstmUnitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
caffe_gpu_set(this->concated_data_->count(), Dtype(0),
this->concated_data_->mutable_gpu_diff());
const Dtype* input_data = concated_data_->gpu_data();
const Dtype* prev_state_data = bottom[2]->gpu_data();
const Dtype* input_weight = this->blobs_[0]->gpu_data();
const Dtype* input_gate_weight = this->blobs_[1]->gpu_data();
const Dtype* forget_gate_weight = this->blobs_[2]->gpu_data();
const Dtype* output_gate_weight = this->blobs_[3]->gpu_data();
const Dtype* input_gates = input_gates_data_buffer_->gpu_data();
const Dtype* forget_gates = forget_gates_data_buffer_->gpu_data();
const Dtype* output_gates = output_gates_data_buffer_->gpu_data();
const Dtype* input_values = input_values_data_buffer_->gpu_data();
const Dtype* tanh_next_memory_state = tanh_mem_buffer_->gpu_data();
Dtype* gates_diff = gates_diff_buffer_->mutable_gpu_data();
Dtype* input_gates_diff = gates_diff + channels_ * num_ * 0;
Dtype* forget_gates_diff = gates_diff + channels_ * num_ * 1;
Dtype* output_gates_diff = gates_diff + channels_ * num_ * 2;
Dtype* input_values_diff = gates_diff + channels_ * num_ * 3;
Dtype* tanh_next_memory_diff = tanh_mem_buffer_->mutable_gpu_diff();
const int count = num_ * channels_;
// NOLINT_NEXT_LINE(whitespace/operators)
BackwardGates<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count,
input_gates,
forget_gates,
output_gates,
input_values,
tanh_next_memory_state,
input_gates_diff,
forget_gates_diff,
output_gates_diff,
input_values_diff,
tanh_next_memory_diff);
CUDA_POST_KERNEL_CHECK;
Dtype* input_weight_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* input_gate_weight_diff = this->blobs_[1]->mutable_gpu_diff();
Dtype* forget_gate_weight_diff = this->blobs_[2]->mutable_gpu_diff();
Dtype* output_gate_weight_diff = this->blobs_[3]->mutable_gpu_diff();
Dtype* input_diff = concated_data_->mutable_gpu_diff();
Dtype* prev_state_diff = bottom[2]->mutable_gpu_diff();
const Dtype* next_hidden_state_diff = top[0]->gpu_diff();
const Dtype* next_memory_state_diff = top[1]->gpu_diff();
Dtype* next_state_tot_diff = next_state_tot_diff_buffer_->mutable_gpu_data();
caffe_gpu_mul(num_ * channels_, output_gates,
next_hidden_state_diff, next_state_tot_diff);
caffe_gpu_mul(num_ * channels_, tanh_next_memory_diff,
next_state_tot_diff, next_state_tot_diff);
caffe_gpu_add(num_ * channels_, next_memory_state_diff,
next_state_tot_diff, next_state_tot_diff);
// caffe_gpu_axpby(num_ * channels_, Dtype(0), next_state_tot_diff,
// Dtype(0.5), next_state_tot_diff);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff,
forget_gates, prev_state_diff);
Dtype* dldg_data = dldg_buffer_->mutable_gpu_data();
caffe_gpu_mul(num_ * channels_, input_gates, input_values_diff, dldg_data);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_dim_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., input_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_dim_, channels_,
(Dtype)1., dldg_data, input_weight,
(Dtype)1., input_diff);
caffe_gpu_mul(num_ * channels_, input_gates_diff, input_values, dldg_data);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_dim_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., input_gate_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_dim_, channels_,
(Dtype)1., dldg_data, input_gate_weight,
(Dtype)1., input_diff);
caffe_gpu_mul(num_ * channels_, forget_gates_diff, prev_state_data, dldg_data);
caffe_gpu_mul(num_ * channels_, next_state_tot_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_dim_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., forget_gate_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_dim_, channels_,
(Dtype)1., dldg_data, forget_gate_weight,
(Dtype)1., input_diff);
caffe_gpu_mul(num_ * channels_, output_gates_diff, tanh_next_memory_state,
dldg_data);
caffe_gpu_mul(num_ * channels_, next_hidden_state_diff, dldg_data, dldg_data);
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
channels_, input_data_dim_, num_,
(Dtype)1., dldg_data, input_data,
(Dtype)1., output_gate_weight_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
num_, input_data_dim_, channels_,
(Dtype)1., dldg_data, output_gate_weight,
(Dtype)1., input_diff);
// caffe_gpu_axpby(num_ * input_data_dim_, Dtype(0), input_diff,
// Dtype(0.25), input_diff);
vector<bool> concat_propagate_down(2, true);
concat_layer_->Backward(concat_top_vec_, concat_propagate_down, concat_bottom_vec_);
}
INSTANTIATE_LAYER_GPU_FUNCS(LstmUnitLayer);
} // namespace caffe
|
ed936017eaede518ac4fa67116c8d2832d6c21cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <string>
cv::Mat imageInputRGBA;
cv::Mat imageOutputRGBA;
uchar4 *d_inputImageRGBA__;
uchar4 *d_outputImageRGBA__;
float *h_filter__;
size_t numRows() { return imageInputRGBA.rows; }
size_t numCols() { return imageInputRGBA.cols; }
void preProcess(uchar4 **h_inputImageRGBA, uchar4 **h_outputImageRGBA,
uchar4 **d_inputImageRGBA, uchar4 **d_outputImageRGBA,
unsigned char **d_redBlurred,
unsigned char **d_greenBlurred,
unsigned char **d_blueBlurred,
float **h_filter, int *filterWidth,
const std::string &filename) {
checkCudaErrors(hipFree(0));
cv::Mat image = cv::imread(filename.c_str(), cv::IMREAD_COLOR);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
cv::cvtColor(image, imageInputRGBA, cv::COLOR_BGR2RGBA);
imageOutputRGBA.create(image.rows, image.cols, CV_8UC4);
if (!imageInputRGBA.isContinuous() || !imageOutputRGBA.isContinuous()) {
std::cerr << "Images aren't continuous!! Exiting." << std::endl;
exit(1);
}
*h_inputImageRGBA = (uchar4 *)imageInputRGBA.ptr<unsigned char>(0);
*h_outputImageRGBA = (uchar4 *)imageOutputRGBA.ptr<unsigned char>(0);
const size_t numPixels = numRows() * numCols();
//allocate memory on the device for both input and output
printf("image dimensions: %zu %zu\n", numRows(), numCols());
checkCudaErrors(hipMalloc(d_inputImageRGBA, sizeof(uchar4) * numPixels));
checkCudaErrors(hipMalloc(d_outputImageRGBA, sizeof(uchar4) * numPixels));
checkCudaErrors(hipMemset(*d_outputImageRGBA, 0, numPixels * sizeof(uchar4))); //make sure no memory is left laying around
//copy input array to the GPU
checkCudaErrors(hipMemcpy(*d_inputImageRGBA, *h_inputImageRGBA, sizeof(uchar4) * numPixels, hipMemcpyHostToDevice));
d_inputImageRGBA__ = *d_inputImageRGBA;
d_outputImageRGBA__ = *d_outputImageRGBA;
//now create the filter that they will use
const int blurKernelWidth = 9;
const float blurKernelSigma = 2.;
*filterWidth = blurKernelWidth;
//create and fill the filter we will convolve with
*h_filter = new float[blurKernelWidth * blurKernelWidth];
h_filter__ = *h_filter;
float filterSum = 0.f; //for normalization
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r) {
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c) {
float filterValue = expf( -(float)(c * c + r * r) / (2.f * blurKernelSigma * blurKernelSigma));
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] = filterValue;
filterSum += filterValue;
}
}
float normalizationFactor = 1.f / filterSum;
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r) {
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c) {
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] *= normalizationFactor;
}
}
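// h_filter now holds a blurKernelWidth x blurKernelWidth Gaussian whose entries sum
// to 1, so the blur preserves overall image brightness.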
//blurred
checkCudaErrors(hipMalloc(d_redBlurred, sizeof(unsigned char) * numPixels));
checkCudaErrors(hipMalloc(d_greenBlurred, sizeof(unsigned char) * numPixels));
checkCudaErrors(hipMalloc(d_blueBlurred, sizeof(unsigned char) * numPixels));
checkCudaErrors(hipMemset(*d_redBlurred, 0, sizeof(unsigned char) * numPixels));
checkCudaErrors(hipMemset(*d_greenBlurred, 0, sizeof(unsigned char) * numPixels));
checkCudaErrors(hipMemset(*d_blueBlurred, 0, sizeof(unsigned char) * numPixels));
}
void postProcess(const std::string& output_file) {
const int numPixels = numRows() * numCols();
//copy the output back to the host
checkCudaErrors(hipMemcpy(imageOutputRGBA.ptr<unsigned char>(0), d_outputImageRGBA__, sizeof(uchar4) * numPixels, hipMemcpyDeviceToHost));
cv::Mat imageOutputBGR;
cv::cvtColor(imageOutputRGBA, imageOutputBGR, cv::COLOR_RGBA2BGR);
//output the image
cv::imwrite(output_file.c_str(), imageOutputBGR);
//cleanup
hipFree(d_inputImageRGBA__);
hipFree(d_outputImageRGBA__);
delete[] h_filter__;
}
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
const int r = threadIdx.x + blockIdx.x * blockDim.x;
const int c = threadIdx.y + blockIdx.y * blockDim.y;
if ( r >= numRows || c >= numCols ) { //r walks rows (grid x), c walks columns (grid y)
return;
}
const int idx = r * numCols + c;
float avg = 0;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
int image_r = min(max(r + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(c + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
avg += image_value * filter_value;
}
}
outputChannel[idx] = avg;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
const int r = threadIdx.x + blockIdx.x * blockDim.x;
const int c = threadIdx.y + blockIdx.y * blockDim.y;
if ( r >= numRows || c >= numCols ) { //r walks rows (grid x), c walks columns (grid y)
return;
}
int idx = r * numCols + c;
uchar4 rgba = inputImageRGBA[idx];
redChannel[idx] = rgba.x;
greenChannel[idx] = rgba.y;
blueChannel[idx] = rgba.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.x * numCols + thread_2D_pos.y;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numRows || thread_2D_pos.y >= numCols)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
const int blockWidth = 32;
const dim3 blockSize(blockWidth, blockWidth, 1);
const dim3 gridSize(floor(numRows / blockWidth) + 1, floor(numCols / blockWidth) + 1, 1);
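// Grid convention here: the x-dimension walks image rows and the y-dimension walks
// image columns, so the kernels bounds-check x against numRows and y against numCols.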
// Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
} | ed936017eaede518ac4fa67116c8d2832d6c21cb.cu | #include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <string>
cv::Mat imageInputRGBA;
cv::Mat imageOutputRGBA;
uchar4 *d_inputImageRGBA__;
uchar4 *d_outputImageRGBA__;
float *h_filter__;
size_t numRows() { return imageInputRGBA.rows; }
size_t numCols() { return imageInputRGBA.cols; }
void preProcess(uchar4 **h_inputImageRGBA, uchar4 **h_outputImageRGBA,
uchar4 **d_inputImageRGBA, uchar4 **d_outputImageRGBA,
unsigned char **d_redBlurred,
unsigned char **d_greenBlurred,
unsigned char **d_blueBlurred,
float **h_filter, int *filterWidth,
const std::string &filename) {
checkCudaErrors(cudaFree(0));
cv::Mat image = cv::imread(filename.c_str(), cv::IMREAD_COLOR);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
cv::cvtColor(image, imageInputRGBA, cv::COLOR_BGR2RGBA);
imageOutputRGBA.create(image.rows, image.cols, CV_8UC4);
if (!imageInputRGBA.isContinuous() || !imageOutputRGBA.isContinuous()) {
std::cerr << "Images aren't continuous!! Exiting." << std::endl;
exit(1);
}
*h_inputImageRGBA = (uchar4 *)imageInputRGBA.ptr<unsigned char>(0);
*h_outputImageRGBA = (uchar4 *)imageOutputRGBA.ptr<unsigned char>(0);
const size_t numPixels = numRows() * numCols();
//allocate memory on the device for both input and output
printf("image dimensions: %zu %zu\n", numRows(), numCols());
checkCudaErrors(cudaMalloc(d_inputImageRGBA, sizeof(uchar4) * numPixels));
checkCudaErrors(cudaMalloc(d_outputImageRGBA, sizeof(uchar4) * numPixels));
checkCudaErrors(cudaMemset(*d_outputImageRGBA, 0, numPixels * sizeof(uchar4))); //make sure no memory is left laying around
//copy input array to the GPU
checkCudaErrors(cudaMemcpy(*d_inputImageRGBA, *h_inputImageRGBA, sizeof(uchar4) * numPixels, cudaMemcpyHostToDevice));
d_inputImageRGBA__ = *d_inputImageRGBA;
d_outputImageRGBA__ = *d_outputImageRGBA;
//now create the filter that they will use
const int blurKernelWidth = 9;
const float blurKernelSigma = 2.;
*filterWidth = blurKernelWidth;
//create and fill the filter we will convolve with
*h_filter = new float[blurKernelWidth * blurKernelWidth];
h_filter__ = *h_filter;
float filterSum = 0.f; //for normalization
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r) {
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c) {
float filterValue = expf( -(float)(c * c + r * r) / (2.f * blurKernelSigma * blurKernelSigma));
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] = filterValue;
filterSum += filterValue;
}
}
float normalizationFactor = 1.f / filterSum;
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r) {
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c) {
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] *= normalizationFactor;
}
}
//blurred
checkCudaErrors(cudaMalloc(d_redBlurred, sizeof(unsigned char) * numPixels));
checkCudaErrors(cudaMalloc(d_greenBlurred, sizeof(unsigned char) * numPixels));
checkCudaErrors(cudaMalloc(d_blueBlurred, sizeof(unsigned char) * numPixels));
checkCudaErrors(cudaMemset(*d_redBlurred, 0, sizeof(unsigned char) * numPixels));
checkCudaErrors(cudaMemset(*d_greenBlurred, 0, sizeof(unsigned char) * numPixels));
checkCudaErrors(cudaMemset(*d_blueBlurred, 0, sizeof(unsigned char) * numPixels));
}
void postProcess(const std::string& output_file) {
const int numPixels = numRows() * numCols();
//copy the output back to the host
checkCudaErrors(cudaMemcpy(imageOutputRGBA.ptr<unsigned char>(0), d_outputImageRGBA__, sizeof(uchar4) * numPixels, cudaMemcpyDeviceToHost));
cv::Mat imageOutputBGR;
cv::cvtColor(imageOutputRGBA, imageOutputBGR, cv::COLOR_RGBA2BGR);
//output the image
cv::imwrite(output_file.c_str(), imageOutputBGR);
//cleanup
cudaFree(d_inputImageRGBA__);
cudaFree(d_outputImageRGBA__);
delete[] h_filter__;
}
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
const int r = threadIdx.x + blockIdx.x * blockDim.x;
const int c = threadIdx.y + blockIdx.y * blockDim.y;
if ( r >= numRows || c >= numCols ) { //r walks rows (grid x), c walks columns (grid y)
return;
}
const int idx = r * numCols + c;
float avg = 0;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
int image_r = min(max(r + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(c + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
avg += image_value * filter_value;
}
}
outputChannel[idx] = avg;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
const int r = threadIdx.x + blockIdx.x * blockDim.x;
const int c = threadIdx.y + blockIdx.y * blockDim.y;
if ( r >= numRows || c >= numCols ) { //r walks rows (grid x), c walks columns (grid y)
return;
}
int idx = r * numCols + c;
uchar4 rgba = inputImageRGBA[idx];
redChannel[idx] = rgba.x;
greenChannel[idx] = rgba.y;
blueChannel[idx] = rgba.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.x * numCols + thread_2D_pos.y;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numRows || thread_2D_pos.y >= numCols)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
const int blockWidth = 32;
const dim3 blockSize(blockWidth, blockWidth, 1);
const dim3 gridSize((numRows + blockWidth - 1) / blockWidth, (numCols + blockWidth - 1) / blockWidth, 1); // ceil division: enough blocks to cover every row and column
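// Example of the resulting launch geometry (illustrative numbers): for a
// 1080-row by 1920-column image and blockWidth = 32, gridSize is
// (ceil(1080/32), ceil(1920/32), 1) = (34, 60, 1) blocks of 32x32 threads,
// so every pixel gets a thread and the kernels' bounds checks discard the rest.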
// Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
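//-----------------------------------------------------------------------------
// Typical host-side call order (a sketch; the driver code lives outside this
// file, so the exact sequence below is an assumption, shown only to tie the
// pieces of this file together):
//
//   allocateMemoryAndCopyToGPU(numRows(), numCols(), h_filter, filterWidth);
//   your_gaussian_blur(h_inputImageRGBA, d_inputImageRGBA, d_outputImageRGBA,
//                      numRows(), numCols(),
//                      d_redBlurred, d_greenBlurred, d_blueBlurred, filterWidth);
//   cleanup();                 // frees d_red, d_green, d_blue and d_filter
//   postProcess(output_file);  // copies the blurred image back and writes it to disk
//-----------------------------------------------------------------------------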
//Free all the memory that we allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
} |
a571cb35e4255e51fa73c5256233577fa10dc0a7.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection2.hpp"
#include "TIGRE_common.hpp"
#include <math.h>
#include "GpuIds.hpp"
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
// this definition must go here.
void CreateTexture2(const GpuIds& gpuids, float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream,int nStreamDevice,bool allocate);
__global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) {
image[idx]*=constant;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections processed by a single kernel call (each thread loops over all of them). This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by a single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 7 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec, source
// So we need to keep PROJ_PER_KERNEL*7 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArray2Dev[7*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// Now we also need to store, for each projection, sinAlpha, cosAlpha, COR, DSD and DSO (five floats per projection)
__constant__ float projSinCosArray2Dev[5*PROJ_PER_KERNEL];
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
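// Layout sketch (illustrative only; the code below indexes the flat constant arrays
// directly, there is no such struct in this file): entry j of the batch conceptually holds
//
//   struct ProjParams {                                   // hypothetical
//       Point3D deltaX, deltaY, deltaZ;                   // voxel steps, already rotated/scaled
//       Point3D xyzOrigin, offOrig, offDetec, source;     // origin, offsets and rotated source
//       float   sinAlpha, cosAlpha, COR, DSD, DSO;        // per-projection scalars
//   };
//
// flattened as 7 Point3D values at projParamsArray2Dev[7*j .. 7*j+6]
// and 5 floats at projSinCosArray2Dev[5*j .. 5*j+4].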
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojection(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections, hipTextureObject_t tex)
{
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we don't go out of bounds
if (indX>=geo.nVoxelX || indY>=geo.nVoxelY || startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
#pragma unroll
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our currImageVal will be updated by however many projections we had left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArray2Dev[7*projNumber]; // 7*projNumber because we have 7 Point3D values per projection
Point3D deltaY = projParamsArray2Dev[7*projNumber+1];
Point3D deltaZ = projParamsArray2Dev[7*projNumber+2];
Point3D xyzOrigin = projParamsArray2Dev[7*projNumber+3];
Point3D xyzOffset = projParamsArray2Dev[7*projNumber+4];
Point3D uv0Offset = projParamsArray2Dev[7*projNumber+5];
Point3D S = projParamsArray2Dev[7*projNumber+6];
float sinalpha = projSinCosArray2Dev[5*projNumber]; // 5*projNumber because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
float cosalpha = projSinCosArray2Dev[5*projNumber+1];
float COR = projSinCosArray2Dev[5*projNumber+2];
float DSD = projSinCosArray2Dev[5*projNumber+3];
float DSO = projSinCosArray2Dev[5*projNumber+4];
// Precomputations for the weights:
//Real coords of Source
// We already have S.x (geo.DSO), and S.y and S.z are always zero. we just need to rotate
Point3D realS;
realS.x= DSO*cosalpha;
realS.y=-DSO*sinalpha;
realS.z=0;
Point3D realvoxel_init;
realvoxel_init.x=-geo.sVoxelX/2+geo.dVoxelX/2+xyzOffset.x;
realvoxel_init.y=-geo.sVoxelY/2+geo.dVoxelY/2+xyzOffset.y;
realvoxel_init.z=-geo.sVoxelZ/2+geo.dVoxelZ/2+xyzOffset.z;
// Real XYZ coordinates of Detector.
Point3D realD, realDaux;
// We know the index of the detector (u,v). Start from there.
realDaux.x=-(DSD-DSO);
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=__fdividef(DSO-DSD-S.x,vectX);
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+(float)geo.nDetecU*0.5f;
v=z+(float)geo.nDetecV*0.5f;
#if IS_FOR_MATLAB_TIGRE
float sample=tex3D<float>(tex, v, u ,indAlpha+0.5f);
#else
float sample=tex3D<float>(tex, u, v ,indAlpha+0.5f);
#endif
float weigth=0;
//
//
//
// IMPORTANT: The weights are almost 50% of the computational time. Is there a way of speeding this up??
//
//Real coordinates of Voxel. Instead of reverting the transformation, it's less math (faster) to compute it from the indexes.
Point3D realvoxel;
realvoxel.x=realvoxel_init.x+indX*geo.dVoxelX;
realvoxel.y=realvoxel_init.y+indY*geo.dVoxelY;
realvoxel.z=realvoxel_init.z+indZ*geo.dVoxelZ;
realDaux.y=(-geo.sDetecU+geo.dDetecU)*0.5f + u*geo.dDetecU +uv0Offset.x;
realD.z =(-geo.sDetecV+geo.dDetecV)*0.5f + v*geo.dDetecV +uv0Offset.y;
//rotate the detector
realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
float L,lsq;
L = __fsqrt_rd( (realS.x-realD.x)*(realS.x-realD.x)+ (realS.y-realD.y)*(realS.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always.
lsq = (realS.x-realvoxel.x)*(realS.x-realvoxel.x)
+ (realS.y-realvoxel.y)*(realS.y-realvoxel.y)
+ (realS.z-realvoxel.z)*(realS.z-realvoxel.z);
weigth=__fdividef(L*L*L,(DSD*lsq));
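// Written as a formula, the two lines above apply
//     weigth = L^3 / (DSD * lsq)
// with L = || realS - realD || (source to detector-pixel distance) and
// lsq = || realS - realvoxel ||^2 (squared source to voxel distance),
// i.e. the distance weighting this FDK backprojector applies to every sample.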
// weigth=1;
// Get Value in the computed (U,V) and multiply by the corresponding weigth.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=sample* weigth;
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Write the updated voxel value back to the 3D volume (main memory)
// We updated the local (register) copy inside the projection loop above, avoiding reads/writes from the slow main memory until now.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojectionFDK
//______________________________________________________________________________
//
// Function: voxel_backprojection
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection2(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha, const GpuIds& gpuids){
// Prepare for MultiGPU
int deviceCount = gpuids.GetLength();
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","There are no available device(s) that support CUDA\n");
}
// Check the available devices, and if they are the same
int dev;
checkDevices(gpuids);
// Split the CT problem
unsigned int split_image;
unsigned int split_projections;
splitCTbackprojection(gpuids,geo,nalpha,&split_image,&split_projections);
// Create the arrays for the geometry. The main difference is that geo.offZ has been tuned for the
// image slices. The rest of the Geometry is the same
Geometry* geoArray=(Geometry*)malloc(split_image*deviceCount*sizeof(Geometry));
createGeoArray(split_image*deviceCount,geo,geoArray,nalpha);
// Now let's allocate all the image memory on the GPU, so we can use it later. If we have made our numbers correctly
// in the previous section this should leave enough space for the textures.
size_t num_bytes_img = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ* sizeof(float);
float** dimage=(float**)malloc(deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipMalloc((void**)&dimage[dev], num_bytes_img);
cudaCheckErrors("hipMalloc fail");
}
//Page-lock (pin) host memory so that the asynchronous copies below can overlap with compute.
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported = 0;
#if CUDART_VERSION >= 9020
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,gpuids[0]);
#endif
// empirical testing shows that when the image is not split (split_image==1, which also implies the image is not very big), the time to
// pin the memory is greater than the time lost by launching the memcpys synchronously. Pinning is only worth it when the image is big.
if (isHostRegisterSupported & split_image>1){
hipHostRegister(result, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),hipHostRegisterPortable);
}
if (isHostRegisterSupported ){
hipHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
//If it is the first time, lets make sure our image is zeroed.
int nStreamDevice=2;
int nStreams=deviceCount*nStreamDevice;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
for (int i = 0; i < nStreamDevice; ++i){
hipStreamCreate(&stream[i+dev*nStreamDevice]);
}
}
// Kernel auxiliary variables
Point3D* projParamsArray2Host;
hipHostMalloc((void**)&projParamsArray2Host,7*PROJ_PER_KERNEL*sizeof(Point3D));
float* projSinCosArray2Host;
hipHostMalloc((void**)&projSinCosArray2Host,5*PROJ_PER_KERNEL*sizeof(float));
// Texture object variables
hipTextureObject_t *texProj;
hipArray **d_cuArrTex;
texProj =(hipTextureObject_t*)malloc(deviceCount*2*sizeof(hipTextureObject_t));
d_cuArrTex =(hipArray**)malloc(deviceCount*2*sizeof(hipArray*));
unsigned int proj_split_overlap_number;
// Start with the main loop. The projection data needs to be allocated and deallocated in the main loop
// as due to the nature of cudaArrays, we can not reuse them. This should not be a problem for the fast execution
// of the code, as repeated allocation and deallocation only happens when the projection data is very very big,
// and therefore allocation time should be negligible; fluctuation of other computations should mask the time.
unsigned long long proj_linear_idx_start;
unsigned int current_proj_split_size,current_proj_overlap_split_size;
size_t num_bytes_img_curr;
size_t img_linear_idx_start;
float** partial_projection;
size_t* proj_split_size;
for(unsigned int img_slice=0;img_slice<split_image;img_slice++){
//
// Initialize the memory if its the first time.
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipMemset(dimage[dev],0,num_bytes_img);
cudaCheckErrors("memset fail");
}
for( unsigned int proj=0;proj<split_projections;proj++){
// What is the size of the current chunk of projections we need?
current_proj_split_size=(nalpha+split_projections-1)/split_projections;
// if its the last one its probably less
current_proj_split_size=((proj+1)*current_proj_split_size<nalpha)? current_proj_split_size: nalpha-current_proj_split_size*proj;
// We are going to split it in the same amount of kernels we need to execute.
proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;
// Create pointer to pointers of projections and precompute their location and size.
if(!proj && !img_slice){
partial_projection=(float**)malloc(current_proj_split_size*sizeof(float*));
proj_split_size=(size_t*)malloc(current_proj_split_size*sizeof(size_t)); // an array of size_t elements
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Crop the last one, as it's likely not completely divisible.
// now let's split this for simultaneous memcpy and compute.
// We want to make sure that if we can, we run PROJ_PER_KERNEL projections, to maximize kernel acceleration
// current_proj_overlap_split_size units = angles
current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;
//Get the linear index where the current memory chunk starts.
proj_linear_idx_start=(unsigned long long)((nalpha+split_projections-1)/split_projections)*(unsigned long long)proj*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
proj_linear_idx_start+=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
//Store result
proj_split_size[proj_block_split]=current_proj_overlap_split_size;
partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Now get the projections on memory
CreateTexture2(gpuids,
partial_projection[proj_block_split],geo,
&d_cuArrTex[(proj_block_split%2)*deviceCount],
proj_split_size[proj_block_split],
&texProj [(proj_block_split%2)*deviceCount],
stream, nStreamDevice,
(proj_block_split<2)&!proj&!img_slice);// Only allocate if its the first 2 calls
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipStreamSynchronize(stream[dev*nStreamDevice+1]);
}
for (dev = 0; dev < deviceCount; dev++){
//Safety:
// Depending on the number of GPUs, an image slice can end up with zero height.
// Just break the loop if we reached that point
if(geoArray[img_slice*deviceCount+dev].nVoxelZ==0)
break;
hipSetDevice(gpuids[dev]);
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
// I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geoArray[img_slice*deviceCount+dev].nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++){
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
unsigned int j;
for(j=0; j<PROJ_PER_KERNEL; j++){
unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;
unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j // index within kernel
+proj*(nalpha+split_projections-1)/split_projections // index of the global projection split
+proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL); // index of the current overlap split
if(currProjNumber_slice>=proj_split_size[proj_block_split])
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
if(currProjNumber_global>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
float sinalpha,cosalpha;
geoArray[img_slice*deviceCount+dev].alpha=-alphas[currProjNumber_global*3];//we got 3 angles now.
geoArray[img_slice*deviceCount+dev].theta=-alphas[currProjNumber_global*3+1];
geoArray[img_slice*deviceCount+dev].psi =-alphas[currProjNumber_global*3+2];
sinalpha=sin(geoArray[img_slice*deviceCount+dev].alpha);
cosalpha=cos(geoArray[img_slice*deviceCount+dev].alpha);
projSinCosArray2Host[5*j]=sinalpha; // 5*j because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
projSinCosArray2Host[5*j+1]=cosalpha;
projSinCosArray2Host[5*j+2]=geo.COR[currProjNumber_global];
projSinCosArray2Host[5*j+3]=geo.DSD[currProjNumber_global];
projSinCosArray2Host[5*j+4]=geo.DSO[currProjNumber_global];
computeDeltasCube(geoArray[img_slice*deviceCount+dev],currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[currProjNumber_global];
offOrig.y=geo.offOrigY[currProjNumber_global];
offOrig.z=geoArray[img_slice*deviceCount+dev].offOrigZ[currProjNumber_global];
offDetec.x=geo.offDetecU[currProjNumber_global];
offDetec.y=geo.offDetecV[currProjNumber_global];
offDetec.z=0;//unused
projParamsArray2Host[7*j] =deltaX; // 7*j because we have 7 Point3D values per projection
projParamsArray2Host[7*j+1]=deltaY;
projParamsArray2Host[7*j+2]=deltaZ;
projParamsArray2Host[7*j+3]=xyzOrigin;
projParamsArray2Host[7*j+4]=offOrig;
projParamsArray2Host[7*j+5]=offDetec;
projParamsArray2Host[7*j+6]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
hipMemcpyToSymbolAsync(projSinCosArray2Dev, projSinCosArray2Host, sizeof(float)*5*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[dev*nStreamDevice]);
hipMemcpyToSymbolAsync(projParamsArray2Dev, projParamsArray2Host, sizeof(Point3D)*7*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[dev*nStreamDevice]);
hipStreamSynchronize(stream[dev*nStreamDevice]);
hipLaunchKernelGGL(( kernelPixelBackprojection), dim3(grid),dim3(block),0,stream[dev*nStreamDevice], geoArray[img_slice*deviceCount+dev],dimage[dev],i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)*deviceCount+dev]);
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END RB code, Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
}
} // END sub-split of current projection chunk
} // END projection splits
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipLaunchKernelGGL(( matrixConstantMultiply), dim3(60),dim3(MAXTREADS),0,stream[dev*nStreamDevice], geoArray[img_slice*deviceCount+dev],dimage[dev],geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV));
}
// Now we need to take the image out of the GPU
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipStreamSynchronize(stream[dev*nStreamDevice]);
num_bytes_img_curr=(size_t)geoArray[img_slice*deviceCount+dev].nVoxelX*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelY*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelZ*sizeof(float);
img_linear_idx_start=(size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ*(size_t)(img_slice*deviceCount+dev);
hipMemcpyAsync(&result[img_linear_idx_start], dimage[dev], num_bytes_img_curr, hipMemcpyDeviceToHost,stream[dev*nStreamDevice+1]);
}
} // end image splits
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDeviceSynchronize();
}
// Clean the GPU
bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
for(unsigned int i=0; i<2;i++){ // 2 buffers (if needed, maybe only 1)
if (!two_buffers_used && i==1)
break;
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDestroyTextureObject(texProj[i*deviceCount+dev]);
hipFreeArray(d_cuArrTex[i*deviceCount+dev]);
}
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipFree(dimage[dev]);
}
hipHostFree(projSinCosArray2Host);
hipHostFree(projParamsArray2Host);
free(partial_projection);
free(proj_split_size);
freeGeoArray(split_image*deviceCount,geoArray);
if (isHostRegisterSupported & split_image>1){
hipHostUnregister(result);
}
if (isHostRegisterSupported){
hipHostUnregister(projections);
}
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]);
cudaCheckErrors("hipFree fail");
// hipDeviceReset(); // For the Nvidia Visual Profiler
return 0;
} // END voxel_backprojection
void CreateTexture2(const GpuIds& gpuids, float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream,int nStreamDevice,bool allocate){
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
int num_devices = gpuids.GetLength();
#if IS_FOR_MATLAB_TIGRE
const hipExtent extent =make_hipExtent(geo.nDetecV, geo.nDetecU, nangles);
#else
const hipExtent extent =make_hipExtent(geo.nDetecU, geo.nDetecV, nangles);
#endif
if (allocate){
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(gpuids[dev]);
//hipArray Descriptor
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
//cuda Array
hipMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
}
}
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(gpuids[dev]);
hipMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_hipPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[dev];
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3DAsync(©Params,stream[dev*nStreamDevice+1]);
}
//Array creation End
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(gpuids[dev]);
hipResourceDesc texRes;
memset(&texRes, 0, sizeof(hipResourceDesc));
texRes.resType = hipResourceTypeArray;
texRes.res.array.array = d_cuArrTex[dev];
hipTextureDesc texDescr;
memset(&texDescr, 0, sizeof(hipTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = hipFilterModeLinear;
texDescr.addressMode[0] = hipAddressModeBorder;
texDescr.addressMode[1] = hipAddressModeBorder;
texDescr.addressMode[2] = hipAddressModeBorder;
texDescr.readMode = hipReadModeElementType;
hipCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
}
}
#ifndef BACKPROJECTION_HPP
void checkDevices(const GpuIds& gpuids){
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
int deviceCount = gpuids.GetLength();
const int devicenamelength = 256; // The length 256 is fixed by spec of hipDeviceProp_t::name
char devicename[devicenamelength];
hipDeviceProp_t deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(gpuids[dev]);
hipGetDeviceProperties(&deviceProp, gpuids[dev]); // properties of the GPU selected above
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("Atb:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
}
void splitCTbackprojection(const GpuIds& gpuids, Geometry geo,int nalpha, unsigned int* split_image, unsigned int * split_projections){
// We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(gpuids, &mem_GPU_global);
const int deviceCount = gpuids.GetLength();
// Compute how much memory each of the relevant memory pieces need
size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);
// Does everything fit in the GPU?
if(mem_image/deviceCount+mem_proj*PROJ_PER_KERNEL*2<mem_GPU_global){
// We only need to split if we have extra GPUs
*split_image=1;
*split_projections=1;
}
// We know we need to split, but:
// Does all the image fit in the GPU, with some slack for a stack of projections??
else
{
// As we can overlap memcpys from H2D of the projections, we should then minimize the amount of image splits.
// Lets assume to start with that we only need 1 stack of PROJ_PER_KERNEL projections. The rest is for the image.
size_t mem_free=mem_GPU_global-2*mem_proj*PROJ_PER_KERNEL;
*split_image=(mem_image/deviceCount+mem_free-1)/mem_free;
// Now knowing how many splits we have for images, we can recompute how many slices of projections actually
// fit on the GPU. Must be more than 0 obviously.
mem_free=mem_GPU_global-(mem_image/deviceCount)/(*split_image); // NOTE: There is some rounding error, but it's in the order of bytes, and we keep 5% of the GPU free just in case. We are safe
*split_projections=(mem_proj*PROJ_PER_KERNEL*2+mem_free-1)/mem_free;
}
}
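// Worked example with assumed numbers (not measurements): a 1024^3 float volume gives
// mem_image = 4096 MiB and a 1024x1024 float projection gives mem_proj = 4 MiB, so the two
// projection stacks need 2*32*4 = 256 MiB. On a single GPU reporting mem_GPU_global ~ 3891 MiB:
//     4096 + 256 > 3891 -> we must split
//     mem_free = 3891 - 256 = 3635 MiB  -> split_image = ceil(4096/3635) = 2
//     mem_free = 3891 - 4096/2 = 1843 MiB -> split_projections = ceil(256/1843) = 1
// i.e. the volume is backprojected in two halves, each half seeing all projections.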
void computeDeltasCube(Geometry geo,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate image around X axis (this is equivalent to rotating the source and detector) RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x+(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x+(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x+(geo.DSD[i]-geo.DSO[i]);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x-(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x-(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x-(geo.DSD[i]-geo.DSO[i]);
//Done for P, now source
Point3D source;
source.x=geo.DSD[i]; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD[i]-geo.DSO[i]);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
} // END computeDeltasCube
void rollPitchYawT(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
-sin(geo.dPitch[i])*auxPoint.z;
point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;
point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
void checkFreeMemory(const GpuIds& gpuids,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
const int deviceCount = gpuids.GetLength();
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
#endif | a571cb35e4255e51fa73c5256233577fa10dc0a7.cu | /*-------------------------------------------------------------------------
*
* CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection2.hpp"
#include "TIGRE_common.hpp"
#include <math.h>
#include "GpuIds.hpp"
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
// this definition must go here.
void CreateTexture2(const GpuIds& gpuids, float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream,int nStreamDevice,bool allocate);
__global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) {
image[idx]*=constant;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections processed by a single kernel call (each thread loops over all of them). This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by a single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 7 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec, source
// So we need to keep PROJ_PER_KERNEL*7 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArray2Dev[7*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// Now we also need to store, for each projection, sinAlpha, cosAlpha, COR, DSD and DSO (five floats per projection)
__constant__ float projSinCosArray2Dev[5*PROJ_PER_KERNEL];
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
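// Layout sketch (illustrative only; the code below indexes the flat constant arrays
// directly, there is no such struct in this file): entry j of the batch conceptually holds
//
//   struct ProjParams {                                   // hypothetical
//       Point3D deltaX, deltaY, deltaZ;                   // voxel steps, already rotated/scaled
//       Point3D xyzOrigin, offOrig, offDetec, source;     // origin, offsets and rotated source
//       float   sinAlpha, cosAlpha, COR, DSD, DSO;        // per-projection scalars
//   };
//
// flattened as 7 Point3D values at projParamsArray2Dev[7*j .. 7*j+6]
// and 5 floats at projSinCosArray2Dev[5*j .. 5*j+4].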
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojection(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections, cudaTextureObject_t tex)
{
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we don't go out of bounds
if (indX>=geo.nVoxelX || indY>=geo.nVoxelY || startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
#pragma unroll
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our currImageVal will be updated by however many projections we had left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArray2Dev[7*projNumber]; // 7*projNumber because we have 7 Point3D values per projection
Point3D deltaY = projParamsArray2Dev[7*projNumber+1];
Point3D deltaZ = projParamsArray2Dev[7*projNumber+2];
Point3D xyzOrigin = projParamsArray2Dev[7*projNumber+3];
Point3D xyzOffset = projParamsArray2Dev[7*projNumber+4];
Point3D uv0Offset = projParamsArray2Dev[7*projNumber+5];
Point3D S = projParamsArray2Dev[7*projNumber+6];
float sinalpha = projSinCosArray2Dev[5*projNumber]; // 5*projNumber because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
float cosalpha = projSinCosArray2Dev[5*projNumber+1];
float COR = projSinCosArray2Dev[5*projNumber+2];
float DSD = projSinCosArray2Dev[5*projNumber+3];
float DSO = projSinCosArray2Dev[5*projNumber+4];
// Precomputations for the weights:
//Real coords of Source
// We already have S.x (geo.DSO), and S.y and S.z are always zero. we just need to rotate
Point3D realS;
realS.x= DSO*cosalpha;
realS.y=-DSO*sinalpha;
realS.z=0;
Point3D realvoxel_init;
realvoxel_init.x=-geo.sVoxelX/2+geo.dVoxelX/2+xyzOffset.x;
realvoxel_init.y=-geo.sVoxelY/2+geo.dVoxelY/2+xyzOffset.y;
realvoxel_init.z=-geo.sVoxelZ/2+geo.dVoxelZ/2+xyzOffset.z;
// Real XYZ coordinates of Detector.
Point3D realD, realDaux;
// We know the index of the detector (u,v). Start from there.
realDaux.x=-(DSD-DSO);
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=__fdividef(DSO-DSD-S.x,vectX);
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+(float)geo.nDetecU*0.5f;
v=z+(float)geo.nDetecV*0.5f;
#if IS_FOR_MATLAB_TIGRE
float sample=tex3D<float>(tex, v, u ,indAlpha+0.5f);
#else
float sample=tex3D<float>(tex, u, v ,indAlpha+0.5f);
#endif
float weigth=0;
//
//
//
// IMPORTANT: The weights are almost 50% of the computational time. Is there a way of speeding this up??
//
//Real coordinates of Voxel. Instead of reverting the transformation, it's less math (faster) to compute it from the indexes.
Point3D realvoxel;
realvoxel.x=realvoxel_init.x+indX*geo.dVoxelX;
realvoxel.y=realvoxel_init.y+indY*geo.dVoxelY;
realvoxel.z=realvoxel_init.z+indZ*geo.dVoxelZ;
realDaux.y=(-geo.sDetecU+geo.dDetecU)*0.5f + u*geo.dDetecU +uv0Offset.x;
realD.z =(-geo.sDetecV+geo.dDetecV)*0.5f + v*geo.dDetecV +uv0Offset.y;
//rotate the detector
realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
float L,lsq;
L = __fsqrt_rd( (realS.x-realD.x)*(realS.x-realD.x)+ (realS.y-realD.y)*(realS.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always.
lsq = (realS.x-realvoxel.x)*(realS.x-realvoxel.x)
+ (realS.y-realvoxel.y)*(realS.y-realvoxel.y)
+ (realS.z-realvoxel.z)*(realS.z-realvoxel.z);
weigth=__fdividef(L*L*L,(DSD*lsq));
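// Written as a formula, the two lines above apply
//     weigth = L^3 / (DSD * lsq)
// with L = || realS - realD || (source to detector-pixel distance) and
// lsq = || realS - realvoxel ||^2 (squared source to voxel distance),
// i.e. the distance weighting this FDK backprojector applies to every sample.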
// weigth=1;
// Get Value in the computed (U,V) and multiply by the corresponding weigth.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=sample* weigth;
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Write the updated voxel value back to the 3D volume (main memory)
// We updated the local (register) copy inside the projection loop above, avoiding reads/writes from the slow main memory until now.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojectionFDK
//______________________________________________________________________________
//
// Function: voxel_backprojection
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection2(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha, const GpuIds& gpuids){
// Prepare for MultiGPU
int deviceCount = gpuids.GetLength();
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","There are no available device(s) that support CUDA\n");
}
// Check the available devices, and if they are the same
int dev;
checkDevices(gpuids);
// Split the CT problem
unsigned int split_image;
unsigned int split_projections;
splitCTbackprojection(gpuids,geo,nalpha,&split_image,&split_projections);
// Create the arrays for the geometry. The main difference is that geo.offZ has been tuned for the
// image slices. The rest of the Geometry is the same
Geometry* geoArray=(Geometry*)malloc(split_image*deviceCount*sizeof(Geometry));
createGeoArray(split_image*deviceCount,geo,geoArray,nalpha);
// Now let's allocate all the image memory on the GPU, so we can use it later. If we have made our numbers correctly
// in the previous section this should leave enough space for the textures.
size_t num_bytes_img = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ* sizeof(float);
float** dimage=(float**)malloc(deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMalloc((void**)&dimage[dev], num_bytes_img);
cudaCheckErrors("cudaMalloc fail");
}
//Page-lock (pin) host memory so that the asynchronous copies below can overlap with compute.
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported = 0;
#if CUDART_VERSION >= 9020
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,gpuids[0]);
#endif
// empirical testing shows that when the image is not split (split_image==1, which also implies the image is not very big), the time to
// pin the memory is greater than the time lost by launching the memcpys synchronously. Pinning is only worth it when the image is big.
if (isHostRegisterSupported & split_image>1){
cudaHostRegister(result, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable);
}
if (isHostRegisterSupported ){
cudaHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
//If it is the first time, lets make sure our image is zeroed.
int nStreamDevice=2;
int nStreams=deviceCount*nStreamDevice;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
for (int i = 0; i < nStreamDevice; ++i){
cudaStreamCreate(&stream[i+dev*nStreamDevice]);
}
}
// Kernel auxiliary variables
Point3D* projParamsArray2Host;
cudaMallocHost((void**)&projParamsArray2Host,7*PROJ_PER_KERNEL*sizeof(Point3D));
float* projSinCosArray2Host;
cudaMallocHost((void**)&projSinCosArray2Host,5*PROJ_PER_KERNEL*sizeof(float));
// Texture object variables
cudaTextureObject_t *texProj;
cudaArray **d_cuArrTex;
texProj =(cudaTextureObject_t*)malloc(deviceCount*2*sizeof(cudaTextureObject_t));
d_cuArrTex =(cudaArray**)malloc(deviceCount*2*sizeof(cudaArray*));
unsigned int proj_split_overlap_number;
// Start with the main loop. The projection data needs to be allocated and deallocated in the main loop
// as due to the nature of cudaArrays, we can not reuse them. This should not be a problem for the fast execution
// of the code, as repeated allocation and deallocation only happens when the projection data is very very big,
// and therefore allocation time should be negligible; fluctuation of other computations should mask the time.
unsigned long long proj_linear_idx_start;
unsigned int current_proj_split_size,current_proj_overlap_split_size;
size_t num_bytes_img_curr;
size_t img_linear_idx_start;
float** partial_projection;
size_t* proj_split_size;
for(unsigned int img_slice=0;img_slice<split_image;img_slice++){
//
// Initialize the memory if its the first time.
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemset(dimage[dev],0,num_bytes_img);
cudaCheckErrors("memset fail");
}
for( unsigned int proj=0;proj<split_projections;proj++){
            // What is the size of the current chunk of projections we need to work on?
current_proj_split_size=(nalpha+split_projections-1)/split_projections;
// if its the last one its probably less
current_proj_split_size=((proj+1)*current_proj_split_size<nalpha)? current_proj_split_size: nalpha-current_proj_split_size*proj;
// We are going to split it in the same amount of kernels we need to execute.
proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;
// Create pointer to pointers of projections and precompute their location and size.
if(!proj && !img_slice){
partial_projection=(float**)malloc(current_proj_split_size*sizeof(float*));
                proj_split_size=(size_t*)malloc(current_proj_split_size*sizeof(size_t));
}
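            // Illustration of the chunking arithmetic below, with assumed numbers (not taken from this code):
            // nalpha=100, split_projections=3, PROJ_PER_KERNEL=32 gives current_proj_split_size=ceil(100/3)=34
            // for the first two chunks and 100-2*34=32 for the last one; proj_split_overlap_number=ceil(34/32)=2,
            // so a 34-angle chunk is processed as one sub-split of max(ceil(34/2),32)=32 angles followed by the
            // remaining 34-32=2 angles.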
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
                // Crop the last one, as it is likely not completely divisible.
                // Now let's split this for simultaneous memcpy and compute.
// We want to make sure that if we can, we run PROJ_PER_KERNEL projections, to maximize kernel acceleration
// current_proj_overlap_split_size units = angles
current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;
//Get the linear index where the current memory chunk starts.
proj_linear_idx_start=(unsigned long long)((nalpha+split_projections-1)/split_projections)*(unsigned long long)proj*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
proj_linear_idx_start+=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
//Store result
proj_split_size[proj_block_split]=current_proj_overlap_split_size;
partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Now get the projections on memory
CreateTexture2(gpuids,
partial_projection[proj_block_split],geo,
&d_cuArrTex[(proj_block_split%2)*deviceCount],
proj_split_size[proj_block_split],
&texProj [(proj_block_split%2)*deviceCount],
stream, nStreamDevice,
(proj_block_split<2)&!proj&!img_slice);// Only allocate if its the first 2 calls
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaStreamSynchronize(stream[dev*nStreamDevice+1]);
}
for (dev = 0; dev < deviceCount; dev++){
//Safety:
                // Depending on the number of GPUs, the case where an image slice has zero height can happen.
// Just break the loop if we reached that point
if(geoArray[img_slice*deviceCount+dev].nVoxelZ==0)
break;
cudaSetDevice(gpuids[dev]);
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
                // I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geoArray[img_slice*deviceCount+dev].nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
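                // Illustrative launch geometry (assumed numbers, not taken from this code): for a 512x512x256 voxel
                // slice and VOXELS_PER_THREAD==8 this gives grid=(32,16,32) blocks of 16x32x1 threads, with each
                // thread walking down a column of 8 voxels in Z.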
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++){
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
unsigned int j;
for(j=0; j<PROJ_PER_KERNEL; j++){
unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;
unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j // index within kernel
+proj*(nalpha+split_projections-1)/split_projections // index of the global projection split
                        +proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL); // index of the current overlap split
if(currProjNumber_slice>=proj_split_size[proj_block_split])
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
if(currProjNumber_global>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
float sinalpha,cosalpha;
geoArray[img_slice*deviceCount+dev].alpha=-alphas[currProjNumber_global*3];//we got 3 angles now.
geoArray[img_slice*deviceCount+dev].theta=-alphas[currProjNumber_global*3+1];
geoArray[img_slice*deviceCount+dev].psi =-alphas[currProjNumber_global*3+2];
sinalpha=sin(geoArray[img_slice*deviceCount+dev].alpha);
cosalpha=cos(geoArray[img_slice*deviceCount+dev].alpha);
                        projSinCosArray2Host[5*j]=sinalpha;  // 5*j because we store 5 float values per projection (sin, cos, COR, DSD, DSO)
projSinCosArray2Host[5*j+1]=cosalpha;
projSinCosArray2Host[5*j+2]=geo.COR[currProjNumber_global];
projSinCosArray2Host[5*j+3]=geo.DSD[currProjNumber_global];
projSinCosArray2Host[5*j+4]=geo.DSO[currProjNumber_global];
computeDeltasCube(geoArray[img_slice*deviceCount+dev],currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[currProjNumber_global];
offOrig.y=geo.offOrigY[currProjNumber_global];
offOrig.z=geoArray[img_slice*deviceCount+dev].offOrigZ[currProjNumber_global];
offDetec.x=geo.offDetecU[currProjNumber_global];
offDetec.y=geo.offDetecV[currProjNumber_global];
offDetec.z=0;//unused
projParamsArray2Host[7*j] =deltaX; // 7*j because we have 7 Point3D values per projection
projParamsArray2Host[7*j+1]=deltaY;
projParamsArray2Host[7*j+2]=deltaZ;
projParamsArray2Host[7*j+3]=xyzOrigin;
projParamsArray2Host[7*j+4]=offOrig;
projParamsArray2Host[7*j+5]=offDetec;
projParamsArray2Host[7*j+6]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
cudaMemcpyToSymbolAsync(projSinCosArray2Dev, projSinCosArray2Host, sizeof(float)*5*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
cudaMemcpyToSymbolAsync(projParamsArray2Dev, projParamsArray2Host, sizeof(Point3D)*7*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
cudaStreamSynchronize(stream[dev*nStreamDevice]);
kernelPixelBackprojection<<<grid,block,0,stream[dev*nStreamDevice]>>>(geoArray[img_slice*deviceCount+dev],dimage[dev],i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)*deviceCount+dev]);
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END RB code, Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
}
} // END sub-split of current projection chunk
} // END projection splits
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
matrixConstantMultiply<<<60,MAXTREADS,0,stream[dev*nStreamDevice]>>>( geoArray[img_slice*deviceCount+dev],dimage[dev],geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV));
}
// Now we need to take the image out of the GPU
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaStreamSynchronize(stream[dev*nStreamDevice]);
num_bytes_img_curr=(size_t)geoArray[img_slice*deviceCount+dev].nVoxelX*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelY*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelZ*sizeof(float);
img_linear_idx_start=(size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ*(size_t)(img_slice*deviceCount+dev);
cudaMemcpyAsync(&result[img_linear_idx_start], dimage[dev], num_bytes_img_curr, cudaMemcpyDeviceToHost,stream[dev*nStreamDevice+1]);
}
} // end image splits
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDeviceSynchronize();
}
// Clean the GPU
bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
for(unsigned int i=0; i<2;i++){ // 2 buffers (if needed, maybe only 1)
if (!two_buffers_used && i==1)
            break;
        for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDestroyTextureObject(texProj[i*deviceCount+dev]);
cudaFreeArray(d_cuArrTex[i*deviceCount+dev]);
}
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaFree(dimage[dev]);
}
cudaFreeHost(projSinCosArray2Host);
cudaFreeHost(projParamsArray2Host);
free(partial_projection);
free(proj_split_size);
freeGeoArray(split_image*deviceCount,geoArray);
    if (isHostRegisterSupported && split_image>1){
cudaHostUnregister(result);
}
if (isHostRegisterSupported){
cudaHostUnregister(projections);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]);
cudaCheckErrors("cudaFree fail");
// cudaDeviceReset(); // For the Nvidia Visual Profiler
return 0;
} // END voxel_backprojection
void CreateTexture2(const GpuIds& gpuids, float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream,int nStreamDevice,bool allocate){
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
int num_devices = gpuids.GetLength();
#if IS_FOR_MATLAB_TIGRE
const cudaExtent extent =make_cudaExtent(geo.nDetecV, geo.nDetecU, nangles);
#else
const cudaExtent extent =make_cudaExtent(geo.nDetecU, geo.nDetecV, nangles);
#endif
if (allocate){
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(gpuids[dev]);
//cudaArray Descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
//cuda Array
cudaMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
}
}
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_cudaPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[dev];
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3DAsync(©Params,stream[dev*nStreamDevice+1]);
}
//Array creation End
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(gpuids[dev]);
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = d_cuArrTex[dev];
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = cudaFilterModeLinear;
texDescr.addressMode[0] = cudaAddressModeBorder;
texDescr.addressMode[1] = cudaAddressModeBorder;
texDescr.addressMode[2] = cudaAddressModeBorder;
texDescr.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
}
}
#ifndef BACKPROJECTION_HPP
void checkDevices(const GpuIds& gpuids){
// CODE assumes
// 1.-All available devices are usable by this code
    //  2.-All available devices are equal, i.e. they are the same GPU model (otherwise a warning is thrown)
int dev;
int deviceCount = gpuids.GetLength();
const int devicenamelength = 256; // The length 256 is fixed by spec of cudaDeviceProp::name
char devicename[devicenamelength];
cudaDeviceProp deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(gpuids[dev]);
cudaGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("Atb:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
}
void splitCTbackprojection(const GpuIds& gpuids, Geometry geo,int nalpha, unsigned int* split_image, unsigned int * split_projections){
    // We don't know if the devices are being used by something else. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(gpuids, &mem_GPU_global);
const int deviceCount = gpuids.GetLength();
// Compute how much memory each of the relevant memory pieces need
size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);
// Does everything fit in the GPU?
if(mem_image/deviceCount+mem_proj*PROJ_PER_KERNEL*2<mem_GPU_global){
        // Everything fits: no splitting needed beyond distributing the image across the available GPUs.
*split_image=1;
*split_projections=1;
}
// We know we need to split, but:
// Does all the image fit in the GPU, with some slack for a stack of projections??
else
{
// As we can overlap memcpys from H2D of the projections, we should then minimize the amount of image splits.
// Lets assume to start with that we only need 1 stack of PROJ_PER_KERNEL projections. The rest is for the image.
size_t mem_free=mem_GPU_global-2*mem_proj*PROJ_PER_KERNEL;
*split_image=(mem_image/deviceCount+mem_free-1)/mem_free;
// Now knowing how many splits we have for images, we can recompute how many slices of projections actually
// fit on the GPU. Must be more than 0 obviously.
        mem_free=mem_GPU_global-(mem_image/deviceCount)/(*split_image); // NOTE: There is some rounding error, but it is in the order of bytes, and we keep 5% of the GPU free just in case. We are safe
*split_projections=(mem_proj*PROJ_PER_KERNEL*2+mem_free-1)/mem_free;
}
}
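// Worked example of the split logic above, with assumed numbers (not taken from this code): a 512^3 float image
// is 512 MiB and one 512x512 float projection is 1 MiB; with a single GPU, PROJ_PER_KERNEL==32 and
// mem_GPU_global ~= 8 GiB (after the 5% margin), mem_image/deviceCount + 2*PROJ_PER_KERNEL*mem_proj
// = 512 MiB + 64 MiB fits easily, so split_image=split_projections=1. The else branch only triggers for much
// larger volumes or much smaller cards.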
void computeDeltasCube(Geometry geo,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
    // Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
    // Rotate image around X axis (this is equivalent to rotating the source and detector) RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x+(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x+(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x+(geo.DSD[i]-geo.DSO[i]);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x-(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x-(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x-(geo.DSD[i]-geo.DSO[i]);
//Done for P, now source
Point3D source;
    source.x=geo.DSD[i]; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD[i]-geo.DSO[i]);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
} // END computeDeltasCube
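// Reading of computeDeltasCube's outputs as the backprojection kernel consumes them: xyzorigin is the position
// of voxel (0,0,0), deltaX/deltaY/deltaZ are the per-voxel steps along each image axis, and S is the source
// position, all expressed in the rotated, detector-offset-corrected frame and scaled so that detector pixels
// are 1x1 (the divisions by dDetecU/dDetecV above).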
void rollPitchYawT(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
-sin(geo.dPitch[i])*auxPoint.z;
point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;
point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
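// Note on rollPitchYawT: it applies the fixed 3x3 matrix spelled out term by term above to the point in place.
// Judging by the trailing "T", this is the transpose (inverse) of the detector roll/pitch/yaw rotation used by
// the forward projector, so the backprojector undoes the detector misalignment; the exact angle convention is
// an inference from the code, not a statement by its authors.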
void checkFreeMemory(const GpuIds& gpuids,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
    const int deviceCount = gpuids.GetLength();
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
#endif |
d7e3ac4b75e97a28f9bf20ee4e9b2bf214128941.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "test_comm.h"
void host_matrixMul(double* pC, double* pA,double *pB, int mA, int nB)
{
int i, j;
double *data_in1_d, *data_in2_d, *data_out_d;
float *data_in1_f, *data_in2_f, *data_out_f;
float *data_in1_f_gpu, *data_in2_f_gpu , *data_out_f_gpu;
int sizeBlock;
sizeBlock = VECTOR_BLOCK_SIZE;
int sizeA = mA*nB;
int sizeB = nB;
int sizeC = mA;
// get Input data pointer
data_in1_d = pA;
data_in2_d = pB;
	// get Output data pointer
data_out_d = pC;
// Find the dimensions of the data
// Create an mxArray for the output data
// Create an input and output data array on the GPU
hipMalloc( (void **) &data_in1_f_gpu,sizeof(t_ve)*sizeA);
hipMalloc( (void **) &data_in2_f_gpu,sizeof(t_ve)*sizeB);
hipMalloc( (void **) &data_out_f_gpu,sizeof(t_ve)*sizeC);
// Retrieve the input data
// Check if the input array is single or double precision
	// The input array is in double precision, it needs to be converted to floats before being sent to the card
data_in1_f = (t_ve *) malloc(sizeof(t_ve)*sizeA);
data_in2_f = (t_ve *) malloc(sizeof(t_ve)*sizeB);
data_out_f = (t_ve *) malloc(sizeof(t_ve)*sizeC);
for (j = 0; j < sizeA; j++)
{
data_in1_f[j] = (t_ve) data_in1_d[j];
}
for (j = 0; j < sizeB; j++)
{
data_in2_f[j] = (t_ve) data_in2_d[j];
}
for (i = 0; i < sizeA; i++)
{
		//printf("data_in1_f[%d] = %f, ", i, data_in1_f[i]);
}
printf("\n");
hipMemcpy( data_in1_f_gpu, data_in1_f, sizeof(t_ve)*sizeA, hipMemcpyHostToDevice);
hipMemcpy( data_in2_f_gpu, data_in2_f, sizeof(t_ve)*sizeB, hipMemcpyHostToDevice);
	// Compute execution configuration using VECTOR_BLOCK_SIZE threads per block
dim3 dimBlock(sizeBlock);
//dim3 dimGrid((sizeIn)/dimBlock.x);
dim3 dimGrid(mA);
//if ( (sizeA) % sizeBlock !=0 ) dimGrid.x+=1;
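	// Launch-configuration note (an inference, since the matrixMul kernel itself is defined elsewhere,
	// presumably declared via test_comm.h): dimGrid(mA) suggests one block per output element of pC, with the
	// VECTOR_BLOCK_SIZE threads of each block cooperating on that row's dot product.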
	//Call function on GPU
	hipLaunchKernelGGL(matrixMul, dim3(dimGrid), dim3(dimBlock), 0, 0, data_out_f_gpu, data_in1_f_gpu, data_in2_f_gpu, mA, nB);
hipError_t e;
e = hipGetLastError();
if ( e != hipSuccess)
{
fprintf(stderr, "CUDA Error on square_elements: '%s' \n", hipGetErrorString(e));
exit(-1);
}
// Copy result back to host
hipMemcpy( data_out_f, data_out_f_gpu, sizeof(float)*sizeC, hipMemcpyDeviceToHost);
for (i = 0; i < sizeC; i++)
{
		//printf("data_out_f[%d] = %f, ", i, data_out_f[i]);
}
// printf("\n");
// Create a pointer to the output data
// Convert from single to double before returning
for (j = 0; j < sizeC; j++)
{
data_out_d[j] = (double) data_out_f[j];
}
// Clean-up memory on device and host
free(data_in1_f);
free(data_in2_f);
free(data_out_f);
hipFree(data_in1_f_gpu);
hipFree(data_in2_f_gpu);
hipFree(data_out_f_gpu);
}
int test_matrixMul()
{
double *pA, *pB,*pC;
int mA, nB;
int i;
double expect;
int loop;
for (loop = 256; loop < 260; loop++) {
int expect_error = 0;
mA = loop;
nB = loop;
pA = (double*)malloc(sizeof(double)*mA*nB);
pB = (double*)malloc(sizeof(double)*nB);
pC = (double*)malloc(sizeof(double)*mA);
for (i = 0; i < mA*nB; i++){
pA[i] = 1;
}
for (i = 0; i < nB; i++){
pB[i] = 1;
}
host_matrixMul(pC,pA, pB, mA, nB);
expect = (double) nB;
printf("output square result");
for (i = 0; i < nB; i++)
		{
			if(pC[i] != expect)
			{
				printf(" pC[%d] = %lf, ", i, pC[i]);
				expect_error = loop;
			}
		}
printf("\n");
printf("expect error = %d,\n",expect_error);
free(pA);
free(pB);
free(pC);
}
return 0;
}
int mexTest_matrixMul(double *pC,double *pA,double *pB,int mA, int nB)
{
//double *pC;
//int sizeOut;
int i;
//pC = (double*)malloc(sizeof(double)*mA);
host_matrixMul(pC,pA, pB, mA, nB);
printf("output square result");
for (i = 0; i < nB; i++)
{
//if(pC[i] != expect)
printf(" pC[%d] = %lf, ", i, pC[i]);
// expect_error = loop;
}
printf("\n");
// printf("expect error = %d,\n",expect_error);
//free(pC);
return 0;
} | d7e3ac4b75e97a28f9bf20ee4e9b2bf214128941.cu | #include "test_comm.h"
void host_matrixMul(double* pC, double* pA,double *pB, int mA, int nB)
{
int i, j;
double *data_in1_d, *data_in2_d, *data_out_d;
float *data_in1_f, *data_in2_f, *data_out_f;
float *data_in1_f_gpu, *data_in2_f_gpu , *data_out_f_gpu;
int sizeBlock;
sizeBlock = VECTOR_BLOCK_SIZE;
int sizeA = mA*nB;
int sizeB = nB;
int sizeC = mA;
// get Input data pointer
data_in1_d = pA;
data_in2_d = pB;
	// get Output data pointer
data_out_d = pC;
// Find the dimensions of the data
// Create an mxArray for the output data
// Create an input and output data array on the GPU
cudaMalloc( (void **) &data_in1_f_gpu,sizeof(t_ve)*sizeA);
cudaMalloc( (void **) &data_in2_f_gpu,sizeof(t_ve)*sizeB);
cudaMalloc( (void **) &data_out_f_gpu,sizeof(t_ve)*sizeC);
// Retrieve the input data
// Check if the input array is single or double precision
	// The input array is in double precision, it needs to be converted to floats before being sent to the card
data_in1_f = (t_ve *) malloc(sizeof(t_ve)*sizeA);
data_in2_f = (t_ve *) malloc(sizeof(t_ve)*sizeB);
data_out_f = (t_ve *) malloc(sizeof(t_ve)*sizeC);
for (j = 0; j < sizeA; j++)
{
data_in1_f[j] = (t_ve) data_in1_d[j];
}
for (j = 0; j < sizeB; j++)
{
data_in2_f[j] = (t_ve) data_in2_d[j];
}
for (i = 0; i < sizeA; i++)
{
		//printf("data_in1_f[%d] = %f, ", i, data_in1_f[i]);
}
printf("\n");
cudaMemcpy( data_in1_f_gpu, data_in1_f, sizeof(t_ve)*sizeA, cudaMemcpyHostToDevice);
cudaMemcpy( data_in2_f_gpu, data_in2_f, sizeof(t_ve)*sizeB, cudaMemcpyHostToDevice);
	// Compute execution configuration using VECTOR_BLOCK_SIZE threads per block
dim3 dimBlock(sizeBlock);
//dim3 dimGrid((sizeIn)/dimBlock.x);
dim3 dimGrid(mA);
//if ( (sizeA) % sizeBlock !=0 ) dimGrid.x+=1;
//Call function on GPU
matrixMul<<<dimGrid,dimBlock>>>(data_out_f_gpu,data_in1_f_gpu, data_in2_f_gpu, mA,nB);
cudaError_t e;
e = cudaGetLastError();
if ( e != cudaSuccess)
{
fprintf(stderr, "CUDA Error on square_elements: '%s' \n", cudaGetErrorString(e));
exit(-1);
}
// Copy result back to host
cudaMemcpy( data_out_f, data_out_f_gpu, sizeof(float)*sizeC, cudaMemcpyDeviceToHost);
for (i = 0; i < sizeC; i++)
{
		//printf("data_out_f[%d] = %f, ", i, data_out_f[i]);
}
// printf("\n");
// Create a pointer to the output data
// Convert from single to double before returning
for (j = 0; j < sizeC; j++)
{
data_out_d[j] = (double) data_out_f[j];
}
// Clean-up memory on device and host
free(data_in1_f);
free(data_in2_f);
free(data_out_f);
cudaFree(data_in1_f_gpu);
cudaFree(data_in2_f_gpu);
cudaFree(data_out_f_gpu);
}
int test_matrixMul()
{
double *pA, *pB,*pC;
int mA, nB;
int i;
double expect;
int loop;
for (loop = 256; loop < 260; loop++) {
int expect_error = 0;
mA = loop;
nB = loop;
pA = (double*)malloc(sizeof(double)*mA*nB);
pB = (double*)malloc(sizeof(double)*nB);
pC = (double*)malloc(sizeof(double)*mA);
for (i = 0; i < mA*nB; i++){
pA[i] = 1;
}
for (i = 0; i < nB; i++){
pB[i] = 1;
}
host_matrixMul(pC,pA, pB, mA, nB);
expect = (double) nB;
printf("output square result");
for (i = 0; i < nB; i++)
		{
			if(pC[i] != expect)
			{
				printf(" pC[%d] = %lf, ", i, pC[i]);
				expect_error = loop;
			}
		}
printf("\n");
printf("expect error = %d,\n",expect_error);
free(pA);
free(pB);
free(pC);
}
return 0;
}
int mexTest_matrixMul(double *pC,double *pA,double *pB,int mA, int nB)
{
//double *pC;
//int sizeOut;
int i;
//pC = (double*)malloc(sizeof(double)*mA);
host_matrixMul(pC,pA, pB, mA, nB);
printf("output square result");
for (i = 0; i < nB; i++)
{
//if(pC[i] != expect)
printf(" pC[%d] = %lf, ", i, pC[i]);
// expect_error = loop;
}
printf("\n");
// printf("expect error = %d,\n",expect_error);
//free(pC);
return 0;
} |
038301f0a0f55157370fa1b37e8b350440e7d778.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (hipPeekAtLastError()) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
#define N ( 1024 * 1024 )
#define RADIUS 5
#define THREADS_PER_BLOCK 512
__global__ void stencil_1d(int n, double *in, double *out)
{
/* calculate global index in the array */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* return if my global index is larger than the array size */
if( globalIndex >= n ) return;
/* code to handle the boundary conditions */
if( globalIndex < RADIUS || globalIndex >= (n - RADIUS) )
{
out[globalIndex] = (double) globalIndex * ( (double)RADIUS*2 + 1) ;
return;
} /* end if */
double result = 0.0;
for( int i = globalIndex-(RADIUS); i <= globalIndex+(RADIUS); i++ )
{
result += in[i];
}
out[globalIndex] = result;
return;
}
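/* Why the verification in main() works: the input is the linear ramp in[i] = i, and the sum of the
   2*RADIUS+1 consecutive ramp values centred at i is (2*RADIUS+1)*i, because the +k and -k terms cancel
   in pairs; that is exactly in[i]*(RADIUS*2+1), and the boundary branch above writes that value directly. */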
int main()
{
double *in, *out;
double *d_in, *d_out;
int size = N * sizeof( double );
/* allocate space for device copies of in, out */
CUDA_CALL( hipMalloc( (void **) &d_in, size ) );
CUDA_CALL( hipMalloc( (void **) &d_out, size ) );
/* allocate space for host copies of in, out and setup input values */
in = (double *)malloc( size );
out = (double *)malloc( size );
for( int i = 0; i < N; i++ )
{
in[i] = (double) i;
out[i] = 0;
}
/* copy inputs to device */
CUDA_CALL( hipMemcpy( d_in, in, size, hipMemcpyHostToDevice ) );
CUDA_CALL( hipMemset( d_out, 0, size ) );
/* calculate block and grid sizes */
dim3 threads( THREADS_PER_BLOCK, 1, 1);
dim3 blocks( (N / threads.x) + 1, 1, 1);
/* start the timers */
hipEvent_t start, stop;
CUDA_CALL( hipEventCreate( &start ) );
CUDA_CALL( hipEventCreate( &stop ) );
CUDA_CALL( hipEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
hipLaunchKernelGGL(( stencil_1d), dim3(blocks), dim3(threads) , 0, 0, N, d_in, d_out );
CUDA_CHECK();
CUDA_CALL( hipDeviceSynchronize() );
/* stop the timers */
CUDA_CALL( hipEventRecord( stop, 0 ) );
CUDA_CALL( hipEventSynchronize( stop ) );
float elapsedTime;
CUDA_CALL( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total time for %d elements was %f ms\n", N, elapsedTime );
/* copy result back to host */
CUDA_CALL( hipMemcpy( out, d_out, size, hipMemcpyDeviceToHost ) );
for( int i = 0; i < N; i++ )
{
if( in[i]*( (double)RADIUS*2+1 ) != out[i] )
{
printf("error in element %d in = %f out %f\n",i,in[i],out[i] );
printf("FAIL\n");
goto end;
} /* end if */
} /* end for */
printf("PASS\n");
end:
/* clean up */
free(in);
free(out);
CUDA_CALL( hipFree( d_in ) );
CUDA_CALL( hipFree( d_out ) );
CUDA_CALL( hipDeviceReset() );
return 0;
} /* end main */
| 038301f0a0f55157370fa1b37e8b350440e7d778.cu | /*
* Copyright 2014 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
#define N ( 1024 * 1024 )
#define RADIUS 5
#define THREADS_PER_BLOCK 512
__global__ void stencil_1d(int n, double *in, double *out)
{
/* calculate global index in the array */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* return if my global index is larger than the array size */
if( globalIndex >= n ) return;
/* code to handle the boundary conditions */
if( globalIndex < RADIUS || globalIndex >= (n - RADIUS) )
{
out[globalIndex] = (double) globalIndex * ( (double)RADIUS*2 + 1) ;
return;
} /* end if */
double result = 0.0;
for( int i = globalIndex-(RADIUS); i <= globalIndex+(RADIUS); i++ )
{
result += in[i];
}
out[globalIndex] = result;
return;
}
int main()
{
double *in, *out;
double *d_in, *d_out;
int size = N * sizeof( double );
/* allocate space for device copies of in, out */
CUDA_CALL( cudaMalloc( (void **) &d_in, size ) );
CUDA_CALL( cudaMalloc( (void **) &d_out, size ) );
/* allocate space for host copies of in, out and setup input values */
in = (double *)malloc( size );
out = (double *)malloc( size );
for( int i = 0; i < N; i++ )
{
in[i] = (double) i;
out[i] = 0;
}
/* copy inputs to device */
CUDA_CALL( cudaMemcpy( d_in, in, size, cudaMemcpyHostToDevice ) );
CUDA_CALL( cudaMemset( d_out, 0, size ) );
/* calculate block and grid sizes */
dim3 threads( THREADS_PER_BLOCK, 1, 1);
dim3 blocks( (N / threads.x) + 1, 1, 1);
/* start the timers */
cudaEvent_t start, stop;
CUDA_CALL( cudaEventCreate( &start ) );
CUDA_CALL( cudaEventCreate( &stop ) );
CUDA_CALL( cudaEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
stencil_1d<<< blocks, threads >>>( N, d_in, d_out );
CUDA_CHECK();
CUDA_CALL( cudaDeviceSynchronize() );
/* stop the timers */
CUDA_CALL( cudaEventRecord( stop, 0 ) );
CUDA_CALL( cudaEventSynchronize( stop ) );
float elapsedTime;
CUDA_CALL( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total time for %d elements was %f ms\n", N, elapsedTime );
/* copy result back to host */
CUDA_CALL( cudaMemcpy( out, d_out, size, cudaMemcpyDeviceToHost ) );
for( int i = 0; i < N; i++ )
{
if( in[i]*( (double)RADIUS*2+1 ) != out[i] )
{
printf("error in element %d in = %f out %f\n",i,in[i],out[i] );
printf("FAIL\n");
goto end;
} /* end if */
} /* end for */
printf("PASS\n");
end:
/* clean up */
free(in);
free(out);
CUDA_CALL( cudaFree( d_in ) );
CUDA_CALL( cudaFree( d_out ) );
CUDA_CALL( cudaDeviceReset() );
return 0;
} /* end main */
|
7eeb8eabea3ecc8b01e164fefda5ed8041beb72a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `product`
#include <cudf/detail/reduction_functions.hpp>
#include "simple_hip.cuh"
std::unique_ptr<cudf::scalar> cudf::experimental::reduction::product(
column_view const& col, cudf::data_type const output_dtype,
rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
using reducer = cudf::experimental::reduction::simple::element_type_dispatcher< cudf::experimental::reduction::op::product>;
return cudf::experimental::type_dispatcher(col.type(), reducer(), col, output_dtype, mr, stream);
}
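// Note: this translation unit only wires the `product` op into the simple element_type_dispatcher; the actual
// reduction implementation presumably lives in the included simple_hip.cuh (simple.cuh in the CUDA original),
// which keeps per-operation compile times down.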
| 7eeb8eabea3ecc8b01e164fefda5ed8041beb72a.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `product`
#include <cudf/detail/reduction_functions.hpp>
#include "simple.cuh"
std::unique_ptr<cudf::scalar> cudf::experimental::reduction::product(
column_view const& col, cudf::data_type const output_dtype,
rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
using reducer = cudf::experimental::reduction::simple::element_type_dispatcher< cudf::experimental::reduction::op::product>;
return cudf::experimental::type_dispatcher(col.type(), reducer(), col, output_dtype, mr, stream);
}
|
57c8c6d9e4b59b9e1a3ce5f0c87fa4f6ef06c83f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "include/SWE.cuh"
#include "include/utils.cuh"
#include "../include/structs.h"
#include "../include/macros.h"
__device__ void calculateFeqSWE(prec* feq, prec* localMacroscopic, prec e){
prec factor = 1 / (9 * e*e);
prec localh = localMacroscopic[0];
prec localux = localMacroscopic[1];
prec localuy = localMacroscopic[2];
prec gh = 1.5 * 9.8 * localh;
prec usq = 1.5 * (localux * localux + localuy * localuy);
prec ux3 = 3.0 * e * localux;
prec uy3 = 3.0 * e * localuy;
prec uxuy5 = ux3 + uy3;
prec uxuy6 = uy3 - ux3;
feq[0] = localh * (1 - factor * (5.0 * gh + 4.0 * usq));
feq[1] = localh * factor * (gh + ux3 + 4.5 * ux3*ux3 * factor - usq);
feq[2] = localh * factor * (gh + uy3 + 4.5 * uy3*uy3 * factor - usq);
feq[3] = localh * factor * (gh - ux3 + 4.5 * ux3*ux3 * factor - usq);
feq[4] = localh * factor * (gh - uy3 + 4.5 * uy3*uy3 * factor - usq);
feq[5] = localh * factor * 0.25 * (gh + uxuy5 + 4.5 * uxuy5*uxuy5 * factor - usq);
feq[6] = localh * factor * 0.25 * (gh + uxuy6 + 4.5 * uxuy6*uxuy6 * factor - usq);
feq[7] = localh * factor * 0.25 * (gh - uxuy5 + 4.5 * uxuy5*uxuy5 * factor - usq);
feq[8] = localh * factor * 0.25 * (gh - uxuy6 + 4.5 * uxuy6*uxuy6 * factor - usq);
}
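// Reading of calculateFeqSWE (an inference from the code, not a statement by its authors): this looks like the
// standard D2Q9 lattice-Boltzmann equilibrium for the shallow water equations, with gravity hard-coded as 9.8,
// e the lattice speed, directions 1-4 the axis links and 5-8 the diagonals (hence their extra 0.25 weight),
// and feq[0] the rest population chosen so that the nine populations sum to the water depth h.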
__device__ void calculateForcingSWE(prec* forcing, prec* h, const prec* __restrict__ b, prec e,
int i, int Lx, int* ex, int* ey){
prec factor = 1 / (6 * e*e);
prec localh = h[i];
prec localb = b[i];
for (int j = 0; j < 4; j++){
int index = IDX(i, j, Lx, ex, ey);
forcing[j] = factor * 9.8 * (localh + h[index]) * (b[index] - localb);
}
for (int j = 4; j < 8; j++){
int index = IDX(i, j, Lx, ex, ey);
forcing[j] = factor * 0.25 * 9.8 * (localh + h[index]) * (b[index] - localb);
}
}
__global__ void hKernel(const configStruct config, const prec* __restrict__ w,
const prec* __restrict__ b, prec* h){
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < config.Lx*config.Ly) {
h[i] = w[i] - b[i];
}
}
__global__ void wKernel(const configStruct config, const prec* __restrict__ h,
const prec* __restrict__ b, prec* w){
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < config.Lx*config.Ly) {
w[i] = h[i] + b[i];
}
}
| 57c8c6d9e4b59b9e1a3ce5f0c87fa4f6ef06c83f.cu | #include <cuda_runtime.h>
#include "include/SWE.cuh"
#include "include/utils.cuh"
#include "../include/structs.h"
#include "../include/macros.h"
__device__ void calculateFeqSWE(prec* feq, prec* localMacroscopic, prec e){
prec factor = 1 / (9 * e*e);
prec localh = localMacroscopic[0];
prec localux = localMacroscopic[1];
prec localuy = localMacroscopic[2];
prec gh = 1.5 * 9.8 * localh;
prec usq = 1.5 * (localux * localux + localuy * localuy);
prec ux3 = 3.0 * e * localux;
prec uy3 = 3.0 * e * localuy;
prec uxuy5 = ux3 + uy3;
prec uxuy6 = uy3 - ux3;
feq[0] = localh * (1 - factor * (5.0 * gh + 4.0 * usq));
feq[1] = localh * factor * (gh + ux3 + 4.5 * ux3*ux3 * factor - usq);
feq[2] = localh * factor * (gh + uy3 + 4.5 * uy3*uy3 * factor - usq);
feq[3] = localh * factor * (gh - ux3 + 4.5 * ux3*ux3 * factor - usq);
feq[4] = localh * factor * (gh - uy3 + 4.5 * uy3*uy3 * factor - usq);
feq[5] = localh * factor * 0.25 * (gh + uxuy5 + 4.5 * uxuy5*uxuy5 * factor - usq);
feq[6] = localh * factor * 0.25 * (gh + uxuy6 + 4.5 * uxuy6*uxuy6 * factor - usq);
feq[7] = localh * factor * 0.25 * (gh - uxuy5 + 4.5 * uxuy5*uxuy5 * factor - usq);
feq[8] = localh * factor * 0.25 * (gh - uxuy6 + 4.5 * uxuy6*uxuy6 * factor - usq);
}
__device__ void calculateForcingSWE(prec* forcing, prec* h, const prec* __restrict__ b, prec e,
int i, int Lx, int* ex, int* ey){
prec factor = 1 / (6 * e*e);
prec localh = h[i];
prec localb = b[i];
for (int j = 0; j < 4; j++){
int index = IDX(i, j, Lx, ex, ey);
forcing[j] = factor * 9.8 * (localh + h[index]) * (b[index] - localb);
}
for (int j = 4; j < 8; j++){
int index = IDX(i, j, Lx, ex, ey);
forcing[j] = factor * 0.25 * 9.8 * (localh + h[index]) * (b[index] - localb);
}
}
__global__ void hKernel(const configStruct config, const prec* __restrict__ w,
const prec* __restrict__ b, prec* h){
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < config.Lx*config.Ly) {
h[i] = w[i] - b[i];
}
}
__global__ void wKernel(const configStruct config, const prec* __restrict__ h,
const prec* __restrict__ b, prec* w){
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < config.Lx*config.Ly) {
w[i] = h[i] + b[i];
}
}
|
cf6aab719b1b38299975b6725b8b6370615acade.hip | // !!! This is a file automatically generated by hipify!!!
#include "cLayer.h"
#include <ImageIO.h>
//------------------------------------------------------------------------
//-------------------------Host Funcitons Below-----------------------
//------------------------------------------------------------------------
// According to the type of the layer
// Initialize the instance
void cLayer::Initialize(short sLevel){
cImages* TempOut;
nLayerLevel = sLevel;
bFirstCom = true;
nFilterCount = GlobalValues.FilterMultipliers[nLayerLevel];
nFilterGroupCount = 1;
Input = thrust::host_vector<cImages*>();
DevInput = thrust::host_vector<cImagesGPU*>();
Output = thrust::host_vector<cImages*>();
DevOutput = thrust::host_vector<cImagesGPU*>();
	hipEventCreateWithFlags(&EventBusy, hipEventBlockingSync);
// Attach input of the lower layer
if(sLevel != 0){
Input = LowerLayer->Output;
DevInput = LowerLayer->DevOutput;
}
InSize.width = Input[0]->width();
InSize.height = Input[0]->height();
FilterGroup = new cFilterGroup();
if(Type == cGlobalValues::CONVOLUTION){
kerSize = GlobalValues.kerSizes[(nLayerLevel)];
FilterGroup->Type = cGlobalValues::CONVOLUTION;
}else if(Type == cGlobalValues::FULLYCONNECTED){
// Only one group in fully connected layer
FilterGroup->Type =cGlobalValues::FULLYCONNECTED;
}else if(Type == cGlobalValues::MAXPOOLING){
// One group in maxpooling, handle all the input together
FilterGroup->Type = cGlobalValues::MAXPOOLING;
// For maxpooling layer there is no need to calculate delta value
// Directly using the lower layer delta value is OK.
}else if(Type == cGlobalValues::OUTPUT){
FilterGroup->Type = cGlobalValues::OUTPUT;
}
FilterGroup->Input = Input;
FilterGroup->DevInput = DevInput;
FilterGroup->InSize = InSize;
FilterGroup->H2DStream = H2DStream;
FilterGroup->D2HStream = D2HStream;
FilterGroup->CalcStream = CalcStream;
FilterGroup->D2DStream = D2DStream;
FilterGroup->Initialize(nLayerLevel);
for(int j = 0; j < FilterGroup->Output.size(); j++){
Output.push_back(FilterGroup->Output[j]);
DevOutput.push_back(FilterGroup->DevOutput[j]);
}
OutSize.height = Output[0]->height();
OutSize.width = Output[0]->width();
}
void cLayer::Trace(){
fprintf(logFile, "|| Starting tracing No.%d layer:\n", nLayerLevel);
if(Type == cGlobalValues::CONVOLUTION){
fprintf(logFile, "|| Layer type: convolution layer.\n");
}else if(Type == cGlobalValues::MAXPOOLING){
fprintf(logFile, "|| Layer type: max pooling layer.\n");
}else if(Type == cGlobalValues::FULLYCONNECTED){
fprintf(logFile, "|| Layer type: fully-connected layer.\n");
}else if(Type == cGlobalValues::OUTPUT){
fprintf(logFile, "|| Layer type: output layer.\n");
}
fprintf(logFile, "|| Filter group count:%d.\n", 1);
FilterGroup->Trace(1);
fprintf(logFile, "|| No.%d Layer tracing finished.\n||\n", nLayerLevel);
}
void cLayer::TraceDelta(){
FILE* log = NULL;
char* cpath, *num;
char d[] = "_Delta";
cpath = (char*)malloc(255 * sizeof(char));
memset( cpath, 0, 255 * sizeof(char));
num = (char*)malloc(10 * sizeof(char));
itoa(nLayerLevel, num, 10);
strcat( cpath, LOG_FILE_BATCH_PATH);
strcat( cpath, d);
strcat( cpath, num);
strcat( cpath, ".txt");
log = fopen( cpath, "w");
fprintf(log, "|| Starting tracing delta of No.%d layer:\n", nLayerLevel);
if(Type == cGlobalValues::CONVOLUTION){
fprintf(log, "|| Layer type: convolution layer.\n");
}else if(Type == cGlobalValues::MAXPOOLING){
fprintf(log, "|| Layer type: max pooling layer.\n");
}else if(Type == cGlobalValues::FULLYCONNECTED){
fprintf(log, "|| Layer type: fully-connected layer.\n");
}else if(Type == cGlobalValues::OUTPUT){
fprintf(log, "|| Layer type: output layer.\n");
}
fprintf(log, "|| Filter group count:%d.\n", 1);
FilterGroup->TraceDelta(log);
fprintf(log, "|| No.%d Layer tracing finished.\n||\n", nLayerLevel);
fclose(log);
}
void cLayer::SetTarget(cImagesGPU* Target){
if(Type != cGlobalValues::OUTPUT) return;
FilterGroup->SetTarget( Target);
}
void cLayer::CalcDelta(){
NppiSize iSize = LowerLayer->FilterGroup->OutSize;
FilterGroup->CalcDelta(LowerLayer->FilterGroup->fDevDelta, iSize);
}
void cLayer::Train(){
if(Type == cGlobalValues::MAXPOOLING) return;
if(nLayerLevel != 0) FilterGroup->Train(LowerLayer->FilterGroup->OutSize.width * LowerLayer->FilterGroup->OutSize.height);
else FilterGroup->Train(FilterGroup->MapSize.width*FilterGroup->MapSize.height);
}
// Allocate the combined all-in-one memory block for GPU
void cLayer::AllocDevIn(){
if(!bCombineInput) return;
if(DevIn != NULL) hipFree(DevIn);
int size = 0;
if(GlobalValues.bUseCPUMem){
size = (Input.size() * (Input[0]->width()) * (Input[0]->height()));
}else{
size = (DevInput.size() * (DevInput[0]->width()) * (DevInput[0]->height()));
}
checkCudaErrors(hipMalloc((float**)&DevIn, size * sizeof(float)));
}
// Check all the flags and layer attributes to decide the allocation of device memory
// Including the device input & output of the layer and the device input & output of the filter groups
// For every layer and every group, DevIn, DevOut, DevInput and DevOutput must be allocated
void cLayer::CheckDeviceMemory(){
// DevInput of the layers
/*if(nLayerLevel != 0) DevInput = LowerLayer->DevOutput;
else{
DevInput.push_back(new cImagesGPU(*Input[0]));
}
for(int i = 0; i < FilterGroups.size(); i++){
// DevInput of filter groups
if((FilterGroups[i]->DevInput.empty())||GlobalValues.bUseCPUMem){
FilterGroups[i]->DevInput.clear();
for(int j = 0; j < Input.size(); j++){
FilterGroups[i]->DevInput.push_back(DevInput[j]);
}
FilterGroups[i]->InputCount = Input.size();
}
// DevOutput of the group
if(FilterGroups[i]->DevOutput.empty()||GlobalValues.bUseCPUMem){
FilterGroups[i]->ClearGPUOutput();
FilterGroups[i]->DevOutputAlloc();
}
}*/
/*if(Type == cGlobalValues::CONVOLUTION){
// Convolution dont use DevIn & DevOut
// DevInput of Layer
if(nLayerLevel != 0){
DevInput = LowerLayer->DevOutput;
}else{
cImagesGPU* DevSrc = new cImagesGPU(*Input[0]);
DevInput.push_back( DevSrc);
}
FilterGroup->DevInput.push_back(DevInput[0]);
FilterGroup->DevOutputAlloc();
}else if(Type == cGlobalValues::MAXPOOLING){
// DevInput layer and group
DevInput = LowerLayer->DevOutput;
FilterGroup->DevInput = DevInput;
// DevIn layer and Group
AllocDevIn();
FilterGroup->DevIn = DevIn;
// DevOut group
FilterGroup->DevOutAlloc();
// DevOut of layer & DevOutput of group
if(bCombineOutput){
DevOut = FilterGroup->DevOut;
}else{
DevOut = NULL;
FilterGroup->DevOutputAlloc();
}
}else if(Type == cGlobalValues::FULLYCONNECTED){
// DevInput of layer and group
DevInput = LowerLayer->DevOutput;
FilterGroup->DevInput = DevInput;
// DevIn of layer and group
if(LowerLayer->bCombineOutput){
DevIn = LowerLayer->DevOut;
}else{
AllocDevIn();
}
FilterGroup->DevIn = DevIn;
if(bCombineOutput){
// DevOut of group and layer, not using output here
FilterGroup->DevOutAlloc();
DevOut = FilterGroup->DevOut;
}else{
// Usually flly connected layers only output combined results
// Not over here
FilterGroup->DevOutputAlloc();
}
}else if(Type == cGlobalValues::OUTPUT){
// Usually dont use devinput
DevInput = LowerLayer->DevOutput;
FilterGroup->DevInput = DevInput;
DevIn = LowerLayer->DevOut;
FilterGroup->DevIn = DevIn;
FilterGroup->DevOutAlloc();
DevOut = FilterGroup->DevOut;
// Because output is always need to copy back to CPU
// Use DevOutput
FilterGroup->DevOutputAlloc();
DevOutput = FilterGroup->DevOutput;
}
// For all layers adds filter groups' devoutput to layer's output
// When its not empty and we are using GPU mem directly dont alloc new ones
if(DevOutput.empty()||GlobalValues.bUseCPUMem){
DevOutput.clear();
for(int j = 0; j < FilterGroup->DevOutput.size(); j++)
DevOutput.push_back(FilterGroup->DevOutput[j]);
}*/
}
void cLayer::Compute(){
// Mainly allocate the memory for device computation
// CheckDeviceMemory();
// Using CPU mem and copy inputs to device
//if(nLayerLevel == 0) FilterGroup->CopyInputs();
// Setting the pointers of DevInput into one device_ptr
if(Type != cGlobalValues::MAXPOOLING){
if(bFirstCom){
if(nLayerLevel == GlobalValues.sConvLayerCount + GlobalValues.sPoolLayerCount)
FilterGroup->ExtendInput1D();
else FilterGroup->ExtendInput();
}else if(nLayerLevel != 0){
if(nLayerLevel == GlobalValues.sConvLayerCount + GlobalValues.sPoolLayerCount)
FilterGroup->CopyDevInput1D(LowerLayer->FilterGroup->DevOutput[0], LowerLayer->FilterGroup->DevOutput[0]->size());
else FilterGroup->CopyDevInput(LowerLayer->FilterGroup->DevOutput[0], LowerLayer->FilterGroup->DevOutput[0]->size());
}
}
FilterGroup->Compute(0);
hipEventRecord( EventBusy, *CalcStream);
/*checkCudaErrors( hipEventSynchronize( EventBusy));*/
/*if(GlobalValues.bUseCPUMem) FilterGroup->CopyResults();
hipEventRecord( EventBusy, GlobalValues.D2HStream);
checkCudaErrors( hipEventSynchronize( EventBusy));
TraceOutput();*/
/*if(Type == cGlobalValues::FULLYCONNECTED){
FilterGroups[0]->ClearGPUTmp();
}*/
	// Wait for the asynchronous GPU computation to finish
if(bFirstCom == true) bFirstCom = false;
}
void cLayer::CopyWeightsD2H(){
FilterGroup->CopyWeightsD2H();
//checkCudaErrors(hipEventSynchronize(FilterGroup->EventD2H));
}
void cLayer::SaveWeights(){
FilterGroup->SaveWeights();
}
void cLayer::LoadWeights(){
FilterGroup->LoadWeights();
}
void cLayer::ClearGPUOutput(){
if(GlobalValues.bUseCPUMem){
FilterGroup->ClearGPUOutput();
}
}
void cLayer::TraceDevOutput(int col){
FilterGroup->TraceDevOutput(col);
}
cLayer::~cLayer(){
for(int i = 0; i < nFilterGroupCount; i++){
if(FilterGroup!= NULL) {
delete FilterGroup;
FilterGroup = NULL;
}
}
for(int i = 0; i < Input.size(); i++){
if(Input[i] != NULL){
Input[i] = NULL;
}
}
Input.clear();
for(int i = 0; i < Output.size(); i++){
if(Output[i] != NULL){
Output[i] = NULL;
}
}
Output.clear();
if(DevIn != NULL){
delete DevIn;
DevIn = NULL;
}
if(DevOut != NULL){
delete DevOut;
DevOut = NULL;
}
} | cf6aab719b1b38299975b6725b8b6370615acade.cu |
#include "cLayer.h"
#include <ImageIO.h>
//------------------------------------------------------------------------
//-------------------------Host Funcitons Below-----------------------
//------------------------------------------------------------------------
// According to the type of the layer
// Initialize the instance
void cLayer::Initialize(short sLevel){
cImages* TempOut;
nLayerLevel = sLevel;
bFirstCom = true;
nFilterCount = GlobalValues.FilterMultipliers[nLayerLevel];
nFilterGroupCount = 1;
Input = thrust::host_vector<cImages*>();
DevInput = thrust::host_vector<cImagesGPU*>();
Output = thrust::host_vector<cImages*>();
DevOutput = thrust::host_vector<cImagesGPU*>();
cudaEventCreate(&EventBusy, cudaEventBlockingSync);
// Attach input of the lower layer
if(sLevel != 0){
Input = LowerLayer->Output;
DevInput = LowerLayer->DevOutput;
}
InSize.width = Input[0]->width();
InSize.height = Input[0]->height();
FilterGroup = new cFilterGroup();
if(Type == cGlobalValues::CONVOLUTION){
kerSize = GlobalValues.kerSizes[(nLayerLevel)];
FilterGroup->Type = cGlobalValues::CONVOLUTION;
}else if(Type == cGlobalValues::FULLYCONNECTED){
// Only one group in fully connected layer
FilterGroup->Type =cGlobalValues::FULLYCONNECTED;
}else if(Type == cGlobalValues::MAXPOOLING){
// One group in maxpooling, handle all the input together
FilterGroup->Type = cGlobalValues::MAXPOOLING;
// For maxpooling layer there is no need to calculate delta value
// Directly using the lower layer delta value is OK.
}else if(Type == cGlobalValues::OUTPUT){
FilterGroup->Type = cGlobalValues::OUTPUT;
}
FilterGroup->Input = Input;
FilterGroup->DevInput = DevInput;
FilterGroup->InSize = InSize;
FilterGroup->H2DStream = H2DStream;
FilterGroup->D2HStream = D2HStream;
FilterGroup->CalcStream = CalcStream;
FilterGroup->D2DStream = D2DStream;
FilterGroup->Initialize(nLayerLevel);
for(int j = 0; j < FilterGroup->Output.size(); j++){
Output.push_back(FilterGroup->Output[j]);
DevOutput.push_back(FilterGroup->DevOutput[j]);
}
OutSize.height = Output[0]->height();
OutSize.width = Output[0]->width();
}
void cLayer::Trace(){
fprintf(logFile, "|| Starting tracing No.%d layer:\n", nLayerLevel);
if(Type == cGlobalValues::CONVOLUTION){
fprintf(logFile, "|| Layer type: convolution layer.\n");
}else if(Type == cGlobalValues::MAXPOOLING){
fprintf(logFile, "|| Layer type: max pooling layer.\n");
}else if(Type == cGlobalValues::FULLYCONNECTED){
fprintf(logFile, "|| Layer type: fully-connected layer.\n");
}else if(Type == cGlobalValues::OUTPUT){
fprintf(logFile, "|| Layer type: output layer.\n");
}
fprintf(logFile, "|| Filter group count:%d.\n", 1);
FilterGroup->Trace(1);
fprintf(logFile, "|| No.%d Layer tracing finished.\n||\n", nLayerLevel);
}
void cLayer::TraceDelta(){
FILE* log = NULL;
char* cpath, *num;
char d[] = "_Delta";
cpath = (char*)malloc(255 * sizeof(char));
memset( cpath, 0, 255 * sizeof(char));
num = (char*)malloc(10 * sizeof(char));
itoa(nLayerLevel, num, 10);
strcat( cpath, LOG_FILE_BATCH_PATH);
strcat( cpath, d);
strcat( cpath, num);
strcat( cpath, ".txt");
log = fopen( cpath, "w");
fprintf(log, "|| Starting tracing delta of No.%d layer:\n", nLayerLevel);
if(Type == cGlobalValues::CONVOLUTION){
fprintf(log, "|| Layer type: convolution layer.\n");
}else if(Type == cGlobalValues::MAXPOOLING){
fprintf(log, "|| Layer type: max pooling layer.\n");
}else if(Type == cGlobalValues::FULLYCONNECTED){
fprintf(log, "|| Layer type: fully-connected layer.\n");
}else if(Type == cGlobalValues::OUTPUT){
fprintf(log, "|| Layer type: output layer.\n");
}
fprintf(log, "|| Filter group count:%d.\n", 1);
FilterGroup->TraceDelta(log);
fprintf(log, "|| No.%d Layer tracing finished.\n||\n", nLayerLevel);
fclose(log);
}
void cLayer::SetTarget(cImagesGPU* Target){
if(Type != cGlobalValues::OUTPUT) return;
FilterGroup->SetTarget( Target);
}
void cLayer::CalcDelta(){
NppiSize iSize = LowerLayer->FilterGroup->OutSize;
FilterGroup->CalcDelta(LowerLayer->FilterGroup->fDevDelta, iSize);
}
void cLayer::Train(){
if(Type == cGlobalValues::MAXPOOLING) return;
if(nLayerLevel != 0) FilterGroup->Train(LowerLayer->FilterGroup->OutSize.width * LowerLayer->FilterGroup->OutSize.height);
else FilterGroup->Train(FilterGroup->MapSize.width*FilterGroup->MapSize.height);
}
// Allocate the combined all-in-one memory block for GPU
void cLayer::AllocDevIn(){
if(!bCombineInput) return;
if(DevIn != NULL) cudaFree(DevIn);
int size = 0;
if(GlobalValues.bUseCPUMem){
size = (Input.size() * (Input[0]->width()) * (Input[0]->height()));
}else{
size = (DevInput.size() * (DevInput[0]->width()) * (DevInput[0]->height()));
}
checkCudaErrors(cudaMalloc((float**)&DevIn, size * sizeof(float)));
}
// Check all the flags and layer attributes to decide the allocation of device memory
// Including the device input & output of the layer and the device input & output of the filter groups
// For every layer and every group, DevIn, DevOut, DevInput and DevOutput must be allocated
void cLayer::CheckDeviceMemory(){
// DevInput of the layers
/*if(nLayerLevel != 0) DevInput = LowerLayer->DevOutput;
else{
DevInput.push_back(new cImagesGPU(*Input[0]));
}
for(int i = 0; i < FilterGroups.size(); i++){
// DevInput of filter groups
if((FilterGroups[i]->DevInput.empty())||GlobalValues.bUseCPUMem){
FilterGroups[i]->DevInput.clear();
for(int j = 0; j < Input.size(); j++){
FilterGroups[i]->DevInput.push_back(DevInput[j]);
}
FilterGroups[i]->InputCount = Input.size();
}
// DevOutput of the group
if(FilterGroups[i]->DevOutput.empty()||GlobalValues.bUseCPUMem){
FilterGroups[i]->ClearGPUOutput();
FilterGroups[i]->DevOutputAlloc();
}
}*/
/*if(Type == cGlobalValues::CONVOLUTION){
// Convolution dont use DevIn & DevOut
// DevInput of Layer
if(nLayerLevel != 0){
DevInput = LowerLayer->DevOutput;
}else{
cImagesGPU* DevSrc = new cImagesGPU(*Input[0]);
DevInput.push_back( DevSrc);
}
FilterGroup->DevInput.push_back(DevInput[0]);
FilterGroup->DevOutputAlloc();
}else if(Type == cGlobalValues::MAXPOOLING){
// DevInput layer and group
DevInput = LowerLayer->DevOutput;
FilterGroup->DevInput = DevInput;
// DevIn layer and Group
AllocDevIn();
FilterGroup->DevIn = DevIn;
// DevOut group
FilterGroup->DevOutAlloc();
// DevOut of layer & DevOutput of group
if(bCombineOutput){
DevOut = FilterGroup->DevOut;
}else{
DevOut = NULL;
FilterGroup->DevOutputAlloc();
}
}else if(Type == cGlobalValues::FULLYCONNECTED){
// DevInput of layer and group
DevInput = LowerLayer->DevOutput;
FilterGroup->DevInput = DevInput;
// DevIn of layer and group
if(LowerLayer->bCombineOutput){
DevIn = LowerLayer->DevOut;
}else{
AllocDevIn();
}
FilterGroup->DevIn = DevIn;
if(bCombineOutput){
// DevOut of group and layer, not using output here
FilterGroup->DevOutAlloc();
DevOut = FilterGroup->DevOut;
}else{
// Usually fully-connected layers only output combined results,
// but not in this branch
FilterGroup->DevOutputAlloc();
}
}else if(Type == cGlobalValues::OUTPUT){
// Usually don't use DevInput
DevInput = LowerLayer->DevOutput;
FilterGroup->DevInput = DevInput;
DevIn = LowerLayer->DevOut;
FilterGroup->DevIn = DevIn;
FilterGroup->DevOutAlloc();
DevOut = FilterGroup->DevOut;
// Because the output always needs to be copied back to the CPU,
// use DevOutput
FilterGroup->DevOutputAlloc();
DevOutput = FilterGroup->DevOutput;
}
// For all layers, add the filter group's DevOutput pointers to the layer's DevOutput
// When it is already populated and we are using GPU memory directly, don't allocate new ones
if(DevOutput.empty()||GlobalValues.bUseCPUMem){
DevOutput.clear();
for(int j = 0; j < FilterGroup->DevOutput.size(); j++)
DevOutput.push_back(FilterGroup->DevOutput[j]);
}*/
}
void cLayer::Compute(){
// Mainly allocate the memory for device computation
// CheckDeviceMemory();
// Using CPU mem and copy inputs to device
//if(nLayerLevel == 0) FilterGroup->CopyInputs();
// Setting the pointers of DevInput into one device_ptr
if(Type != cGlobalValues::MAXPOOLING){
if(bFirstCom){
if(nLayerLevel == GlobalValues.sConvLayerCount + GlobalValues.sPoolLayerCount)
FilterGroup->ExtendInput1D();
else FilterGroup->ExtendInput();
}else if(nLayerLevel != 0){
if(nLayerLevel == GlobalValues.sConvLayerCount + GlobalValues.sPoolLayerCount)
FilterGroup->CopyDevInput1D(LowerLayer->FilterGroup->DevOutput[0], LowerLayer->FilterGroup->DevOutput[0]->size());
else FilterGroup->CopyDevInput(LowerLayer->FilterGroup->DevOutput[0], LowerLayer->FilterGroup->DevOutput[0]->size());
}
}
FilterGroup->Compute(0);
cudaEventRecord( EventBusy, *CalcStream);
/*checkCudaErrors( cudaEventSynchronize( EventBusy));*/
/*if(GlobalValues.bUseCPUMem) FilterGroup->CopyResults();
cudaEventRecord( EventBusy, GlobalValues.D2HStream);
checkCudaErrors( cudaEventSynchronize( EventBusy));
TraceOutput();*/
/*if(Type == cGlobalValues::FULLYCONNECTED){
FilterGroups[0]->ClearGPUTmp();
}*/
// Wait for the asynchronous GPU computation to finish
if(bFirstCom == true) bFirstCom = false;
}
void cLayer::CopyWeightsD2H(){
FilterGroup->CopyWeightsD2H();
//checkCudaErrors(cudaEventSynchronize(FilterGroup->EventD2H));
}
void cLayer::SaveWeights(){
FilterGroup->SaveWeights();
}
void cLayer::LoadWeights(){
FilterGroup->LoadWeights();
}
void cLayer::ClearGPUOutput(){
if(GlobalValues.bUseCPUMem){
FilterGroup->ClearGPUOutput();
}
}
void cLayer::TraceDevOutput(int col){
FilterGroup->TraceDevOutput(col);
}
cLayer::~cLayer(){
for(int i = 0; i < nFilterGroupCount; i++){
if(FilterGroup!= NULL) {
delete FilterGroup;
FilterGroup = NULL;
}
}
for(int i = 0; i < Input.size(); i++){
if(Input[i] != NULL){
Input[i] = NULL;
}
}
Input.clear();
for(int i = 0; i < Output.size(); i++){
if(Output[i] != NULL){
Output[i] = NULL;
}
}
Output.clear();
if(DevIn != NULL){
delete DevIn;
DevIn = NULL;
}
if(DevOut != NULL){
delete DevOut;
DevOut = NULL;
}
} |
bbb9b412b3cf5574977f284ef594531b19577e6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "../THCTensorSort.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorSort.cu"
#include <THH/THHGenerateFloatType.h>
| bbb9b412b3cf5574977f284ef594531b19577e6e.cu | #include "../THCTensorSort.cuh"
#include "THCTensor.hpp"
#include "THCStream.h"
#include "../generic/THCTensorSort.cu"
#include <THC/THCGenerateFloatType.h>
|
340c24c11386d3eb1ebb1221063a47c0dc11d4a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/thresholded_relu_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void ThresholdedReluKernel(const int N, const T* X, T* Y, T alpha_) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = X[i] > alpha_ ? X[i] : 0;
}
}
template <typename T>
__global__ void
ThresholdedReluGradientKernel(const int N, const T* Y, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = Y[i] > 0 ? dY[i] : 0;
}
}
} // namespace
template <>
bool ThresholdedReluOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
CAFFE_ENFORCE_GT(X.size(), 0);
Y->ResizeLike(X);
hipLaunchKernelGGL(( ThresholdedReluKernel),
dim3(CAFFE_GET_BLOCKS(X.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.size(), X.data<float>(), Y->template mutable_data<float>(), alpha_);
return true;
}
template <>
bool ThresholdedReluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
CAFFE_ENFORCE_GT(Y.size(), 0);
CAFFE_ENFORCE_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
hipLaunchKernelGGL(( ThresholdedReluGradientKernel),
dim3(CAFFE_GET_BLOCKS(Y.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
Y.size(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(ThresholdedRelu, ThresholdedReluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
ThresholdedReluGradient,
ThresholdedReluGradientOp<float, CUDAContext>);
} // namespace caffe2
| 340c24c11386d3eb1ebb1221063a47c0dc11d4a6.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/thresholded_relu_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void ThresholdedReluKernel(const int N, const T* X, T* Y, T alpha_) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = X[i] > alpha_ ? X[i] : 0;
}
}
template <typename T>
__global__ void
ThresholdedReluGradientKernel(const int N, const T* Y, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = Y[i] > 0 ? dY[i] : 0;
}
}
} // namespace
template <>
bool ThresholdedReluOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
CAFFE_ENFORCE_GT(X.size(), 0);
Y->ResizeLike(X);
ThresholdedReluKernel<<<
CAFFE_GET_BLOCKS(X.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.size(), X.data<float>(), Y->template mutable_data<float>(), alpha_);
return true;
}
template <>
bool ThresholdedReluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
CAFFE_ENFORCE_GT(Y.size(), 0);
CAFFE_ENFORCE_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
ThresholdedReluGradientKernel<<<
CAFFE_GET_BLOCKS(Y.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
Y.size(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(ThresholdedRelu, ThresholdedReluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
ThresholdedReluGradient,
ThresholdedReluGradientOp<float, CUDAContext>);
} // namespace caffe2
|
89979e9c01b7ce16d62a733ceb41b16a85366459.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LeakyReluPlugin.h"
//cuda
__global__ void _leakyReluKer(float const *in, float *out, int size, float negative_slope) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= size)
return ;
if (in[index] < 0)
out[index] = in[index] * negative_slope;
else
out[index] = in[index];
}
// currently it is not possible for a plugin to execute "in place", so the kernel reads the input buffer and writes the activated values to the output buffer
int LeakyReluPlugin::enqueue(int batchSize, const void*const *inputs, void** outputs, void*, hipStream_t stream) {
int block_size = 256;
int grid_size = (m_param.size + block_size - 1) / block_size;
hipLaunchKernelGGL(( _leakyReluKer), dim3(grid_size), dim3(block_size), 0, 0,
reinterpret_cast<float const*>(inputs[0]),
reinterpret_cast<float*>(outputs[0]), m_param.size, m_param.negative_slope);
return 0;
}
| 89979e9c01b7ce16d62a733ceb41b16a85366459.cu | #include "LeakyReluPlugin.h"
//cuda
__global__ void _leakyReluKer(float const *in, float *out, int size, float negative_slope) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= size)
return ;
if (in[index] < 0)
out[index] = in[index] * negative_slope;
else
out[index] = in[index];
}
// currently it is not possible for a plugin to execute "in place", so the kernel reads the input buffer and writes the activated values to the output buffer
int LeakyReluPlugin::enqueue(int batchSize, const void*const *inputs, void** outputs, void*, cudaStream_t stream) {
int block_size = 256;
int grid_size = (m_param.size + block_size - 1) / block_size;
_leakyReluKer<<<grid_size, block_size>>>(
reinterpret_cast<float const*>(inputs[0]),
reinterpret_cast<float*>(outputs[0]), m_param.size, m_param.negative_slope);
return 0;
}
|
bd8b57335947bca85221c85b0bfa846840e9b76f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlarfbx.cu normal z -> d, Fri Jan 30 19:00:08 2015
*/
#include "common_magma.h"
#include "commonblas_d.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
//==============================================================================
extern "C"
__global__ void
magma_dgemv_kernel1(int m, const double * __restrict__ V, int ldv,
const double * __restrict__ c,
double *dwork)
{
const int i = threadIdx.x;
const double *dV = V + (blockIdx.x) * ldv;
__shared__ double sum[ BLOCK_SIZE ];
double lsum;
/* lsum := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = sum[0];
}
//==============================================================================
/* -----------------------------------------------------------------------------
Call
magma_dgemv_kernel3<<< n, BLOCK_SIZE>>>(m, V, ldv, c, dwork, tau)
to compute
DGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
----------------------------------------------------------------------------- */
extern "C"
__global__ void
magma_dgemv_kernel3(int m, const double * __restrict__ V, int ldv, double *c,
double *dwork, double *tau)
{
const int i = threadIdx.x;
const double *dV = V + (blockIdx.x) * ldv;
__shared__ double sum[ BLOCK_SIZE ];
double lsum;
if (i==0)
c[0] = MAGMA_D_ONE;
/* lsum := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
extern "C"
__global__ void
magma_dgemv_kernel2(int m, int n, const double * __restrict__ V, int ldv,
const double * __restrict__ x, double *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
double lsum;
V += j;
lsum = MAGMA_D_ZERO;
if (j < m){
for(int k=0; k<n; k++)
lsum += MAGMA_D_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
//==============================================================================
/*
Apply a real block reflector H to a real vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the real k-by-k upper triangular matrix in the
representation of the block reflector, and V is a real block of
k elementary reflectors.
*/
extern "C" void
magma_dlarfbx_gpu(
magma_int_t m, magma_int_t k,
magmaDouble_ptr V, magma_int_t ldv,
magmaDouble_ptr dT, magma_int_t ldt,
magmaDouble_ptr c,
magmaDouble_ptr dwork)
{
/* dwork = V**H c */
hipLaunchKernelGGL(( magma_dgemv_kernel1), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m, V, ldv, c, dwork);
/* dwork = T**H dwork */
hipLaunchKernelGGL(( magma_dtrmv_tkernel), dim3(k), dim3(k), 0, magma_stream , dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( (m + BLOCK_SIZE-1) / BLOCK_SIZE );
dim3 threads3( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_dgemv_kernel2), dim3(blocks3), dim3(threads3), 0, magma_stream , m, k, V, ldv, dwork+k, c);
}
//==============================================================================
| bd8b57335947bca85221c85b0bfa846840e9b76f.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlarfbx.cu normal z -> d, Fri Jan 30 19:00:08 2015
*/
#include "common_magma.h"
#include "commonblas_d.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
//==============================================================================
extern "C"
__global__ void
magma_dgemv_kernel1(int m, const double * __restrict__ V, int ldv,
const double * __restrict__ c,
double *dwork)
{
const int i = threadIdx.x;
const double *dV = V + (blockIdx.x) * ldv;
__shared__ double sum[ BLOCK_SIZE ];
double lsum;
/* lsum := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = sum[0];
}
//==============================================================================
/* -----------------------------------------------------------------------------
Call
magma_dgemv_kernel3<<< n, BLOCK_SIZE>>>(m, V, ldv, c, dwork, tau)
to compute
DGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
----------------------------------------------------------------------------- */
extern "C"
__global__ void
magma_dgemv_kernel3(int m, const double * __restrict__ V, int ldv, double *c,
double *dwork, double *tau)
{
const int i = threadIdx.x;
const double *dV = V + (blockIdx.x) * ldv;
__shared__ double sum[ BLOCK_SIZE ];
double lsum;
if (i==0)
c[0] = MAGMA_D_ONE;
/* lsum := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
extern "C"
__global__ void
magma_dgemv_kernel2(int m, int n, const double * __restrict__ V, int ldv,
const double * __restrict__ x, double *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
double lsum;
V += j;
lsum = MAGMA_D_ZERO;
if (j < m){
for(int k=0; k<n; k++)
lsum += MAGMA_D_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
//==============================================================================
/*
Apply a real block reflector H to a real vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the real k-by-k upper triangular matrix in the
representation of the block reflector, and V is a real block of
k elementary reflectors.
*/
extern "C" void
magma_dlarfbx_gpu(
magma_int_t m, magma_int_t k,
magmaDouble_ptr V, magma_int_t ldv,
magmaDouble_ptr dT, magma_int_t ldt,
magmaDouble_ptr c,
magmaDouble_ptr dwork)
{
/* dwork = V**H c */
magma_dgemv_kernel1<<< k, BLOCK_SIZE, 0, magma_stream >>>(m, V, ldv, c, dwork);
/* dwork = T**H dwork */
magma_dtrmv_tkernel<<< k, k, 0, magma_stream >>>( dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( (m + BLOCK_SIZE-1) / BLOCK_SIZE );
dim3 threads3( BLOCK_SIZE );
magma_dgemv_kernel2<<< blocks3, threads3, 0, magma_stream >>>( m, k, V, ldv, dwork+k, c);
}
//==============================================================================
|
4dd8e3d86d23f60bd8911fa44bdef5617acc33b7.hip | // !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <opencv2/opencv.hpp>
using namespace std;
#define PGMHeaderSize 0x40
#define TILE_W 16
#define TILE_H 16
#define Rx 2 // filter radius in x direction
#define Ry 2 // filter radius in y direction
#define FILTER_W (Rx*2+1) // filter diameter in x direction
#define FILTER_H (Ry*2+1) // filter diameter in y direction
#define S (FILTER_W*FILTER_H) // filter size
#define BLOCK_W (TILE_W+(2*Rx)) // 16+ 2*2 = 20
#define BLOCK_H (TILE_H+(2*Ry))
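// With a 16x16 tile and radius 2, each 20x20 thread block loads a 20x20 shared tile;
// only the interior 16x16 threads produce output, so the 2-pixel apron covers the 5x5 box filter.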
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline bool loadPPM(const char *file, unsigned char **data, unsigned int *w, unsigned int *h, unsigned int *channels);
__global__ void box_filter(const unsigned char *in, unsigned char *out, const unsigned int w, const unsigned int h);
inline bool savePPM(const char *file, unsigned char *data, unsigned int w, unsigned int h, unsigned int channels);
inline void __checkCudaErrors(hipError_t err, const char *file, const int line);
int main()
{
// CPU
unsigned char *h_data=NULL;
unsigned int w,h,channels;
//load image
if(! loadPPM("lena.ppm", &h_data, &w, &h, &channels)){
cout<< "Failed to open File\n";
exit(EXIT_FAILURE);
}
cout<<"------> Loaded file with :"<<w<<"*" << h << " channels:"<<channels<<endl;
//GPU
unsigned char*d_idata=NULL;
unsigned char *d_odata=NULL;
size_t n_byte = w*h*channels * sizeof(unsigned char);
// GPU ---> CPU
cout<<"\n------> Allocate Devicememory for data"<<endl;
checkCudaErrors(hipMalloc((void **)&d_idata, n_byte));
checkCudaErrors(hipMalloc((void **)&d_odata, n_byte));
// Copy to device
cout<<"\n------> Copy h_data from the host memory to the CUDA device\n";
checkCudaErrors(hipMemcpy(d_idata, h_data, n_byte, hipMemcpyHostToDevice));
// kernel
int GRID_W = w/TILE_W +1; // 512/16 +1 = 33
int GRID_H = h/TILE_H +1;
dim3 threadsPerBlock(BLOCK_W, BLOCK_H);
dim3 blocksPerGrid(GRID_W,GRID_H);
cout<<"\n------> CUDA kernel launch with [" <<blocksPerGrid.x<<" "<< blocksPerGrid.y <<"] blocks of [" <<threadsPerBlock.x<<" "<< threadsPerBlock.y<< "]threads"<<endl;
hipLaunchKernelGGL(( box_filter), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_idata, d_odata, w,h);
checkCudaErrors(hipGetLastError());
// GPU ---> CPU
cout<<"\n------> Copy odata from the CUDA device to the host memory"<<endl;
checkCudaErrors(hipMemcpy(h_data, d_odata, n_byte, hipMemcpyDeviceToHost));
cout<<"\n------> Free Device memory"<<endl;
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
cv::Mat TempMat = cv::Mat(h, w, CV_8UC1, h_data);
cv::imshow("image output", TempMat);
cv::waitKey();
// Save Picture
cout<<"\n------> Save Picture"<<endl;
bool saved = false;
if (channels==1)
saved = savePPM("output.pgm", h_data, w, h, channels);
else if (channels==3)
saved = savePPM("output.ppm", h_data, w, h, channels);
else
cout<<"\n------> ERROR: Unable to save file - wrong channel!\n";
cout<<"\n------> Free Host memory\n";
free(h_data);
if (!saved){
cout<<"\n------> Failed to save File\n";
exit(EXIT_FAILURE);
}
cout<<"\n------> Done\n";
return 0;
}
__global__ void box_filter(const unsigned char *in, unsigned char *out, const unsigned int w, const unsigned int h)
{
const int x = blockIdx.x * TILE_W + threadIdx.x - Rx;
const int y = blockIdx.y * TILE_H + threadIdx.y - Ry;
const int d = y * w+ x;
__shared__ float shMem[BLOCK_W][BLOCK_H]; // 20*20
if(x<0 || y<0 || x>=w || y>=h) // x and y ∈ [0,512]
{
shMem[threadIdx.x][threadIdx.y] = 0;
return;
}
shMem[threadIdx.x][threadIdx.y] = in[d];
__syncthreads();
if ((threadIdx.x >= Rx) && (threadIdx.x < (BLOCK_W-Rx)) && (threadIdx.y >= Ry) && (threadIdx.y < (BLOCK_H-Ry))) {
float sum = 0;
for(int dx=-Rx; dx<=Rx; dx++) {
for(int dy=-Ry; dy<=Ry; dy++) {
sum += shMem[threadIdx.x+dx][threadIdx.y+dy];
}
}
out[d] = sum / S;
}
}
inline bool loadPPM(const char *file, unsigned char **data, unsigned int *w, unsigned int *h, unsigned int *channels)
{
FILE *fp = NULL;
fp = fopen(file, "rb");
if (!fp) {
fprintf(stderr, "__LoadPPM() : unable to open file\n" );
return false;
}
// check header
char header[PGMHeaderSize];
if (fgets(header, PGMHeaderSize, fp) == NULL)
{
fprintf(stderr,"__LoadPPM() : reading PGM header returned NULL\n" );
return false;
}
if (strncmp(header, "P5", 2) == 0)
{
*channels = 1;
}
else if (strncmp(header, "P6", 2) == 0)
{
*channels = 3;
}
else
{
fprintf(stderr,"__LoadPPM() : File is not a PPM or PGM image\n" );
*channels = 0;
return false;
}
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3)
{
if (fgets(header, PGMHeaderSize, fp) == NULL)
{
fprintf(stderr,"__LoadPPM() : reading PGM header returned NULL\n" );
return false;
}
if (header[0] == '#')
{
continue;
}
if (i == 0)
{
i += sscanf(header, "%u %u %u", &width, &height, &maxval);
}
else if (i == 1)
{
i += sscanf(header, "%u %u", &height, &maxval);
}
else if (i == 2)
{
i += sscanf(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
if (NULL != *data)
{
if (*w != width || *h != height)
{
fprintf(stderr, "__LoadPPM() : Invalid image dimensions.\n" );
}
}
else
{
*data = (unsigned char *) malloc(sizeof(unsigned char) * width * height * *channels);
if (!*data) {
fprintf(stderr, "Unable to allocate host memory\n");
return false;
}
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) == 0)
{
fprintf(stderr, "__LoadPPM() : read data returned error.\n" );
fclose(fp);
return false;
}
fclose(fp);
return true;
}
inline bool savePPM(const char *file, unsigned char *data, unsigned int w, unsigned int h, unsigned int channels)
{
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad())
{
fprintf(stderr, "__savePPM() : Opening file failed.\n" );
return false;
}
if (channels == 1)
{
fh << "P5\n";
}
else if (channels == 3)
{
fh << "P6\n";
}
else
{
fprintf(stderr, "__savePPM() : Invalid number of channels.\n" );
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w*h*channels)) && fh.good(); ++i)
{
fh << data[i];
}
fh.flush();
if (fh.bad())
{
fprintf(stderr,"__savePPM() : Writing data failed.\n" );
return false;
}
fh.close();
return true;
}
inline void __checkCudaErrors(hipError_t err, const char *file, const int line)
{
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
} | 4dd8e3d86d23f60bd8911fa44bdef5617acc33b7.cu | #include <fstream>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <opencv2/opencv.hpp>
using namespace std;
#define PGMHeaderSize 0x40
#define TILE_W 16
#define TILE_H 16
#define Rx 2 // filter radius in x direction
#define Ry 2 // filter radius in y direction
#define FILTER_W (Rx*2+1) // filter diameter in x direction
#define FILTER_H (Ry*2+1) // filter diameter in y direction
#define S (FILTER_W*FILTER_H) // filter size
#define BLOCK_W (TILE_W+(2*Rx)) // 16+ 2*2 = 20
#define BLOCK_H (TILE_H+(2*Ry))
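// With a 16x16 tile and radius 2, each 20x20 thread block loads a 20x20 shared tile;
// only the interior 16x16 threads produce output, so the 2-pixel apron covers the 5x5 box filter.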
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline bool loadPPM(const char *file, unsigned char **data, unsigned int *w, unsigned int *h, unsigned int *channels);
__global__ void box_filter(const unsigned char *in, unsigned char *out, const unsigned int w, const unsigned int h);
inline bool savePPM(const char *file, unsigned char *data, unsigned int w, unsigned int h, unsigned int channels);
inline void __checkCudaErrors(cudaError err, const char *file, const int line);
int main()
{
// CPU
unsigned char *h_data=NULL;
unsigned int w,h,channels;
//load image
if(! loadPPM("lena.ppm", &h_data, &w, &h, &channels)){
cout<< "Failed to open File\n";
exit(EXIT_FAILURE);
}
cout<<"------> Loaded file with :"<<w<<"*" << h << " channels:"<<channels<<endl;
//GPU
unsigned char*d_idata=NULL;
unsigned char *d_odata=NULL;
size_t n_byte = w*h*channels * sizeof(unsigned char);
// GPU ---> CPU
cout<<"\n------> Allocate Devicememory for data"<<endl;
checkCudaErrors(cudaMalloc((void **)&d_idata, n_byte));
checkCudaErrors(cudaMalloc((void **)&d_odata, n_byte));
// Copy to device
cout<<"\n------> Copy h_data from the host memory to the CUDA device\n";
checkCudaErrors(cudaMemcpy(d_idata, h_data, n_byte, cudaMemcpyHostToDevice));
// kernel
int GRID_W = w/TILE_W +1; // 512/16 +1 = 33
int GRID_H = h/TILE_H +1;
dim3 threadsPerBlock(BLOCK_W, BLOCK_H);
dim3 blocksPerGrid(GRID_W,GRID_H);
cout<<"\n------> CUDA kernel launch with [" <<blocksPerGrid.x<<" "<< blocksPerGrid.y <<"] blocks of [" <<threadsPerBlock.x<<" "<< threadsPerBlock.y<< "]threads"<<endl;
box_filter<<<blocksPerGrid, threadsPerBlock>>>(d_idata, d_odata, w,h);
checkCudaErrors(cudaGetLastError());
// GPU ---> CPU
cout<<"\n------> Copy odata from the CUDA device to the host memory"<<endl;
checkCudaErrors(cudaMemcpy(h_data, d_odata, n_byte, cudaMemcpyDeviceToHost));
cout<<"\n------> Free Device memory"<<endl;
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
cv::Mat TempMat = cv::Mat(h, w, CV_8UC1, h_data);
cv::imshow("image output", TempMat);
cv::waitKey();
// Save Picture
cout<<"\n------> Save Picture"<<endl;
bool saved = false;
if (channels==1)
saved = savePPM("output.pgm", h_data, w, h, channels);
else if (channels==3)
saved = savePPM("output.ppm", h_data, w, h, channels);
else
cout<<"\n------> ERROR: Unable to save file - wrong channel!\n";
cout<<"\n------> Free Host memory\n";
free(h_data);
if (!saved){
cout<<"\n------> Failed to save File\n";
exit(EXIT_FAILURE);
}
cout<<"\n------> Done\n";
return 0;
}
__global__ void box_filter(const unsigned char *in, unsigned char *out, const unsigned int w, const unsigned int h)
{
const int x = blockIdx.x * TILE_W + threadIdx.x - Rx;
const int y = blockIdx.y * TILE_H + threadIdx.y - Ry;
const int d = y * w+ x;
__shared__ float shMem[BLOCK_W][BLOCK_H]; // 20*20
if(x<0 || y<0 || x>=w || y>=h) // x and y ∈ [0,512]
{
shMem[threadIdx.x][threadIdx.y] = 0;
return;
}
shMem[threadIdx.x][threadIdx.y] = in[d];
__syncthreads();
if ((threadIdx.x >= Rx) && (threadIdx.x < (BLOCK_W-Rx)) && (threadIdx.y >= Ry) && (threadIdx.y < (BLOCK_H-Ry))) {
float sum = 0;
for(int dx=-Rx; dx<=Rx; dx++) {
for(int dy=-Ry; dy<=Ry; dy++) {
sum += shMem[threadIdx.x+dx][threadIdx.y+dy];
}
}
out[d] = sum / S;
}
}
inline bool loadPPM(const char *file, unsigned char **data, unsigned int *w, unsigned int *h, unsigned int *channels)
{
FILE *fp = NULL;
fp = fopen(file, "rb");
if (!fp) {
fprintf(stderr, "__LoadPPM() : unable to open file\n" );
return false;
}
// check header
char header[PGMHeaderSize];
if (fgets(header, PGMHeaderSize, fp) == NULL)
{
fprintf(stderr,"__LoadPPM() : reading PGM header returned NULL\n" );
return false;
}
if (strncmp(header, "P5", 2) == 0)
{
*channels = 1;
}
else if (strncmp(header, "P6", 2) == 0)
{
*channels = 3;
}
else
{
fprintf(stderr,"__LoadPPM() : File is not a PPM or PGM image\n" );
*channels = 0;
return false;
}
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3)
{
if (fgets(header, PGMHeaderSize, fp) == NULL)
{
fprintf(stderr,"__LoadPPM() : reading PGM header returned NULL\n" );
return false;
}
if (header[0] == '#')
{
continue;
}
if (i == 0)
{
i += sscanf(header, "%u %u %u", &width, &height, &maxval);
}
else if (i == 1)
{
i += sscanf(header, "%u %u", &height, &maxval);
}
else if (i == 2)
{
i += sscanf(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
if (NULL != *data)
{
if (*w != width || *h != height)
{
fprintf(stderr, "__LoadPPM() : Invalid image dimensions.\n" );
}
}
else
{
*data = (unsigned char *) malloc(sizeof(unsigned char) * width * height * *channels);
if (!*data) {
fprintf(stderr, "Unable to allocate host memory\n");
return false;
}
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) == 0)
{
fprintf(stderr, "__LoadPPM() : read data returned error.\n" );
fclose(fp);
return false;
}
fclose(fp);
return true;
}
inline bool savePPM(const char *file, unsigned char *data, unsigned int w, unsigned int h, unsigned int channels)
{
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad())
{
fprintf(stderr, "__savePPM() : Opening file failed.\n" );
return false;
}
if (channels == 1)
{
fh << "P5\n";
}
else if (channels == 3)
{
fh << "P6\n";
}
else
{
fprintf(stderr, "__savePPM() : Invalid number of channels.\n" );
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w*h*channels)) && fh.good(); ++i)
{
fh << data[i];
}
fh.flush();
if (fh.bad())
{
fprintf(stderr,"__savePPM() : Writing data failed.\n" );
return false;
}
fh.close();
return true;
}
inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
} |
2a0428665e28f2bcbfe3f0ce121f1881a5073ea2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <dirent.h>
#include <signal.h>
//#include "mkl.h"
#include <hip/hip_runtime.h>
#include <hipsparse.h>
static volatile int running = 1;
void interrupt_handler(int signal) {
running = 0;
}
#define cudaCheck(result) __cudaCheck(result, __FILE__, __LINE__)
inline hipError_t __cudaCheck(hipError_t result, const char *file, const int line, bool abort = true) {
if (result != hipSuccess) {
fprintf(stderr, "CUDA error at %s[%i]: %s\n", file, line, hipGetErrorString(result));
if (abort) {
exit(result);
}
}
return result;
}
const char* hipsparseGetErrorString(hipsparseStatus_t result) {
switch (result) {
case HIPSPARSE_STATUS_SUCCESS:
return "HIPSPARSE_STATUS_SUCCESS";
case HIPSPARSE_STATUS_NOT_INITIALIZED:
return "HIPSPARSE_STATUS_NOT_INITIALIZED";
case HIPSPARSE_STATUS_ALLOC_FAILED:
return "HIPSPARSE_STATUS_ALLOC_FAILED";
case HIPSPARSE_STATUS_INVALID_VALUE:
return "HIPSPARSE_STATUS_INVALID_VALUE";
case HIPSPARSE_STATUS_ARCH_MISMATCH:
return "HIPSPARSE_STATUS_ARCH_MISMATCH";
case HIPSPARSE_STATUS_MAPPING_ERROR:
return "HIPSPARSE_STATUS_MAPPING_ERROR";
case HIPSPARSE_STATUS_EXECUTION_FAILED:
return "HIPSPARSE_STATUS_EXECUTION_FAILED";
case HIPSPARSE_STATUS_INTERNAL_ERROR:
return "HIPSPARSE_STATUS_INTERNAL_ERROR";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case HIPSPARSE_STATUS_ZERO_PIVOT:
return "HIPSPARSE_STATUS_ZERO_PIVOT";
}
return "CUSPARSE_UNKNOWN_ERROR";
}
#define cusparseCheck(result) __cusparseCheck(result, __FILE__, __LINE__)
inline hipsparseStatus_t __cusparseCheck(hipsparseStatus_t result, const char *file, const int line, bool abort = true) {
if (result != HIPSPARSE_STATUS_SUCCESS) {
fprintf(stderr, "CUSPARSE error at %s[%i]: %s\n", file, line, hipsparseGetErrorString(result));
if (abort) {
exit(result);
}
}
return result;
}
double get_time_ms() {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
return ts.tv_sec * 1000 + ts.tv_nsec / 1000000.0;
} else {
return 0;
}
}
double get_time_s() {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
return ts.tv_sec + ts.tv_nsec / 1e9;
} else {
return 0;
}
}
int compare_strings(const void *va, const void *vb) {
char **a = (char **) va;
char **b = (char **) vb;
return strcmp(*a, *b);
}
int read_files(const char *path, const char *pattern, char **files) {
int n_files = 0;
DIR *d = opendir(path);
struct dirent *dir;
if (d) {
while ((dir = readdir(d)) != NULL) {
if (strstr(dir->d_name, pattern) != NULL) {
files[n_files] = (char *) malloc(1000 * sizeof(char));
strcpy(files[n_files], path);
if (path[strlen(path) - 1] != '/') {
strcat(files[n_files], (char *) "/");
}
strcat(files[n_files], dir->d_name);
n_files++;
}
}
closedir(d);
}
qsort(files, n_files, sizeof(char *), compare_strings);
return n_files;
}
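// Sparse matrix kept in COO format on the host; on the GPU it is sorted by row,
// converted to CSR, and an explicit transpose is also built.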
struct SparseMatrix {
int n_nz;
int n_rows;
int n_cols;
int *rows;
int *cols;
double *vals;
int *d_rows;
int *d_cols;
double *d_vals;
int *d_rows_t;
int *d_cols_t;
double *d_vals_t;
hipsparseHandle_t handle;
hipsparseMatDescr_t descr;
hipsparseSpMatDescr_t sp_descr;
hipsparseSpMatDescr_t sp_descr_t;
void malloc_cpu() {
rows = (int *) malloc(n_nz*sizeof(int));
cols = (int *) malloc(n_nz*sizeof(int));
vals = (double *) malloc(n_nz*sizeof(double));
n_rows = 0;
n_cols = 0;
}
void free_cpu() {
free(rows);
free(cols);
free(vals);
}
void free_gpu() {
cudaCheck(hipFree(d_rows));
cudaCheck(hipFree(d_cols));
cudaCheck(hipFree(d_vals));
cudaCheck(hipFree(d_rows_t));
cudaCheck(hipFree(d_cols_t));
cudaCheck(hipFree(d_vals_t));
}
void copy_to_gpu() {
cudaCheck(hipMalloc(&d_rows, n_nz*sizeof(int)));
cudaCheck(hipMalloc(&d_cols, n_nz*sizeof(int)));
cudaCheck(hipMalloc(&d_vals, n_nz*sizeof(double)));
cudaCheck(hipMalloc(&d_rows_t, (n_cols+1)*sizeof(int)));
cudaCheck(hipMalloc(&d_cols_t, n_nz*sizeof(int)));
cudaCheck(hipMalloc(&d_vals_t, n_nz*sizeof(double)));
cudaCheck(hipMemcpy(d_rows, rows, n_nz*sizeof(int), hipMemcpyHostToDevice));
cudaCheck(hipMemcpy(d_cols, cols, n_nz*sizeof(int), hipMemcpyHostToDevice));
cudaCheck(hipMemcpy(d_vals, vals, n_nz*sizeof(double), hipMemcpyHostToDevice));
}
void sort_by_row() {
int *p;
void *p_buffer;
size_t p_buffer_size = 0;
double *sorted_vals;
cudaCheck(hipMalloc(&sorted_vals, n_nz*sizeof(double)));
cusparseCheck(hipsparseXcsrsort_bufferSizeExt(handle, n_rows, n_cols, n_nz, d_rows, d_cols, &p_buffer_size));
cudaCheck(hipMalloc(&p, n_nz*sizeof(int)));
cudaCheck(hipMalloc(&p_buffer, p_buffer_size*sizeof(char)));
cusparseCheck(hipsparseCreateIdentityPermutation(handle, n_nz, p));
cusparseCheck(hipsparseXcoosortByRow(handle, n_rows, n_cols, n_nz, d_rows, d_cols, p, p_buffer));
cusparseCheck(hipsparseDgthr(handle, n_nz, d_vals, sorted_vals, p, HIPSPARSE_INDEX_BASE_ZERO));
cudaCheck(hipFree(d_vals));
cudaCheck(hipFree(p));
cudaCheck(hipFree(p_buffer));
d_vals = sorted_vals;
}
void coo_to_csr() {
int *csr;
cudaCheck(hipMalloc(&csr, (n_rows+1)*sizeof(int)));
cusparseCheck(hipsparseXcoo2csr(handle, d_rows, n_nz, n_rows, csr, HIPSPARSE_INDEX_BASE_ZERO));
cudaCheck(hipFree(d_rows));
d_rows = csr;
cusparseCheck(hipsparseCreateCsr(&sp_descr, n_rows, n_cols, n_nz, d_rows, d_cols, d_vals,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, HIP_R_64F));
}
void transpose_csr() {
void *p_buffer;
size_t p_buffer_size = 0;
cusparseCheck(hipsparseCsr2cscEx2_bufferSize(handle, n_rows, n_cols, n_nz, d_vals, d_rows, d_cols,
d_vals_t, d_rows_t, d_cols_t, HIP_R_64F, HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO,
HIPSPARSE_CSR2CSC_ALG2, &p_buffer_size));
cudaCheck(hipMalloc(&p_buffer, p_buffer_size*sizeof(char)));
cusparseCheck(hipsparseCsr2cscEx2(handle, n_rows, n_cols, n_nz, d_vals, d_rows, d_cols,
d_vals_t, d_rows_t, d_cols_t, HIP_R_64F, HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO,
HIPSPARSE_CSR2CSC_ALG2, p_buffer));
cudaCheck(hipFree(p_buffer));
cusparseCheck(hipsparseCreateCsr(&sp_descr_t, n_cols, n_rows, n_nz, d_rows_t, d_cols_t, d_vals_t,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, HIP_R_64F));
}
void setup_gpu() {
cusparseCheck(hipsparseCreate(&handle));
cusparseCheck(hipsparseCreateMatDescr(&descr));
cusparseCheck(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
cusparseCheck(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO));
copy_to_gpu();
sort_by_row();
coo_to_csr();
transpose_csr();
}
};
struct Region {
char *name;
int n_voxels;
double min;
double max;
double avg;
double f; // Objective function evaluation
double eud;
double dF_dEUD;
double sum_alpha;
// Virtual EUD to control PTV overdosage
// Hardcoded to eud + 1 for now
double v_f;
double v_eud;
double v_dF_dEUD;
double v_sum_alpha;
bool is_optimized;
bool is_ptv;
double pr_min;
double pr_max;
double pr_avg_min;
double pr_avg_max;
double *grad_avg;
double pr_eud;
int penalty;
int alpha;
void set_targets(bool t_ptv, double t_min, double t_avg_min, double t_avg_max, double t_max,
double t_eud, int t_alpha, int t_penalty) {
if (t_eud < 0 && t_min < 0 && t_max < 0 &&
t_avg_min < 0 && t_avg_max < 0) {
is_optimized = false;
} else {
is_optimized = true;
is_ptv = t_ptv;
pr_min = t_min;
pr_max = t_max;
pr_avg_min = t_avg_min;
pr_avg_max = t_avg_max;
pr_eud = t_eud;
alpha = t_alpha;
penalty = t_penalty;
f = 0;
v_f = 0;
eud = 0;
v_eud = 0;
dF_dEUD = 0;
v_dF_dEUD = 0;
sum_alpha = 0;
v_sum_alpha = 0;
}
}
};
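// Warp- and block-level reductions based on shuffle intrinsics: every step halves the number
// of active lanes until lane 0 holds the warp's result; blockReduce then combines the per-warp
// results through shared memory.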
__inline__ __device__ float warpReduceMin(float val) {
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
float v = __shfl_down_sync(0xffffffff, val, offset);
if (v < val) {
val = v;
}
}
return val;
}
__inline__ __device__ float warpReduceMax(float val) {
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
float v = __shfl_down_sync(0xffffffff, val, offset);
if (v > val) {
val = v;
}
}
return val;
}
template <class T>
__inline__ __device__ T warpReduceSum(T val) {
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
val += __shfl_down_sync(0xffffffff, val, offset);
}
return val;
}
template <class T>
__inline__ __device__ T blockReduce(T val, T (*warp_reduction)(T), T defval) {
__shared__ T shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warp_reduction(val);
// Write reduced value to shared memory
if (lane == 0) {
shared[wid] = val;
}
__syncthreads();
// Ensure we only grab a value from shared memory if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : defval;
if (wid == 0) {
val = warp_reduction(val);
}
return val;
}
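// One thread block per region: threads stride over the voxels, accumulating min/max/sum and the
// power sums needed for the generalized EUD; thread 0 then derives the EUD, the objective term f
// and its derivative dF_dEUD (and, for PTVs, the virtual counterparts used to control over-dosage).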
__global__ void stats_gpu(double *doses, char *voxel_regions, int n_regions, int n_voxels,
Region *regions) {
int rid = blockIdx.x;
float min = 1e10;
float max = 0;
float sum = 0;
double eud = 0;
double v_eud = 0;
double sum_alpha = 0;
double v_sum_alpha = 0;
for (int i = threadIdx.x; i < n_voxels; i += blockDim.x) {
if (voxel_regions[rid*n_voxels + i]) {
float dose = doses[i];
if (dose < min) {
min = dose;
}
if (dose > max) {
max = dose;
}
sum += dose;
if (dose > 0) {
sum_alpha += pow((double) dose, (double) regions[rid].alpha);
if (regions[rid].is_ptv) {
v_sum_alpha += pow((double) dose, (double) -regions[rid].alpha);
}
}
}
}
min = blockReduce<float>(min, warpReduceMin, 1e10);
max = blockReduce<float>(max, warpReduceMax, 0);
sum = blockReduce<float>(sum, warpReduceSum, 0);
sum_alpha = blockReduce<double>(sum_alpha, warpReduceSum, 0);
if (regions[rid].is_ptv) {
v_sum_alpha = blockReduce<double>(v_sum_alpha, warpReduceSum, 0);
}
if (threadIdx.x == 0) {
regions[rid].min = min;
regions[rid].max = max;
regions[rid].avg = sum / regions[rid].n_voxels;
eud = pow(sum_alpha/regions[rid].n_voxels, 1.0/regions[rid].alpha);
regions[rid].sum_alpha = sum_alpha;
regions[rid].eud = eud;
if (regions[rid].is_ptv) {
v_eud = pow(v_sum_alpha/regions[rid].n_voxels, 1.0/-regions[rid].alpha);
regions[rid].v_sum_alpha = v_sum_alpha;
regions[rid].v_eud = v_eud;
}
if (regions[rid].is_optimized) {
int n = regions[rid].penalty;
int pd = regions[rid].pr_eud;
int v_pd = pd + 1; // Hardcoded virtual PTV prescribed dose
if (regions[rid].is_ptv) {
regions[rid].f = 1/(1 + pow(pd/eud, n));
regions[rid].dF_dEUD = (n*regions[rid].f/eud) * pow(pd/eud, n);
// Virtual EUD to control PTV over-dosage
regions[rid].v_f = 1/(1 + pow(v_eud/v_pd, n));
regions[rid].v_dF_dEUD = -(n*regions[rid].v_f/v_eud) * pow(v_eud/v_pd, n);
} else {
regions[rid].f = 1/(1 + pow(eud/pd, n));
regions[rid].dF_dEUD = -(n*regions[rid].f/eud) * pow(eud/pd, n);
}
}
}
}
__global__ void init_fluence(double *fluence, int n_beamlets, double value) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_beamlets) {
fluence[idx] = value;
}
}
__global__ void scale_doses(double *doses, int n_voxels, double dose_grid_scaling) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
doses[idx] *= dose_grid_scaling;
}
}
__global__ void apply_gradients_OLD(double *gradients, double *momentum, int n_beamlets, int n_gradients, float step, double *fluence) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
double beta = 0.9;
if (idx < n_beamlets) {
double gradient = 0;
for (int i = 0; i < n_gradients; i++) {
gradient += gradients[i*n_beamlets + idx];
}
momentum[idx] = beta*momentum[idx] + (1-beta)*gradient;
fluence[idx] += step*momentum[idx];
if (fluence[idx] < 0) {
fluence[idx] = 0;
}
if (fluence[idx] > 1) {
fluence[idx] = 1;
}
}
}
//__global__ void apply_gradients(double *gradients, double *momentum, int n_beamlets, int n_gradients, float step, double *fluence) {
// int idx = blockIdx.x*blockDim.x + threadIdx.x;
// int beta = 0.9;
//
// if (idx < n_beamlets) {
// double gradient = 0;
// for (int i = 0; i < n_gradients; i++) {
// momentum[i*n_beamlets + idx] = beta*momentum[i*n_beamlets + idx] + (1-beta)*gradients[i*n_beamlets + idx];
// gradient += momentum[i*n_beamlets + idx];
// }
//
// fluence[idx] += step*gradient;
//
// if (fluence[idx] < 0) {
// fluence[idx] = 0;
// }
// if (fluence[idx] > 0.2) {
// fluence[idx] = 0.2;
// }
// }
//}
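// Momentum-based update of the fluence from the beamlet-space gradient,
// with the result clamped to [0, 0.3].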
__global__ void apply_gradients(double *gradients, double *momentum, int n_beamlets, int n_gradients, float step, double *fluence) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
double beta = 0.9;
if (idx < n_beamlets) {
momentum[idx] = beta*momentum[idx] + (1-beta)*gradients[idx];
fluence[idx] += step*momentum[idx];
// Very hacky implementation
//double threshold = 0.02;
//if (idx > 0) {
// double diff = fluence[idx] - fluence[idx-1];
// if (diff > threshold) {
// fluence[idx] -= 0.01;
// }
//}
//if (idx < n_beamlets - 1) {
// double diff = fluence[idx] - fluence[idx+1];
// if (diff > threshold) {
// fluence[idx] -= 0.01;
// }
//}
// Quick-and-dirty smoothing :)
//double w = 1e-1;
//if (idx > 0 && idx < n_beamlets - 1) {
// double neigh_avg = (fluence[idx - 1] + fluence[idx] + fluence[idx + 1])/3;
// if (fluence[idx] > neigh_avg) {
// fluence[idx] -= w*abs(fluence[idx] - neigh_avg);
// }
// if (fluence[idx] < neigh_avg) {
// fluence[idx] += w*abs(fluence[idx] - neigh_avg);
// }
//}
if (fluence[idx] < 0) {
fluence[idx] = 0;
}
if (fluence[idx] > 0.3) {
fluence[idx] = 0.3;
}
}
}
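// A treatment plan: beam/beamlet geometry, voxel-region membership, the sparse dose
// deposition matrix, and the fluence and dose vectors on both host and device.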
#define BEAM_MAP_X 120
#define BEAM_MAP_Y 120
struct Plan {
char *name;
int n_beams;
int n_beamlets;
int *n_beamlets_beam;
int n_voxels;
int n_regions;
double dose_grid_scaling;
Region* regions;
Region* d_regions;
char *voxel_regions;
char *d_voxel_regions;
SparseMatrix spm;
double *fluence;
double *smoothed_fluence;
double *doses;
double *d_fluence;
double *d_doses;
char *files[100];
int *beam_maps;
int *d_beam_maps;
hipsparseDnVecDescr_t fluence_descr;
hipsparseDnVecDescr_t doses_descr;
void check_line(int result) {
if (result < 0) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to read line.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
char* get_str(char *line, size_t len) {
char *temp = (char *) malloc(len*sizeof(char));
snprintf(temp, len, "%s", line);
temp[strcspn(temp, "\r\n")] = 0; // Remove newline
return temp;
}
int get_int(char *line, char **end) {
return strtoll(line, end, 10);
}
float get_float(char *line, char **end) {
return strtof(line, end);
}
void parse_config(const char *path) {
int n_files = read_files(path, "m_", files);
FILE *f = fopen(files[0], "r");
if (f == NULL) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to open file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
printf("Loading %s...\n", files[0]);
char *line = NULL;
char *end = NULL;
size_t len = 0;
check_line(getline(&line, &len, f));
name = get_str(line, len);
check_line(getline(&line, &len, f));
n_beams = get_int(line, &end);
n_beamlets = 0;
n_beamlets_beam = (int *)malloc(n_beams * sizeof(int));
for (int i = 0; i < n_beams; i++) {
check_line(getline(&line, &len, f));
int index = get_int(line, &end);
int beamlets = get_int(end, &line);
n_beamlets_beam[index - 1] = beamlets;
n_beamlets += beamlets;
}
check_line(getline(&line, &len, f));
n_voxels = get_int(line, &end);
check_line(getline(&line, &len, f));
dose_grid_scaling = get_float(line, &end);
check_line(getline(&line, &len, f));
n_regions = get_int(line, &end);
regions = (Region *) malloc(n_regions*sizeof(Region));
for (int i = 0; i < n_regions; i++) {
check_line(getline(&line, &len, f));
get_int(line, &end);
char *name = get_str(end + 1, len);
regions[i].name = name;
regions[i].n_voxels = 0;
}
line = NULL;
len = 0;
while (getline(&line, &len, f) != -1) {
fprintf(stderr, "[WARNING] Line not processed: %s", line);
}
fclose(f);
free(files[0]);
cudaCheck(hipMalloc(&d_regions, n_regions*sizeof(Region)));
}
void parse_voxel_regions(const char *path) {
int n_files = read_files(path, "v_", files);
FILE *f = fopen(files[0], "r");
if (f == NULL) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to open file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
printf("Loading %s...\n", files[0]);
voxel_regions = (char *) malloc(n_voxels*n_regions*sizeof(char));
char line[1024];
int num = 0;
int offset = 0;
while (fgets(line, sizeof line, f)) {
if (sscanf(line, "%d", &num)) {
for (int i = 0; i < n_regions; i++) {
voxel_regions[offset + i*n_voxels] = num & 1;
num >>= 1;
}
offset++;
} else {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to read voxel regions.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < n_regions; i++) {
for (int j = 0; j < n_voxels; j++) {
if (voxel_regions[i*n_voxels + j]) {
regions[i].n_voxels += 1;
}
}
}
fclose(f);
free(files[0]);
cudaCheck(hipMalloc(&d_voxel_regions, n_voxels*n_regions*sizeof(char)));
cudaCheck(hipMemcpy(d_voxel_regions, voxel_regions, n_voxels*n_regions*sizeof(char), hipMemcpyHostToDevice));
}
void load_spm(const char *path) {
int n_files = read_files(path, "d_", files);
FILE **fp = (FILE **) malloc(n_files*sizeof(FILE *));
for (int i = 0; i < n_files; i++) {
fp[i] = fopen(files[i], "r");
if (fp[i] == NULL) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to open file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
int n_nz = 0;
int count = fscanf(fp[i], "%d", &n_nz);
spm.n_nz += n_nz;
}
spm.malloc_cpu();
int idx = 0;
int offset = 0;
for (int i = 0; i < n_files; i++) {
printf("Loading %s... ", files[i]);
int n_read = 0;
while (true) {
int row, col;
double val;
int count = fscanf(fp[i], "%d %d %lf", &row, &col, &val);
if(count == EOF || !count) {
break;
}
int new_col = offset + col;
spm.rows[idx] = row;
spm.cols[idx] = new_col;
spm.vals[idx] = val;
idx++;
n_read++;
if (row > spm.n_rows) {
spm.n_rows = row;
}
if (new_col > spm.n_cols) {
spm.n_cols = new_col;
}
}
printf("%d values read.\n", n_read);
offset = spm.n_cols + 1;
fclose(fp[i]);
free(files[i]);
}
spm.n_rows++;
// Sometimes there are missing voxels,
// but we want the dimensions to match for SpMM
if (spm.n_rows < n_voxels) {
spm.n_rows = n_voxels;
}
spm.n_cols++;
free(fp);
}
void load_fluence(const char *path, const char *prefix) {
int n_files = read_files(path, prefix, files);
FILE **fp = (FILE **) malloc(n_files*sizeof(FILE *));
int idx = 0;
for (int i = 0; i < n_files; i++) {
printf("Loading %s... ", files[i]);
fp[i] = fopen(files[i], "r");
if (fp[i] == NULL) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to open file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
int n_read = 0;
while (true) {
int count = fscanf(fp[i], "%lf", &(fluence[idx]));
if(count == EOF || !count) {
break;
}
idx++;
n_read++;
}
printf("%d values read.\n", n_read);
fclose(fp[i]);
free(files[i]);
}
cudaCheck(hipMemcpy(d_fluence, fluence, n_beamlets*sizeof(double), hipMemcpyHostToDevice));
free(fp);
}
void load_coords(const char *path) {
int n_files = read_files(path, "xcoords_", files);
beam_maps = (int *) malloc(n_beams*BEAM_MAP_Y*BEAM_MAP_X*sizeof(int));
for (int i = 0; i < n_beams*BEAM_MAP_Y*BEAM_MAP_X; i++) {
beam_maps[i] = -1;
}
int idx = 0;
FILE **fp = (FILE **) malloc(n_files*sizeof(FILE *));
for (int i = 0; i < n_files; i++) {
fp[i] = fopen(files[i], "r");
if (fp[i] == NULL) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to open file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
printf("Loading %s... ", files[i]);
char ignored[1024];
int n_read = 0;
while (true) {
int col, row;
int count = fscanf(fp[i], "%d %d", &col, &row);
if(count == EOF) {
break;
} else if (!count) {
fgets(ignored, sizeof(ignored), fp[i]);
} else if (count == 1) {
// Header values, ignored
continue;
} else if (count == 2) {
beam_maps[i*BEAM_MAP_Y*BEAM_MAP_X + col*BEAM_MAP_Y + row] = idx;
n_read++;
idx++;
} else {
fprintf(stderr, "ERROR in %s (%s:%d): While reading coordinate file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
printf("%d coordinates read.\n", n_read);
fclose(fp[i]);
free(files[i]);
}
//cudaCheck(hipMemcpy(d_beam_maps, beam_maps,
// n_beams*BEAM_MAP_Y*BEAM_MAP_X*sizeof(int), hipMemcpyHostToDevice));
free(fp);
}
void init_fluence(float value) {
for (int i = 0; i < n_beamlets; i++) {
fluence[i] = value;
}
cudaCheck(hipMemcpy(d_fluence, fluence, n_beamlets*sizeof(double), hipMemcpyHostToDevice));
}
void print() {
printf("Name: %s\n", name);
printf("Number of beams: %d\n", n_beams);
for (int i = 0; i < n_beams; i++) {
printf(" Beam %d: %d beamlets\n", i + 1, n_beamlets_beam[i]);
}
printf("Total: %d beamlets\n", n_beamlets);
printf("Number of voxels: %d\n", n_voxels);
printf("Dose Grid Scaling: %e\n", dose_grid_scaling);
printf("Number of regions: %d\n", n_regions);
for (int i = 0; i < n_regions; i++) {
printf(" Region %2d (%4d): %-16s %8d voxels\n", i, (int) pow(2, i), regions[i].name, regions[i].n_voxels);
}
printf("Dose matrix: %d x %d with %d nonzeros.\n", spm.n_rows, spm.n_cols, spm.n_nz);
}
void compute_dose() {
//cudaCheck(hipMemcpy(d_fluence, fluence, n_beamlets*sizeof(double), hipMemcpyHostToDevice));
//memset(doses, 0, n_voxels*sizeof(*doses));
cudaCheck(hipMemset(d_doses, 0, n_voxels*sizeof(*d_doses)));
//cusparseCheck(hipsparseDcsrmv(spm.handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, spm.n_rows, spm.n_cols, spm.n_nz, &alpha, spm.descr, spm.d_vals, spm.d_rows, spm.d_cols, d_fluence, &beta, d_doses));
double alpha = 1.0, beta = 0.0;
void *p_buffer;
size_t p_buffer_size = 0;
cusparseCheck(hipsparseSpMV_bufferSize(spm.handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &alpha, spm.sp_descr, fluence_descr, &beta,
doses_descr, HIP_R_64F, HIPSPARSE_CSRMV_ALG1, &p_buffer_size));
cudaCheck(hipMalloc(&p_buffer, p_buffer_size*sizeof(char)));
cusparseCheck(hipsparseSpMV(spm.handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &alpha, spm.sp_descr, fluence_descr, &beta,
doses_descr, HIP_R_64F, HIPSPARSE_CSRMV_ALG1, p_buffer));
int block = 512;
int grid = (n_voxels + block - 1)/block;
hipLaunchKernelGGL(( scale_doses), dim3(grid), dim3(block), 0, 0, d_doses, n_voxels, dose_grid_scaling);
//cudaCheck(hipMemcpy(doses, d_doses, n_voxels*sizeof(double), hipMemcpyDeviceToHost));
}
void stats() {
//cudaCheck(hipMemcpy(d_regions, regions, n_regions*sizeof(Region), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( stats_gpu), dim3(n_regions), dim3(512), 0, 0, d_doses, d_voxel_regions, n_regions, n_voxels, d_regions);
//cudaCheck(hipMemcpy(regions, d_regions, n_regions*sizeof(Region), hipMemcpyDeviceToHost));
}
void print_table() {
printf(" Region Min Avg Max EUD dF_dEUD v_EUD v_dF_dEUD f v_f\n");
for (int i = 0; i < n_regions; i++) {
if (regions[i].is_optimized) {
printf("%-15s %11.6lf %11.6lf %11.6lf %11.6lf %11.6lf %11.6lf %11.6lf %11.6lf %11.6lf\n", regions[i].name, regions[i].min, regions[i].avg, regions[i].max, regions[i].eud, regions[i].dF_dEUD, regions[i].v_eud, regions[i].v_dF_dEUD, regions[i].f, regions[i].v_f);
}
}
}
void load(const char *plan_path, const char *fluence_path, const char *fluence_prefix) {
parse_config(plan_path);
parse_voxel_regions(plan_path);
load_spm(plan_path);
spm.setup_gpu();
fluence = (double *) malloc(n_beamlets*sizeof(double));
smoothed_fluence = (double *) malloc(n_beamlets*sizeof(double));
cudaCheck(hipMalloc(&d_fluence, n_beamlets*sizeof(double)));
doses = (double *) malloc(n_voxels*sizeof(double));
cudaCheck(hipMalloc(&d_doses, n_voxels*sizeof(double)));
load_coords(plan_path);
load_fluence(fluence_path, fluence_prefix);
//init_fluence(1e-2);
print();
hipsparseCreateDnVec(&fluence_descr, n_beamlets, d_fluence, HIP_R_64F);
hipsparseCreateDnVec(&doses_descr, n_voxels, d_doses, HIP_R_64F);
}
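// 3x3 neighborhood smoothing of the fluence on the CPU: each beamlet becomes a weighted
// average of itself and its 8 neighbors on the beam map; missing neighbors return most of
// their weight to the center beamlet.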
void smooth_cpu() {
int n_neighbors = 8;
int sum_weights = 1000;
int *neighbors = (int *) malloc(n_neighbors*sizeof(int));
for (int i = 0; i < n_beams; i++) {
for (int y = 0; y < BEAM_MAP_Y; y++) {
for (int x = 0; x < BEAM_MAP_X; x++) {
int offset = i*BEAM_MAP_Y*BEAM_MAP_X;
int idx = beam_maps[offset + BEAM_MAP_Y*y + x];
float center_weight = sum_weights - n_neighbors;
if (idx >= 0) {
smoothed_fluence[idx] = 0;
neighbors[0] = beam_maps[offset + BEAM_MAP_Y*(y-1) + (x-1)];
neighbors[1] = beam_maps[offset + BEAM_MAP_Y*(y-1) + (x )];
neighbors[2] = beam_maps[offset + BEAM_MAP_Y*(y-1) + (x+1)];
neighbors[3] = beam_maps[offset + BEAM_MAP_Y*(y ) + (x-1)];
neighbors[4] = beam_maps[offset + BEAM_MAP_Y*(y ) + (x+1)];
neighbors[5] = beam_maps[offset + BEAM_MAP_Y*(y+1) + (x-1)];
neighbors[6] = beam_maps[offset + BEAM_MAP_Y*(y+1) + (x )];
neighbors[7] = beam_maps[offset + BEAM_MAP_Y*(y+1) + (x+1)];
//if (neighbors[3] < 0 || neighbors[4] < 0) {
// // This is a border beamlet, ignore other rows
// neighbors[0] = -1;
// neighbors[1] = -1;
// neighbors[2] = -1;
// neighbors[5] = -1;
// neighbors[6] = -1;
// neighbors[7] = -1;
//}
for (int j = 0; j < n_neighbors; j++) {
if (neighbors[j] >= 0) {
smoothed_fluence[idx] += fluence[neighbors[j]];
} else {
center_weight += 0.8;
}
}
smoothed_fluence[idx] += center_weight*fluence[idx];
smoothed_fluence[idx] /= sum_weights;
}
}
}
}
for (int i = 0; i < n_beamlets; i++) {
fluence[i] = smoothed_fluence[i];
}
free(neighbors);
}
};
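// Per-voxel gradient kernels: each one writes, for a single region, every voxel's contribution
// to one constraint (min/max/average bounds or the EUD term); reduce_gradient later sums these
// per-voxel contributions before descend() maps them back to beamlet space through the dose matrix.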
__global__ void voxels_min(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx]) {
voxels[idx] = (doses[idx] < regions[rid].pr_min);
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_min_old(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx] && doses[idx] < regions[rid].pr_min) {
voxels[idx] = (regions[rid].pr_min - doses[idx]);
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_max(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx]) {
voxels[idx] = -1*(doses[idx] > regions[rid].pr_max);
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_max_old(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx] && doses[idx] > regions[rid].pr_max) {
voxels[idx] = (regions[rid].pr_max - doses[idx]);
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_average(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (regions[rid].avg < regions[rid].pr_avg_min) {
voxels[idx] = voxel_regions[rid*n_voxels + idx];
} else if (regions[rid].avg > regions[rid].pr_avg_max) {
voxels[idx] = -voxel_regions[rid*n_voxels + idx];
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_eud(Region *regions, char *voxel_regions, double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
Region r = regions[rid];
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx]) {
double dEUD_dd = r.eud*pow(doses[idx], r.alpha - 1)/r.sum_alpha;
voxels[idx] = r.dF_dEUD * dEUD_dd;
if (r.is_ptv) {
dEUD_dd = r.v_eud*pow(doses[idx], -r.alpha - 1)/r.v_sum_alpha;
voxels[n_voxels + idx] = r.v_dF_dEUD * dEUD_dd;
}
} else {
voxels[idx] = 0;
if (r.is_ptv) {
voxels[n_voxels + idx] = 0;
}
}
}
}
__global__ void voxels_average_old(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx] && regions[rid].avg < regions[rid].pr_avg_min) {
voxels[idx] = (regions[rid].pr_avg_min - regions[rid].avg);
} else if (voxel_regions[rid*n_voxels + idx] && regions[rid].avg > regions[rid].pr_avg_max) {
voxels[idx] = (regions[rid].pr_avg_max - regions[rid].avg);
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_average_objective(Region *regions, char *voxel_regions, int n_voxels, int rid, float penalty, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
voxels[idx] = penalty*voxel_regions[rid*n_voxels + idx];
}
}
__global__ void reduce_gradient(double *voxels, int n_voxels, int n_gradients) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
double influence = 0;
for (int i = 0; i < n_gradients; i++) {
influence += voxels[i*n_voxels + idx];
}
voxels[idx] = influence;
}
}
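// Sum of constraint violations (min/max/average dose bounds) over the optimized regions.
// Only used for progress reporting; it does not enter the gradient computation.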
double penalty(Plan plan) {
double penalty = 0;
for (int i = 0; i < plan.n_regions; i++) {
Region region = plan.regions[i];
if (region.is_optimized) {
if (region.pr_min > 0 &&
region.min < region.pr_min) {
penalty += region.pr_min - region.min;
}
if (region.pr_max > 0 &&
region.max > region.pr_max) {
penalty += region.max - region.pr_max;
}
if (region.pr_avg_min > 0 &&
region.avg < region.pr_avg_min) {
penalty += region.pr_avg_min - region.avg;
}
if (region.pr_avg_max > 0 &&
region.avg > region.pr_avg_max) {
penalty += region.avg - region.pr_avg_max;
}
}
}
return penalty;
}
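// Overall plan score: product of the per-region logistic EUD scores f (and v_f for PTVs).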
double objective(Plan plan) {
double objective = 1;
for (int i = 0; i < plan.n_regions; i++) {
Region region = plan.regions[i];
if (region.is_optimized) {
objective *= region.f;
if (region.is_ptv) {
objective *= region.v_f;
}
}
}
return objective;
}
//double objective(Plan plan) {
// double objective = 0;
//
// for (int i = 0; i < plan.n_regions; i++) {
// Region region = plan.regions[i];
// if (region.is_optimized) {
// objective += region.avg*region.n_voxels;
// }
// }
// return objective/plan.n_voxels;
//}
void add_grad(Plan plan, double *a, double *b, double step) {
for (int i = 0; i < plan.n_beamlets; i++) {
a[i] += step*b[i];
}
free(b);
}
void vector_stats(const char *name, double *vector, int n_values) {
double min = 1e10, max = 0, avg = 0;
for (int i = 0; i < n_values; i++) {
if (vector[i] < min) {
min = vector[i];
}
if (vector[i] > max) {
max = vector[i];
}
avg += vector[i];
}
avg /= n_values;
printf("%s: %f %f %f\n", name, min, max, avg);
}
int descend(Plan plan, double *d_momentum, float step, int rid_sll, int rid_slr) {
int block = 512;
int grid = (plan.n_voxels + block - 1)/block;
double *d_voxels;
int gradients_per_region = 3; // Warning, hardcoded!
cudaCheck(hipMalloc(&d_voxels, gradients_per_region*plan.n_regions*plan.n_voxels*sizeof(double)));
// Hardcoded objective function gradients
int n_gradients = 2;
float penalty = -0.0000;
hipLaunchKernelGGL(( voxels_average_objective), dim3(grid), dim3(block), 0, 0, plan.d_regions, plan.d_voxel_regions, plan.n_voxels, rid_sll, penalty, &(d_voxels[0]));
hipLaunchKernelGGL(( voxels_average_objective), dim3(grid), dim3(block), 0, 0, plan.d_regions, plan.d_voxel_regions, plan.n_voxels, rid_slr, penalty, &(d_voxels[plan.n_voxels]));
int offset = n_gradients*plan.n_voxels;
for (int i = 0; i < plan.n_regions; i++) {
Region region = plan.regions[i];
if (region.is_optimized) {
hipLaunchKernelGGL(( voxels_eud), dim3(grid), dim3(block), 0, 0, plan.d_regions, plan.d_voxel_regions, plan.d_doses, plan.n_voxels, i, &(d_voxels[offset]));
offset += plan.n_voxels;
n_gradients++;
if (region.is_ptv) {
offset += plan.n_voxels;
n_gradients++;
}
}
}
//printf("n_gradients: %d\n", n_gradients);
hipLaunchKernelGGL(( reduce_gradient), dim3(grid), dim3(block), 0, 0, d_voxels, plan.n_voxels, n_gradients);
double *d_gradients;
cudaCheck(hipMalloc(&d_gradients, n_gradients*plan.n_beamlets*sizeof(double)));
SparseMatrix spm = plan.spm;
//cusparseCheck(hipsparseDcsrmm(spm.handle, HIPSPARSE_OPERATION_TRANSPOSE, spm.n_rows, n_gradients, spm.n_cols, spm.n_nz, &alpha, spm.descr, spm.d_vals, spm.d_rows, spm.d_cols, d_voxels, spm.n_rows, &beta, d_gradients, spm.n_cols));
//cusparseCheck(hipsparseDcsrmm(spm.handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, spm.n_cols, n_gradients, spm.n_rows, spm.n_nz, &alpha, spm.descr, spm.d_vals_t, spm.d_rows_t, spm.d_cols_t, d_voxels, spm.n_rows, &beta, d_gradients, spm.n_cols));
//cusparseCheck(hipsparseDcsrmv(spm.handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, spm.n_cols, spm.n_rows, spm.n_nz, &alpha, spm.descr, spm.d_vals_t, spm.d_rows_t, spm.d_cols_t, d_voxels, &beta, d_gradients));
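// Map the combined voxel-space gradient to beamlet space, d_gradients = D^T * d_voxels,
// using the transposed CSR matrix prepared by SparseMatrix::setup_gpu().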
hipsparseDnVecDescr_t voxels_descr;
hipsparseDnVecDescr_t gradient_descr;
hipsparseCreateDnVec(&voxels_descr, plan.n_voxels, d_voxels, HIP_R_64F);
hipsparseCreateDnVec(&gradient_descr, plan.n_beamlets, d_gradients, HIP_R_64F);
double alpha = 1.0, beta = 0.0;
void *p_buffer;
size_t p_buffer_size = 0;
cusparseCheck(hipsparseSpMV_bufferSize(spm.handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &alpha, spm.sp_descr_t, voxels_descr, &beta,
gradient_descr, HIP_R_64F, HIPSPARSE_CSRMV_ALG1, &p_buffer_size));
cudaCheck(hipMalloc(&p_buffer, p_buffer_size*sizeof(char)));
cusparseCheck(hipsparseSpMV(spm.handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &alpha, spm.sp_descr_t, voxels_descr, &beta,
gradient_descr, HIP_R_64F, HIPSPARSE_CSRMV_ALG1, p_buffer));
//double *gradients = (double *) malloc(n_gradients*plan.n_beamlets*sizeof(double));;
//cudaCheck(hipMemcpy(gradients, d_gradients, n_gradients*plan.n_beamlets*sizeof(double), hipMemcpyDeviceToHost));
//vector_stats("obj_1", &gradients[0], plan.n_beamlets);
//vector_stats("obj_2", &gradients[plan.n_beamlets], plan.n_beamlets);
//offset = 2*plan.n_beamlets;
//for (int i = 0; i < plan.n_regions; i++) {
// Region region = plan.regions[i];
// if (region.is_optimized) {
// printf("gpu %s\n", region.name);
// if (region.pr_avg_min > 0 || region.pr_avg_max > 0) {
// vector_stats("grad_avg", &gradients[offset], plan.n_beamlets);
// offset += plan.n_beamlets;
// }
// if (region.pr_min > 0) {
// vector_stats("grad_min", &gradients[offset], plan.n_beamlets);
// offset += plan.n_beamlets;
// }
// if (region.pr_max > 0) {
// vector_stats("grad_max", &gradients[offset], plan.n_beamlets);
// offset += plan.n_beamlets;
// }
// }
//}
grid = (plan.n_beamlets + block - 1)/block;
hipLaunchKernelGGL(( apply_gradients), dim3(grid), dim3(block), 0, 0, d_gradients, d_momentum, plan.n_beamlets, n_gradients, step, plan.d_fluence);
cudaCheck(hipFree(d_gradients));
cudaCheck(hipFree(d_voxels));
return n_gradients;
}
void optimize_gpu(Plan plan, int rid_sll, int rid_slr, float gurobi_avg_sll, float gurobi_avg_slr, float stop_ratio) {
cudaCheck(hipMemcpy(plan.d_regions, plan.regions, plan.n_regions*sizeof(Region), hipMemcpyHostToDevice));
//cudaCheck(hipMemset(plan.d_fluence, 0, plan.n_beamlets*sizeof(double)));
double *d_momentum;
int gradients_per_region = 3; // Warning, hardcoded!
cudaCheck(hipMalloc(&d_momentum, gradients_per_region*plan.n_regions*plan.n_beamlets*sizeof(double)));
cudaCheck(hipMemset(d_momentum, 0, gradients_per_region*plan.n_regions*plan.n_beamlets*sizeof(double)));
plan.compute_dose();
plan.stats();
cudaCheck(hipMemcpy(plan.regions, plan.d_regions, plan.n_regions*sizeof(Region), hipMemcpyDeviceToHost));
printf("Initial solution:\n");
plan.print_table();
//exit(0);
// TODO: Initializing the fluence to 0 breaks the algorithm because of the EUDs. This needs revisiting, or the fluence should be initialized to 0.1 or so.
//cudaCheck(hipMemset(plan.d_fluence, 0, plan.n_beamlets*sizeof(double)));
//plan.compute_dose();
//plan.stats();
//cudaCheck(hipMemcpy(plan.regions, plan.d_regions, plan.n_regions*sizeof(Region), hipMemcpyDeviceToHost));
//plan.print_table();
//float step = 2e-9;
//float decay = 1e-7;
//float min_step = 1e-9;
float step = 2e-7;
float decay = 1e-7;
float min_step = 1e-1;
double start_time = get_time_s();
double current_time;
double last_pen = 0;
double last_obj = 0;
double last_obj2 = 0;
int it = 0;
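// Main loop: one momentum gradient step on the GPU, fluence smoothing on the CPU, then the dose
// and region statistics are recomputed. Runs until SIGINT clears `running` or the 60 s budget expires.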
while (running && get_time_s() - start_time < 60) {
descend(plan, d_momentum, step, rid_sll, rid_slr);
cudaCheck(hipMemcpy(plan.fluence, plan.d_fluence, plan.n_beamlets*sizeof(double), hipMemcpyDeviceToHost));
plan.smooth_cpu();
cudaCheck(hipMemcpy(plan.d_fluence, plan.fluence, plan.n_beamlets*sizeof(double), hipMemcpyHostToDevice));
plan.compute_dose();
plan.stats();
if (it % 100 == 0) {
cudaCheck(hipMemcpy(plan.regions, plan.d_regions, plan.n_regions*sizeof(Region), hipMemcpyDeviceToHost));
current_time = get_time_s();
double pen = penalty(plan);
double obj = plan.regions[rid_sll].avg + plan.regions[rid_slr].avg;
double obj2 = objective(plan);
printf("\n[%.3f] Iteration %d %e\n", current_time - start_time, it, step);
//printf("penalty: %9.6f (%9.6f percent)\n", pen, ((pen-last_pen)*100/last_pen));
//printf(" obj: %9.6f (%9.6f percent)\n", obj, ((obj-last_obj)*100/last_obj));
printf("penalty: %9.6f (%9.6f)\n", pen, pen-last_pen);
printf(" obj: %9.6f (%9.6f)\n", obj, obj-last_obj);
printf(" obj2: %9.24f %9.24f %9.24f\n", obj2, obj2-last_obj2, (obj2-last_obj2)/obj2);
plan.print_table();
//if (abs(obj2-last_obj2)/obj2 < stop_ratio) {
// break;
//}
last_pen = pen;
last_obj = obj;
last_obj2 = obj2;
if (it % 10000 == 0) {
const char* out = "x_temp.txt";
cudaCheck(hipMemcpy(plan.fluence, plan.d_fluence, plan.n_beamlets*sizeof(double), hipMemcpyDeviceToHost));
FILE *f = fopen(out, "w");
for (int i = 0; i < plan.n_beamlets; i++) {
fprintf(f, "%.10e\n", plan.fluence[i]);
}
fclose(f);
printf("Last fluence written to %s\n", out);
}
}
//if (it % 100000 == 0) {
// step /= 10;
//}
if (step > min_step)
step = step/(1 + decay*it);
it++;
//if (it == 10000)
// break;
}
cudaCheck(hipMemcpy(plan.regions, plan.d_regions, plan.n_regions*sizeof(Region), hipMemcpyDeviceToHost));
double elapsed = get_time_s() - start_time;
printf("\nRan %d iterations in %.4f seconds (%.4f sec/it) \n", it, elapsed, elapsed/it);
printf("penalty: %f\n", penalty(plan));
printf(" obj: %f\n", plan.regions[rid_sll].avg + plan.regions[rid_slr].avg);
plan.print_table();
cudaCheck(hipFree(d_momentum));
}
int main(int argc, char **argv) {
signal(SIGINT, interrupt_handler);
int plan_n = atoi(argv[1]);
const char* plan_path = argv[2];
const char* out_path = argv[3];
const char* fluence_path;
const char* fluence_prefix;
float stop_ratio = 1e-5;
if (argc > 4) {
fluence_path = argv[4];
fluence_prefix = argv[5];
} else {
// We use the starting plan from Eclipse
fluence_path = plan_path;
fluence_prefix = "x_PARETO";
}
Plan plan = {};
plan.load(plan_path, fluence_path, fluence_prefix);
int rid_sll, rid_slr;
float gurobi_avg_sll, gurobi_avg_slr;
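// Hardcoded prescriptions for each plan. Argument order of set_targets:
// (is_ptv, min, avg_min, avg_max, max, eud, alpha, penalty); a value of -1 disables that bound.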
if (plan_n == 3) {
rid_sll = 5;
rid_slr = 6;
plan.regions[ 0].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 1].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 2].set_targets(false, -1, -1, -1, 60, 60, 10, 5);
plan.regions[ 3].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 4].set_targets(false, -1, -1, -1, 50, 50, 10, 5);
plan.regions[ 5].set_targets(false, -1, -1, 26, -1, 1, 1, 1);
plan.regions[ 6].set_targets(false, -1, -1, 26, -1, 1, 1, 1);
plan.regions[ 7].set_targets(false, -1, -1, -1, 70, 70, 10, 5);
plan.regions[ 8].set_targets(false, -1, -1, -1, 74.25, 74.25, 40, 5);
plan.regions[ 9].set_targets( true, 60.75, 66.15, 68.85, 74.25, 67.50, -40, 50);
plan.regions[10].set_targets( true, 54.00, 58.80, 61.20, 66.00, 60.00, -50, 100);
plan.regions[11].set_targets( true, 48.60, 52.92, 55.08, 59.40, 54.00, -40, 100);
gurobi_avg_sll = -1;
gurobi_avg_slr = -1;
} else if (plan_n == 4) {
rid_sll = 2;
rid_slr = 1;
plan.regions[ 0].set_targets(false, -1, -1, -1, 70, 70, 10, 5);
plan.regions[ 1].set_targets(false, -1, -1, 26, -1, 1, 1, 5);
plan.regions[ 2].set_targets(false, -1, -1, 26, -1, 1, 1, 5);
plan.regions[ 3].set_targets(false, -1, -1, -1, 50, 50, 10, 5);
plan.regions[ 4].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 5].set_targets( true, 59.40, 64.67, 67.32, 72.60, 66.00, -40, 100);
plan.regions[ 6].set_targets( true, 53.46, 58.21, 60.59, 65.34, 59.40, -40, 100);
plan.regions[ 7].set_targets(false, -1, -1, -1, 60, 60, 10, 5);
plan.regions[ 8].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 9].set_targets(false, -1, -1, -1, 74.25, 74.25, 40, 5);
plan.regions[10].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
gurobi_avg_sll = -1;
gurobi_avg_slr = -1;
} else if (plan_n == 5) {
rid_sll = 3;
rid_slr = 4;
plan.regions[ 0].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 1].set_targets(false, -1, -1, -1, 74.25, 74.25, 40, 5);
plan.regions[ 2].set_targets(false, -1, -1, -1, 70, 70, 10, 5);
plan.regions[ 3].set_targets(false, -1, -1, 26, -1, 26, 1, 5);
plan.regions[ 4].set_targets(false, -1, -1, 26, -1, 26, 1, 5);
plan.regions[ 5].set_targets(false, -1, -1, -1, 50, 50, 10, 5);
plan.regions[ 6].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 7].set_targets(false, -1, -1, -1, 60, 60, 10, 5);
plan.regions[ 8].set_targets( true, 48.60, 52.92, 55.08, 59.40, 54.00, -100, 150);
plan.regions[ 9].set_targets( true, 54.00, 58.80, 61.20, 66.00, 60.00, -100, 150);
plan.regions[10].set_targets( true, 59.40, 64.67, 67.32, 72.60, 66.00, -60, 50);
plan.regions[11].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
gurobi_avg_sll = -1;
gurobi_avg_slr = -1;
}
optimize_gpu(plan, rid_sll, rid_slr, gurobi_avg_sll, gurobi_avg_slr, stop_ratio);
cudaCheck(hipMemcpy(plan.fluence, plan.d_fluence, plan.n_beamlets*sizeof(double), hipMemcpyDeviceToHost));
FILE *f = fopen(out_path, "w");
for (int i = 0; i < plan.n_beamlets; i++) {
fprintf(f, "%.10e\n", plan.fluence[i]);
}
fclose(f);
printf("Last fluence written to %s\n", out_path);
}
| 2a0428665e28f2bcbfe3f0ce121f1881a5073ea2.cu | #include <stdio.h>
#include <stdlib.h>
#include <dirent.h>
#include <signal.h>
//#include "mkl.h"
#include <cuda_runtime.h>
#include <cusparse.h>
static volatile int running = 1;
void interrupt_handler(int signal) {
running = 0;
}
#define cudaCheck(result) __cudaCheck(result, __FILE__, __LINE__)
inline cudaError_t __cudaCheck(cudaError_t result, const char *file, const int line, bool abort = true) {
if (result != cudaSuccess) {
fprintf(stderr, "CUDA error at %s[%i]: %s\n", file, line, cudaGetErrorString(result));
if (abort) {
exit(result);
}
}
return result;
}
const char* cusparseGetErrorString(cusparseStatus_t result) {
switch (result) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSPARSE_STATUS_ZERO_PIVOT:
return "CUSPARSE_STATUS_ZERO_PIVOT";
}
return "CUSPARSE_UNKNOWN_ERROR";
}
#define cusparseCheck(result) __cusparseCheck(result, __FILE__, __LINE__)
inline cusparseStatus_t __cusparseCheck(cusparseStatus_t result, const char *file, const int line, bool abort = true) {
if (result != CUSPARSE_STATUS_SUCCESS) {
fprintf(stderr, "CUSPARSE error at %s[%i]: %s\n", file, line, cusparseGetErrorString(result));
if (abort) {
exit(result);
}
}
return result;
}
double get_time_ms() {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
return ts.tv_sec * 1000 + ts.tv_nsec / 1000000.0;
} else {
return 0;
}
}
double get_time_s() {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
return ts.tv_sec + ts.tv_nsec / 1e9;
} else {
return 0;
}
}
int compare_strings(const void *va, const void *vb) {
char **a = (char **) va;
char **b = (char **) vb;
return strcmp(*a, *b);
}
int read_files(const char *path, const char *pattern, char **files) {
int n_files = 0;
DIR *d = opendir(path);
struct dirent *dir;
if (d) {
while ((dir = readdir(d)) != NULL) {
if (strstr(dir->d_name, pattern) != NULL) {
files[n_files] = (char *) malloc(1000 * sizeof(char));
strcpy(files[n_files], path);
if (path[strlen(path) - 1] != '/') {
strcat(files[n_files], (char *) "/");
}
strcat(files[n_files], dir->d_name);
n_files++;
}
}
closedir(d);
}
qsort(files, n_files, sizeof(char *), compare_strings);
return n_files;
}
struct SparseMatrix {
int n_nz;
int n_rows;
int n_cols;
int *rows;
int *cols;
double *vals;
int *d_rows;
int *d_cols;
double *d_vals;
int *d_rows_t;
int *d_cols_t;
double *d_vals_t;
cusparseHandle_t handle;
cusparseMatDescr_t descr;
cusparseSpMatDescr_t sp_descr;
cusparseSpMatDescr_t sp_descr_t;
void malloc_cpu() {
rows = (int *) malloc(n_nz*sizeof(int));
cols = (int *) malloc(n_nz*sizeof(int));
vals = (double *) malloc(n_nz*sizeof(double));
n_rows = 0;
n_cols = 0;
}
void free_cpu() {
free(rows);
free(cols);
free(vals);
}
void free_gpu() {
cudaCheck(cudaFree(d_rows));
cudaCheck(cudaFree(d_cols));
cudaCheck(cudaFree(d_vals));
cudaCheck(cudaFree(d_rows_t));
cudaCheck(cudaFree(d_cols_t));
cudaCheck(cudaFree(d_vals_t));
}
void copy_to_gpu() {
cudaCheck(cudaMalloc(&d_rows, n_nz*sizeof(int)));
cudaCheck(cudaMalloc(&d_cols, n_nz*sizeof(int)));
cudaCheck(cudaMalloc(&d_vals, n_nz*sizeof(double)));
cudaCheck(cudaMalloc(&d_rows_t, (n_cols+1)*sizeof(int)));
cudaCheck(cudaMalloc(&d_cols_t, n_nz*sizeof(int)));
cudaCheck(cudaMalloc(&d_vals_t, n_nz*sizeof(double)));
cudaCheck(cudaMemcpy(d_rows, rows, n_nz*sizeof(int), cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(d_cols, cols, n_nz*sizeof(int), cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(d_vals, vals, n_nz*sizeof(double), cudaMemcpyHostToDevice));
}
void sort_by_row() {
int *p;
void *p_buffer;
size_t p_buffer_size = 0;
double *sorted_vals;
cudaCheck(cudaMalloc(&sorted_vals, n_nz*sizeof(double)));
cusparseCheck(cusparseXcsrsort_bufferSizeExt(handle, n_rows, n_cols, n_nz, d_rows, d_cols, &p_buffer_size));
cudaCheck(cudaMalloc(&p, n_nz*sizeof(int)));
cudaCheck(cudaMalloc(&p_buffer, p_buffer_size*sizeof(char)));
cusparseCheck(cusparseCreateIdentityPermutation(handle, n_nz, p));
cusparseCheck(cusparseXcoosortByRow(handle, n_rows, n_cols, n_nz, d_rows, d_cols, p, p_buffer));
cusparseCheck(cusparseDgthr(handle, n_nz, d_vals, sorted_vals, p, CUSPARSE_INDEX_BASE_ZERO));
cudaCheck(cudaFree(d_vals));
cudaCheck(cudaFree(p));
cudaCheck(cudaFree(p_buffer));
d_vals = sorted_vals;
}
void coo_to_csr() {
int *csr;
cudaCheck(cudaMalloc(&csr, (n_rows+1)*sizeof(int)));
cusparseCheck(cusparseXcoo2csr(handle, d_rows, n_nz, n_rows, csr, CUSPARSE_INDEX_BASE_ZERO));
cudaCheck(cudaFree(d_rows));
d_rows = csr;
cusparseCheck(cusparseCreateCsr(&sp_descr, n_rows, n_cols, n_nz, d_rows, d_cols, d_vals,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F));
}
void transpose_csr() {
void *p_buffer;
size_t p_buffer_size = 0;
cusparseCheck(cusparseCsr2cscEx2_bufferSize(handle, n_rows, n_cols, n_nz, d_vals, d_rows, d_cols,
d_vals_t, d_rows_t, d_cols_t, CUDA_R_64F, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO,
CUSPARSE_CSR2CSC_ALG2, &p_buffer_size));
cudaCheck(cudaMalloc(&p_buffer, p_buffer_size*sizeof(char)));
cusparseCheck(cusparseCsr2cscEx2(handle, n_rows, n_cols, n_nz, d_vals, d_rows, d_cols,
d_vals_t, d_rows_t, d_cols_t, CUDA_R_64F, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO,
CUSPARSE_CSR2CSC_ALG2, p_buffer));
cudaCheck(cudaFree(p_buffer));
cusparseCheck(cusparseCreateCsr(&sp_descr_t, n_cols, n_rows, n_nz, d_rows_t, d_cols_t, d_vals_t,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F));
}
void setup_gpu() {
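// Upload the COO triplets, sort them by row, convert to CSR, and also build the transposed
// CSR matrix, which is used later to map voxel-space gradients back to beamlet space.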
cusparseCheck(cusparseCreate(&handle));
cusparseCheck(cusparseCreateMatDescr(&descr));
cusparseCheck(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL));
cusparseCheck(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO));
copy_to_gpu();
sort_by_row();
coo_to_csr();
transpose_csr();
}
};
struct Region {
char *name;
int n_voxels;
double min;
double max;
double avg;
double f; // Objective function evaluation
double eud;
double dF_dEUD;
double sum_alpha;
// Virtual EUD to control PTV overdosage
// Hardcoded to eud + 1 for now
double v_f;
double v_eud;
double v_dF_dEUD;
double v_sum_alpha;
bool is_optimized;
bool is_ptv;
double pr_min;
double pr_max;
double pr_avg_min;
double pr_avg_max;
double *grad_avg;
double pr_eud;
int penalty;
int alpha;
void set_targets(bool t_ptv, double t_min, double t_avg_min, double t_avg_max, double t_max,
double t_eud, int t_alpha, int t_penalty) {
if (t_eud < 0 && t_min < 0 && t_max < 0 &&
t_avg_min < 0 && t_avg_max < 0) {
is_optimized = false;
} else {
is_optimized = true;
is_ptv = t_ptv;
pr_min = t_min;
pr_max = t_max;
pr_avg_min = t_avg_min;
pr_avg_max = t_avg_max;
pr_eud = t_eud;
alpha = t_alpha;
penalty = t_penalty;
f = 0;
v_f = 0;
eud = 0;
v_eud = 0;
dF_dEUD = 0;
v_dF_dEUD = 0;
sum_alpha = 0;
v_sum_alpha = 0;
}
}
};
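// Warp- and block-level reductions (min/max/sum) built on __shfl_down_sync. The block reduction
// stores one partial result per warp in shared memory and reduces those in the first warp.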
__inline__ __device__ float warpReduceMin(float val) {
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
float v = __shfl_down_sync(0xffffffff, val, offset);
if (v < val) {
val = v;
}
}
return val;
}
__inline__ __device__ float warpReduceMax(float val) {
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
float v = __shfl_down_sync(0xffffffff, val, offset);
if (v > val) {
val = v;
}
}
return val;
}
template <class T>
__inline__ __device__ T warpReduceSum(T val) {
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
val += __shfl_down_sync(0xffffffff, val, offset);
}
return val;
}
template <class T>
__inline__ __device__ T blockReduce(T val, T (*warp_reduction)(T), T defval) {
__shared__ T shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warp_reduction(val);
// Write reduced value to shared memory
if (lane == 0) {
shared[wid] = val;
}
__syncthreads();
// Ensure we only grab a value from shared memory if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : defval;
if (wid == 0) {
val = warp_reduction(val);
}
return val;
}
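// One thread block per region: threads stride over all voxels of the region accumulating min, max,
// sum and sum(d^alpha); thread 0 then stores the region statistics, the generalized EUD, and the
// logistic objective value f together with its derivative dF/dEUD.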
__global__ void stats_gpu(double *doses, char *voxel_regions, int n_regions, int n_voxels,
Region *regions) {
int rid = blockIdx.x;
float min = 1e10;
float max = 0;
float sum = 0;
double eud = 0;
double v_eud = 0;
double sum_alpha = 0;
double v_sum_alpha = 0;
for (int i = threadIdx.x; i < n_voxels; i += blockDim.x) {
if (voxel_regions[rid*n_voxels + i]) {
float dose = doses[i];
if (dose < min) {
min = dose;
}
if (dose > max) {
max = dose;
}
sum += dose;
if (dose > 0) {
sum_alpha += pow((double) dose, (double) regions[rid].alpha);
if (regions[rid].is_ptv) {
v_sum_alpha += pow((double) dose, (double) -regions[rid].alpha);
}
}
}
}
min = blockReduce<float>(min, warpReduceMin, 1e10);
max = blockReduce<float>(max, warpReduceMax, 0);
sum = blockReduce<float>(sum, warpReduceSum, 0);
sum_alpha = blockReduce<double>(sum_alpha, warpReduceSum, 0);
if (regions[rid].is_ptv) {
v_sum_alpha = blockReduce<double>(v_sum_alpha, warpReduceSum, 0);
}
if (threadIdx.x == 0) {
regions[rid].min = min;
regions[rid].max = max;
regions[rid].avg = sum / regions[rid].n_voxels;
eud = pow(sum_alpha/regions[rid].n_voxels, 1.0/regions[rid].alpha);
regions[rid].sum_alpha = sum_alpha;
regions[rid].eud = eud;
if (regions[rid].is_ptv) {
v_eud = pow(v_sum_alpha/regions[rid].n_voxels, 1.0/-regions[rid].alpha);
regions[rid].v_sum_alpha = v_sum_alpha;
regions[rid].v_eud = v_eud;
}
if (regions[rid].is_optimized) {
int n = regions[rid].penalty;
int pd = regions[rid].pr_eud;
int v_pd = pd + 1; // Hardcoded virtual PTV prescribed dose
if (regions[rid].is_ptv) {
regions[rid].f = 1/(1 + pow(pd/eud, n));
regions[rid].dF_dEUD = (n*regions[rid].f/eud) * pow(pd/eud, n);
// Virtual EUD to control PTV over-dosage
regions[rid].v_f = 1/(1 + pow(v_eud/v_pd, n));
regions[rid].v_dF_dEUD = -(n*regions[rid].v_f/v_eud) * pow(v_eud/v_pd, n);
} else {
regions[rid].f = 1/(1 + pow(eud/pd, n));
regions[rid].dF_dEUD = -(n*regions[rid].f/eud) * pow(eud/pd, n);
}
}
}
}
__global__ void init_fluence(double *fluence, int n_beamlets, double value) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_beamlets) {
fluence[idx] = value;
}
}
__global__ void scale_doses(double *doses, int n_voxels, double dose_grid_scaling) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
doses[idx] *= dose_grid_scaling;
}
}
__global__ void apply_gradients_OLD(double *gradients, double *momentum, int n_beamlets, int n_gradients, float step, double *fluence) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
double beta = 0.9; // momentum coefficient; must be a floating-point type or it truncates to 0
if (idx < n_beamlets) {
double gradient = 0;
for (int i = 0; i < n_gradients; i++) {
gradient += gradients[i*n_beamlets + idx];
}
momentum[idx] = beta*momentum[idx] + (1-beta)*gradient;
fluence[idx] += step*momentum[idx];
if (fluence[idx] < 0) {
fluence[idx] = 0;
}
if (fluence[idx] > 1) {
fluence[idx] = 1;
}
}
}
//__global__ void apply_gradients(double *gradients, double *momentum, int n_beamlets, int n_gradients, float step, double *fluence) {
// int idx = blockIdx.x*blockDim.x + threadIdx.x;
// int beta = 0.9;
//
// if (idx < n_beamlets) {
// double gradient = 0;
// for (int i = 0; i < n_gradients; i++) {
// momentum[i*n_beamlets + idx] = beta*momentum[i*n_beamlets + idx] + (1-beta)*gradients[i*n_beamlets + idx];
// gradient += momentum[i*n_beamlets + idx];
// }
//
// fluence[idx] += step*gradient;
//
// if (fluence[idx] < 0) {
// fluence[idx] = 0;
// }
// if (fluence[idx] > 0.2) {
// fluence[idx] = 0.2;
// }
// }
//}
__global__ void apply_gradients(double *gradients, double *momentum, int n_beamlets, int n_gradients, float step, double *fluence) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
double beta = 0.9; // momentum coefficient; must be a floating-point type or it truncates to 0
if (idx < n_beamlets) {
momentum[idx] = beta*momentum[idx] + (1-beta)*gradients[idx];
fluence[idx] += step*momentum[idx];
// Very rough implementation
//double threshold = 0.02;
//if (idx > 0) {
// double diff = fluence[idx] - fluence[idx-1];
// if (diff > threshold) {
// fluence[idx] -= 0.01;
// }
//}
//if (idx < n_beamlets - 1) {
// double diff = fluence[idx] - fluence[idx+1];
// if (diff > threshold) {
// fluence[idx] -= 0.01;
// }
//}
// Quick-and-dirty smoothing :)
//double w = 1e-1;
//if (idx > 0 && idx < n_beamlets - 1) {
// double neigh_avg = (fluence[idx - 1] + fluence[idx] + fluence[idx + 1])/3;
// if (fluence[idx] > neigh_avg) {
// fluence[idx] -= w*abs(fluence[idx] - neigh_avg);
// }
// if (fluence[idx] < neigh_avg) {
// fluence[idx] += w*abs(fluence[idx] - neigh_avg);
// }
//}
if (fluence[idx] < 0) {
fluence[idx] = 0;
}
if (fluence[idx] > 0.3) {
fluence[idx] = 0.3;
}
}
}
#define BEAM_MAP_X 120
#define BEAM_MAP_Y 120
struct Plan {
char *name;
int n_beams;
int n_beamlets;
int *n_beamlets_beam;
int n_voxels;
int n_regions;
double dose_grid_scaling;
Region* regions;
Region* d_regions;
char *voxel_regions;
char *d_voxel_regions;
SparseMatrix spm;
double *fluence;
double *smoothed_fluence;
double *doses;
double *d_fluence;
double *d_doses;
char *files[100];
int *beam_maps;
int *d_beam_maps;
cusparseDnVecDescr_t fluence_descr;
cusparseDnVecDescr_t doses_descr;
void check_line(int result) {
if (result < 0) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to read line.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
char* get_str(char *line, size_t len) {
char *temp = (char *) malloc(len*sizeof(char));
snprintf(temp, len, "%s", line);
temp[strcspn(temp, "\r\n")] = 0; // Remove newline
return temp;
}
int get_int(char *line, char **end) {
return strtoll(line, end, 10);
}
float get_float(char *line, char **end) {
return strtof(line, end);
}
void parse_config(const char *path) {
int n_files = read_files(path, "m_", files);
FILE *f = fopen(files[0], "r");
if (f == NULL) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to open file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
printf("Loading %s...\n", files[0]);
char *line = NULL;
char *end = NULL;
size_t len = 0;
check_line(getline(&line, &len, f));
name = get_str(line, len);
check_line(getline(&line, &len, f));
n_beams = get_int(line, &end);
n_beamlets = 0;
n_beamlets_beam = (int *)malloc(n_beams * sizeof(int));
for (int i = 0; i < n_beams; i++) {
check_line(getline(&line, &len, f));
int index = get_int(line, &end);
int beamlets = get_int(end, &line);
n_beamlets_beam[index - 1] = beamlets;
n_beamlets += beamlets;
}
check_line(getline(&line, &len, f));
n_voxels = get_int(line, &end);
check_line(getline(&line, &len, f));
dose_grid_scaling = get_float(line, &end);
check_line(getline(&line, &len, f));
n_regions = get_int(line, &end);
regions = (Region *) malloc(n_regions*sizeof(Region));
for (int i = 0; i < n_regions; i++) {
check_line(getline(&line, &len, f));
get_int(line, &end);
char *name = get_str(end + 1, len);
regions[i].name = name;
regions[i].n_voxels = 0;
}
line = NULL;
len = 0;
while (getline(&line, &len, f) != -1) {
fprintf(stderr, "[WARNING] Line not processed: %s", line);
}
fclose(f);
free(files[0]);
cudaCheck(cudaMalloc(&d_regions, n_regions*sizeof(Region)));
}
void parse_voxel_regions(const char *path) {
int n_files = read_files(path, "v_", files);
FILE *f = fopen(files[0], "r");
if (f == NULL) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to open file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
printf("Loading %s...\n", files[0]);
voxel_regions = (char *) malloc(n_voxels*n_regions*sizeof(char));
char line[1024];
int num = 0;
int offset = 0;
while (fgets(line, sizeof line, f)) {
if (sscanf(line, "%d", &num)) {
for (int i = 0; i < n_regions; i++) {
voxel_regions[offset + i*n_voxels] = num & 1;
num >>= 1;
}
offset++;
} else {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to read voxel regions.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < n_regions; i++) {
for (int j = 0; j < n_voxels; j++) {
if (voxel_regions[i*n_voxels + j]) {
regions[i].n_voxels += 1;
}
}
}
fclose(f);
free(files[0]);
cudaCheck(cudaMalloc(&d_voxel_regions, n_voxels*n_regions*sizeof(char)));
cudaCheck(cudaMemcpy(d_voxel_regions, voxel_regions, n_voxels*n_regions*sizeof(char), cudaMemcpyHostToDevice));
}
void load_spm(const char *path) {
int n_files = read_files(path, "d_", files);
FILE **fp = (FILE **) malloc(n_files*sizeof(FILE *));
for (int i = 0; i < n_files; i++) {
fp[i] = fopen(files[i], "r");
if (fp[i] == NULL) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to open file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
int n_nz = 0;
int count = fscanf(fp[i], "%d", &n_nz);
spm.n_nz += n_nz;
}
spm.malloc_cpu();
int idx = 0;
int offset = 0;
for (int i = 0; i < n_files; i++) {
printf("Loading %s... ", files[i]);
int n_read = 0;
while (true) {
int row, col;
double val;
int count = fscanf(fp[i], "%d %d %lf", &row, &col, &val);
if(count == EOF || !count) {
break;
}
int new_col = offset + col;
spm.rows[idx] = row;
spm.cols[idx] = new_col;
spm.vals[idx] = val;
idx++;
n_read++;
if (row > spm.n_rows) {
spm.n_rows = row;
}
if (new_col > spm.n_cols) {
spm.n_cols = new_col;
}
}
printf("%d values read.\n", n_read);
offset = spm.n_cols + 1;
fclose(fp[i]);
free(files[i]);
}
spm.n_rows++;
// Sometimes there's missing voxels,
// but we want the dimensions to match for SpMM
if (spm.n_rows < n_voxels) {
spm.n_rows = n_voxels;
}
spm.n_cols++;
free(fp);
}
void load_fluence(const char *path, const char *prefix) {
int n_files = read_files(path, prefix, files);
FILE **fp = (FILE **) malloc(n_files*sizeof(FILE *));
int idx = 0;
for (int i = 0; i < n_files; i++) {
printf("Loading %s... ", files[i]);
fp[i] = fopen(files[i], "r");
if (fp[i] == NULL) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to open file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
int n_read = 0;
while (true) {
int count = fscanf(fp[i], "%lf", &(fluence[idx]));
if(count == EOF || !count) {
break;
}
idx++;
n_read++;
}
printf("%d values read.\n", n_read);
fclose(fp[i]);
free(files[i]);
}
cudaCheck(cudaMemcpy(d_fluence, fluence, n_beamlets*sizeof(double), cudaMemcpyHostToDevice));
free(fp);
}
void load_coords(const char *path) {
int n_files = read_files(path, "xcoords_", files);
beam_maps = (int *) malloc(n_beams*BEAM_MAP_Y*BEAM_MAP_X*sizeof(int));
for (int i = 0; i < n_beams*BEAM_MAP_Y*BEAM_MAP_X; i++) {
beam_maps[i] = -1;
}
int idx = 0;
FILE **fp = (FILE **) malloc(n_files*sizeof(FILE *));
for (int i = 0; i < n_files; i++) {
fp[i] = fopen(files[i], "r");
if (fp[i] == NULL) {
fprintf(stderr, "ERROR in %s (%s:%d): Unable to open file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
printf("Loading %s... ", files[i]);
char ignored[1024];
int n_read = 0;
while (true) {
int col, row;
int count = fscanf(fp[i], "%d %d", &col, &row);
if(count == EOF) {
break;
} else if (!count) {
fgets(ignored, sizeof(ignored), fp[i]);
} else if (count == 1) {
// Header values, ignored
continue;
} else if (count == 2) {
beam_maps[i*BEAM_MAP_Y*BEAM_MAP_X + col*BEAM_MAP_Y + row] = idx;
n_read++;
idx++;
} else {
fprintf(stderr, "ERROR in %s (%s:%d): While reading coordinate file.\n",
__func__, __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
printf("%d coordinates read.\n", n_read);
fclose(fp[i]);
free(files[i]);
}
//cudaCheck(cudaMemcpy(d_beam_maps, beam_maps,
// n_beams*BEAM_MAP_Y*BEAM_MAP_X*sizeof(int), cudaMemcpyHostToDevice));
free(fp);
}
void init_fluence(float value) {
for (int i = 0; i < n_beamlets; i++) {
fluence[i] = value;
}
cudaCheck(cudaMemcpy(d_fluence, fluence, n_beamlets*sizeof(double), cudaMemcpyHostToDevice));
}
void print() {
printf("Name: %s\n", name);
printf("Number of beams: %d\n", n_beams);
for (int i = 0; i < n_beams; i++) {
printf(" Beam %d: %d beamlets\n", i + 1, n_beamlets_beam[i]);
}
printf("Total: %d beamlets\n", n_beamlets);
printf("Number of voxels: %d\n", n_voxels);
printf("Dose Grid Scaling: %e\n", dose_grid_scaling);
printf("Number of regions: %d\n", n_regions);
for (int i = 0; i < n_regions; i++) {
printf(" Region %2d (%4d): %-16s %8d voxels\n", i, (int) pow(2, i), regions[i].name, regions[i].n_voxels);
}
printf("Dose matrix: %d x %d with %d nonzeros.\n", spm.n_rows, spm.n_cols, spm.n_nz);
}
void compute_dose() {
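// dose = dose_grid_scaling * (D * fluence), computed with a cusparseSpMV on the CSR dose matrix.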
//cudaCheck(cudaMemcpy(d_fluence, fluence, n_beamlets*sizeof(double), cudaMemcpyHostToDevice));
//memset(doses, 0, n_voxels*sizeof(*doses));
cudaCheck(cudaMemset(d_doses, 0, n_voxels*sizeof(*d_doses)));
//cusparseCheck(cusparseDcsrmv(spm.handle, CUSPARSE_OPERATION_NON_TRANSPOSE, spm.n_rows, spm.n_cols, spm.n_nz, &alpha, spm.descr, spm.d_vals, spm.d_rows, spm.d_cols, d_fluence, &beta, d_doses));
double alpha = 1.0, beta = 0.0;
void *p_buffer;
size_t p_buffer_size = 0;
cusparseCheck(cusparseSpMV_bufferSize(spm.handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, spm.sp_descr, fluence_descr, &beta,
doses_descr, CUDA_R_64F, CUSPARSE_CSRMV_ALG1, &p_buffer_size));
cudaCheck(cudaMalloc(&p_buffer, p_buffer_size*sizeof(char)));
cusparseCheck(cusparseSpMV(spm.handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, spm.sp_descr, fluence_descr, &beta,
doses_descr, CUDA_R_64F, CUSPARSE_CSRMV_ALG1, p_buffer));
int block = 512;
int grid = (n_voxels + block - 1)/block;
scale_doses<<<grid, block>>>(d_doses, n_voxels, dose_grid_scaling);
//cudaCheck(cudaMemcpy(doses, d_doses, n_voxels*sizeof(double), cudaMemcpyDeviceToHost));
}
void stats() {
//cudaCheck(cudaMemcpy(d_regions, regions, n_regions*sizeof(Region), cudaMemcpyHostToDevice));
stats_gpu<<<n_regions, 512>>>(d_doses, d_voxel_regions, n_regions, n_voxels, d_regions);
//cudaCheck(cudaMemcpy(regions, d_regions, n_regions*sizeof(Region), cudaMemcpyDeviceToHost));
}
void print_table() {
printf(" Region Min Avg Max EUD dF_dEUD v_EUD v_dF_dEUD f v_f\n");
for (int i = 0; i < n_regions; i++) {
if (regions[i].is_optimized) {
printf("%-15s %11.6lf %11.6lf %11.6lf %11.6lf %11.6lf %11.6lf %11.6lf %11.6lf %11.6lf\n", regions[i].name, regions[i].min, regions[i].avg, regions[i].max, regions[i].eud, regions[i].dF_dEUD, regions[i].v_eud, regions[i].v_dF_dEUD, regions[i].f, regions[i].v_f);
}
}
}
void load(const char *plan_path, const char *fluence_path, const char *fluence_prefix) {
parse_config(plan_path);
parse_voxel_regions(plan_path);
load_spm(plan_path);
spm.setup_gpu();
fluence = (double *) malloc(n_beamlets*sizeof(double));
smoothed_fluence = (double *) malloc(n_beamlets*sizeof(double));
cudaCheck(cudaMalloc(&d_fluence, n_beamlets*sizeof(double)));
doses = (double *) malloc(n_voxels*sizeof(double));
cudaCheck(cudaMalloc(&d_doses, n_voxels*sizeof(double)));
load_coords(plan_path);
load_fluence(fluence_path, fluence_prefix);
//init_fluence(1e-2);
print();
cusparseCreateDnVec(&fluence_descr, n_beamlets, d_fluence, CUDA_R_64F);
cusparseCreateDnVec(&doses_descr, n_voxels, d_doses, CUDA_R_64F);
}
void smooth_cpu() {
int n_neighbors = 8;
int sum_weights = 1000;
int *neighbors = (int *) malloc(n_neighbors*sizeof(int));
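// 2-D smoothing of each beam's fluence map: every beamlet becomes a weighted average of itself
// and its 8 neighbors; neighbors outside the aperture contribute 0.8 of their unit weight to the
// center beamlet instead.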
for (int i = 0; i < n_beams; i++) {
for (int y = 0; y < BEAM_MAP_Y; y++) {
for (int x = 0; x < BEAM_MAP_X; x++) {
int offset = i*BEAM_MAP_Y*BEAM_MAP_X;
int idx = beam_maps[offset + BEAM_MAP_Y*y + x];
float center_weight = sum_weights - n_neighbors;
if (idx >= 0) {
smoothed_fluence[idx] = 0;
neighbors[0] = beam_maps[offset + BEAM_MAP_Y*(y-1) + (x-1)];
neighbors[1] = beam_maps[offset + BEAM_MAP_Y*(y-1) + (x )];
neighbors[2] = beam_maps[offset + BEAM_MAP_Y*(y-1) + (x+1)];
neighbors[3] = beam_maps[offset + BEAM_MAP_Y*(y ) + (x-1)];
neighbors[4] = beam_maps[offset + BEAM_MAP_Y*(y ) + (x+1)];
neighbors[5] = beam_maps[offset + BEAM_MAP_Y*(y+1) + (x-1)];
neighbors[6] = beam_maps[offset + BEAM_MAP_Y*(y+1) + (x )];
neighbors[7] = beam_maps[offset + BEAM_MAP_Y*(y+1) + (x+1)];
//if (neighbors[3] < 0 || neighbors[4] < 0) {
// // This is a border beamlet, ignore other rows
// neighbors[0] = -1;
// neighbors[1] = -1;
// neighbors[2] = -1;
// neighbors[5] = -1;
// neighbors[6] = -1;
// neighbors[7] = -1;
//}
for (int j = 0; j < n_neighbors; j++) {
if (neighbors[j] >= 0) {
smoothed_fluence[idx] += fluence[neighbors[j]];
} else {
center_weight += 0.8;
}
}
smoothed_fluence[idx] += center_weight*fluence[idx];
smoothed_fluence[idx] /= sum_weights;
}
}
}
}
for (int i = 0; i < n_beamlets; i++) {
fluence[i] = smoothed_fluence[i];
}
free(neighbors);
}
};
__global__ void voxels_min(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx]) {
voxels[idx] = (doses[idx] < regions[rid].pr_min);
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_min_old(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx] && doses[idx] < regions[rid].pr_min) {
voxels[idx] = (regions[rid].pr_min - doses[idx]);
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_max(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx]) {
voxels[idx] = -1*(doses[idx] > regions[rid].pr_max);
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_max_old(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx] && doses[idx] > regions[rid].pr_max) {
voxels[idx] = (regions[rid].pr_max - doses[idx]);
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_average(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (regions[rid].avg < regions[rid].pr_avg_min) {
voxels[idx] = voxel_regions[rid*n_voxels + idx];
} else if (regions[rid].avg > regions[rid].pr_avg_max) {
voxels[idx] = -voxel_regions[rid*n_voxels + idx];
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_eud(Region *regions, char *voxel_regions, double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
Region r = regions[rid];
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx]) {
double dEUD_dd = r.eud*pow(doses[idx], r.alpha - 1)/r.sum_alpha;
voxels[idx] = r.dF_dEUD * dEUD_dd;
if (r.is_ptv) {
dEUD_dd = r.v_eud*pow(doses[idx], -r.alpha - 1)/r.v_sum_alpha;
voxels[n_voxels + idx] = r.v_dF_dEUD * dEUD_dd;
}
} else {
voxels[idx] = 0;
if (r.is_ptv) {
voxels[n_voxels + idx] = 0;
}
}
}
}
__global__ void voxels_average_old(Region *regions, char *voxel_regions,double *doses, int n_voxels, int rid, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
if (voxel_regions[rid*n_voxels + idx] && regions[rid].avg < regions[rid].pr_avg_min) {
voxels[idx] = (regions[rid].pr_avg_min - regions[rid].avg);
} else if (voxel_regions[rid*n_voxels + idx] && regions[rid].avg > regions[rid].pr_avg_max) {
voxels[idx] = (regions[rid].pr_avg_max - regions[rid].avg);
} else {
voxels[idx] = 0;
}
}
}
__global__ void voxels_average_objective(Region *regions, char *voxel_regions, int n_voxels, int rid, float penalty, double *voxels) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
voxels[idx] = penalty*voxel_regions[rid*n_voxels + idx];
}
}
__global__ void reduce_gradient(double *voxels, int n_voxels, int n_gradients) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n_voxels) {
double influence = 0;
for (int i = 0; i < n_gradients; i++) {
influence += voxels[i*n_voxels + idx];
}
voxels[idx] = influence;
}
}
double penalty(Plan plan) {
double penalty = 0;
for (int i = 0; i < plan.n_regions; i++) {
Region region = plan.regions[i];
if (region.is_optimized) {
if (region.pr_min > 0 &&
region.min < region.pr_min) {
penalty += region.pr_min - region.min;
}
if (region.pr_max > 0 &&
region.max > region.pr_max) {
penalty += region.max - region.pr_max;
}
if (region.pr_avg_min > 0 &&
region.avg < region.pr_avg_min) {
penalty += region.pr_avg_min - region.avg;
}
if (region.pr_avg_max > 0 &&
region.avg > region.pr_avg_max) {
penalty += region.avg - region.pr_avg_max;
}
}
}
return penalty;
}
double objective(Plan plan) {
double objective = 1;
for (int i = 0; i < plan.n_regions; i++) {
Region region = plan.regions[i];
if (region.is_optimized) {
objective *= region.f;
if (region.is_ptv) {
objective *= region.v_f;
}
}
}
return objective;
}
//double objective(Plan plan) {
// double objective = 0;
//
// for (int i = 0; i < plan.n_regions; i++) {
// Region region = plan.regions[i];
// if (region.is_optimized) {
// objective += region.avg*region.n_voxels;
// }
// }
// return objective/plan.n_voxels;
//}
void add_grad(Plan plan, double *a, double *b, double step) {
for (int i = 0; i < plan.n_beamlets; i++) {
a[i] += step*b[i];
}
free(b);
}
void vector_stats(const char *name, double *vector, int n_values) {
double min = 1e10, max = 0, avg = 0;
for (int i = 0; i < n_values; i++) {
if (vector[i] < min) {
min = vector[i];
}
if (vector[i] > max) {
max = vector[i];
}
avg += vector[i];
}
avg /= n_values;
printf("%s: %f %f %f\n", name, min, max, avg);
}
int descend(Plan plan, double *d_momentum, float step, int rid_sll, int rid_slr) {
int block = 512;
int grid = (plan.n_voxels + block - 1)/block;
double *d_voxels;
int gradients_per_region = 3; // Warning, hardcoded!
cudaCheck(cudaMalloc(&d_voxels, gradients_per_region*plan.n_regions*plan.n_voxels*sizeof(double)));
// Hardcoded objective function gradients
int n_gradients = 2;
float penalty = -0.0000;
voxels_average_objective<<<grid, block>>>(plan.d_regions, plan.d_voxel_regions, plan.n_voxels, rid_sll, penalty, &(d_voxels[0]));
voxels_average_objective<<<grid, block>>>(plan.d_regions, plan.d_voxel_regions, plan.n_voxels, rid_slr, penalty, &(d_voxels[plan.n_voxels]));
int offset = n_gradients*plan.n_voxels;
for (int i = 0; i < plan.n_regions; i++) {
Region region = plan.regions[i];
if (region.is_optimized) {
voxels_eud<<<grid, block>>>(plan.d_regions, plan.d_voxel_regions, plan.d_doses, plan.n_voxels, i, &(d_voxels[offset]));
offset += plan.n_voxels;
n_gradients++;
if (region.is_ptv) {
offset += plan.n_voxels;
n_gradients++;
}
}
}
//printf("n_gradients: %d\n", n_gradients);
reduce_gradient<<<grid, block>>>(d_voxels, plan.n_voxels, n_gradients);
double *d_gradients;
cudaCheck(cudaMalloc(&d_gradients, n_gradients*plan.n_beamlets*sizeof(double)));
SparseMatrix spm = plan.spm;
//cusparseCheck(cusparseDcsrmm(spm.handle, CUSPARSE_OPERATION_TRANSPOSE, spm.n_rows, n_gradients, spm.n_cols, spm.n_nz, &alpha, spm.descr, spm.d_vals, spm.d_rows, spm.d_cols, d_voxels, spm.n_rows, &beta, d_gradients, spm.n_cols));
//cusparseCheck(cusparseDcsrmm(spm.handle, CUSPARSE_OPERATION_NON_TRANSPOSE, spm.n_cols, n_gradients, spm.n_rows, spm.n_nz, &alpha, spm.descr, spm.d_vals_t, spm.d_rows_t, spm.d_cols_t, d_voxels, spm.n_rows, &beta, d_gradients, spm.n_cols));
//cusparseCheck(cusparseDcsrmv(spm.handle, CUSPARSE_OPERATION_NON_TRANSPOSE, spm.n_cols, spm.n_rows, spm.n_nz, &alpha, spm.descr, spm.d_vals_t, spm.d_rows_t, spm.d_cols_t, d_voxels, &beta, d_gradients));
cusparseDnVecDescr_t voxels_descr;
cusparseDnVecDescr_t gradient_descr;
cusparseCreateDnVec(&voxels_descr, plan.n_voxels, d_voxels, CUDA_R_64F);
cusparseCreateDnVec(&gradient_descr, plan.n_beamlets, d_gradients, CUDA_R_64F);
double alpha = 1.0, beta = 0.0;
void *p_buffer;
size_t p_buffer_size = 0;
cusparseCheck(cusparseSpMV_bufferSize(spm.handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, spm.sp_descr_t, voxels_descr, &beta,
gradient_descr, CUDA_R_64F, CUSPARSE_CSRMV_ALG1, &p_buffer_size));
cudaCheck(cudaMalloc(&p_buffer, p_buffer_size*sizeof(char)));
cusparseCheck(cusparseSpMV(spm.handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, spm.sp_descr_t, voxels_descr, &beta,
gradient_descr, CUDA_R_64F, CUSPARSE_CSRMV_ALG1, p_buffer));
//double *gradients = (double *) malloc(n_gradients*plan.n_beamlets*sizeof(double));;
//cudaCheck(cudaMemcpy(gradients, d_gradients, n_gradients*plan.n_beamlets*sizeof(double), cudaMemcpyDeviceToHost));
//vector_stats("obj_1", &gradients[0], plan.n_beamlets);
//vector_stats("obj_2", &gradients[plan.n_beamlets], plan.n_beamlets);
//offset = 2*plan.n_beamlets;
//for (int i = 0; i < plan.n_regions; i++) {
// Region region = plan.regions[i];
// if (region.is_optimized) {
// printf("gpu %s\n", region.name);
// if (region.pr_avg_min > 0 || region.pr_avg_max > 0) {
// vector_stats("grad_avg", &gradients[offset], plan.n_beamlets);
// offset += plan.n_beamlets;
// }
// if (region.pr_min > 0) {
// vector_stats("grad_min", &gradients[offset], plan.n_beamlets);
// offset += plan.n_beamlets;
// }
// if (region.pr_max > 0) {
// vector_stats("grad_max", &gradients[offset], plan.n_beamlets);
// offset += plan.n_beamlets;
// }
// }
//}
grid = (plan.n_beamlets + block - 1)/block;
apply_gradients<<<grid, block>>>(d_gradients, d_momentum, plan.n_beamlets, n_gradients, step, plan.d_fluence);
cudaCheck(cudaFree(d_gradients));
cudaCheck(cudaFree(d_voxels));
return n_gradients;
}
void optimize_gpu(Plan plan, int rid_sll, int rid_slr, float gurobi_avg_sll, float gurobi_avg_slr, float stop_ratio) {
cudaCheck(cudaMemcpy(plan.d_regions, plan.regions, plan.n_regions*sizeof(Region), cudaMemcpyHostToDevice));
//cudaCheck(cudaMemset(plan.d_fluence, 0, plan.n_beamlets*sizeof(double)));
double *d_momentum;
int gradients_per_region = 3; // Warning, hardcoded!
cudaCheck(cudaMalloc(&d_momentum, gradients_per_region*plan.n_regions*plan.n_beamlets*sizeof(double)));
cudaCheck(cudaMemset(d_momentum, 0, gradients_per_region*plan.n_regions*plan.n_beamlets*sizeof(double)));
plan.compute_dose();
plan.stats();
cudaCheck(cudaMemcpy(plan.regions, plan.d_regions, plan.n_regions*sizeof(Region), cudaMemcpyDeviceToHost));
printf("Initial solution:\n");
plan.print_table();
//exit(0);
// TODO: Initializing the fluence to 0 breaks the algorithm because of the EUDs. This needs revisiting, or the fluence should be initialized to 0.1 or so.
//cudaCheck(cudaMemset(plan.d_fluence, 0, plan.n_beamlets*sizeof(double)));
//plan.compute_dose();
//plan.stats();
//cudaCheck(cudaMemcpy(plan.regions, plan.d_regions, plan.n_regions*sizeof(Region), cudaMemcpyDeviceToHost));
//plan.print_table();
//float step = 2e-9;
//float decay = 1e-7;
//float min_step = 1e-9;
float step = 2e-7;
float decay = 1e-7;
float min_step = 1e-1;
double start_time = get_time_s();
double current_time;
double last_pen = 0;
double last_obj = 0;
double last_obj2 = 0;
int it = 0;
while (running && get_time_s() - start_time < 60) {
descend(plan, d_momentum, step, rid_sll, rid_slr);
cudaCheck(cudaMemcpy(plan.fluence, plan.d_fluence, plan.n_beamlets*sizeof(double), cudaMemcpyDeviceToHost));
plan.smooth_cpu();
cudaCheck(cudaMemcpy(plan.d_fluence, plan.fluence, plan.n_beamlets*sizeof(double), cudaMemcpyHostToDevice));
plan.compute_dose();
plan.stats();
if (it % 100 == 0) {
cudaCheck(cudaMemcpy(plan.regions, plan.d_regions, plan.n_regions*sizeof(Region), cudaMemcpyDeviceToHost));
current_time = get_time_s();
double pen = penalty(plan);
double obj = plan.regions[rid_sll].avg + plan.regions[rid_slr].avg;
double obj2 = objective(plan);
printf("\n[%.3f] Iteration %d %e\n", current_time - start_time, it, step);
//printf("penalty: %9.6f (%9.6f percent)\n", pen, ((pen-last_pen)*100/last_pen));
//printf(" obj: %9.6f (%9.6f percent)\n", obj, ((obj-last_obj)*100/last_obj));
printf("penalty: %9.6f (%9.6f)\n", pen, pen-last_pen);
printf(" obj: %9.6f (%9.6f)\n", obj, obj-last_obj);
printf(" obj2: %9.24f %9.24f %9.24f\n", obj2, obj2-last_obj2, (obj2-last_obj2)/obj2);
plan.print_table();
//if (abs(obj2-last_obj2)/obj2 < stop_ratio) {
// break;
//}
last_pen = pen;
last_obj = obj;
last_obj2 = obj2;
if (it % 10000 == 0) {
const char* out = "x_temp.txt";
cudaCheck(cudaMemcpy(plan.fluence, plan.d_fluence, plan.n_beamlets*sizeof(double), cudaMemcpyDeviceToHost));
FILE *f = fopen(out, "w");
for (int i = 0; i < plan.n_beamlets; i++) {
fprintf(f, "%.10e\n", plan.fluence[i]);
}
fclose(f);
printf("Last fluence written to %s\n", out);
}
}
//if (it % 100000 == 0) {
// step /= 10;
//}
if (step > min_step)
step = step/(1 + decay*it);
it++;
//if (it == 10000)
// break;
}
cudaCheck(cudaMemcpy(plan.regions, plan.d_regions, plan.n_regions*sizeof(Region), cudaMemcpyDeviceToHost));
double elapsed = get_time_s() - start_time;
printf("\nRan %d iterations in %.4f seconds (%.4f sec/it) \n", it, elapsed, elapsed/it);
printf("penalty: %f\n", penalty(plan));
printf(" obj: %f\n", plan.regions[rid_sll].avg + plan.regions[rid_slr].avg);
plan.print_table();
cudaCheck(cudaFree(d_momentum));
}
int main(int argc, char **argv) {
signal(SIGINT, interrupt_handler);
int plan_n = atoi(argv[1]);
const char* plan_path = argv[2];
const char* out_path = argv[3];
const char* fluence_path;
const char* fluence_prefix;
float stop_ratio = 1e-5;
if (argc > 4) {
fluence_path = argv[4];
fluence_prefix = argv[5];
} else {
// We use the starting plan from Eclipse
fluence_path = plan_path;
fluence_prefix = "x_PARETO";
}
Plan plan = {};
plan.load(plan_path, fluence_path, fluence_prefix);
int rid_sll, rid_slr;
float gurobi_avg_sll, gurobi_avg_slr;
if (plan_n == 3) {
rid_sll = 5;
rid_slr = 6;
plan.regions[ 0].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 1].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 2].set_targets(false, -1, -1, -1, 60, 60, 10, 5);
plan.regions[ 3].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 4].set_targets(false, -1, -1, -1, 50, 50, 10, 5);
plan.regions[ 5].set_targets(false, -1, -1, 26, -1, 1, 1, 1);
plan.regions[ 6].set_targets(false, -1, -1, 26, -1, 1, 1, 1);
plan.regions[ 7].set_targets(false, -1, -1, -1, 70, 70, 10, 5);
plan.regions[ 8].set_targets(false, -1, -1, -1, 74.25, 74.25, 40, 5);
plan.regions[ 9].set_targets( true, 60.75, 66.15, 68.85, 74.25, 67.50, -40, 50);
plan.regions[10].set_targets( true, 54.00, 58.80, 61.20, 66.00, 60.00, -50, 100);
plan.regions[11].set_targets( true, 48.60, 52.92, 55.08, 59.40, 54.00, -40, 100);
gurobi_avg_sll = -1;
gurobi_avg_slr = -1;
} else if (plan_n == 4) {
rid_sll = 2;
rid_slr = 1;
plan.regions[ 0].set_targets(false, -1, -1, -1, 70, 70, 10, 5);
plan.regions[ 1].set_targets(false, -1, -1, 26, -1, 1, 1, 5);
plan.regions[ 2].set_targets(false, -1, -1, 26, -1, 1, 1, 5);
plan.regions[ 3].set_targets(false, -1, -1, -1, 50, 50, 10, 5);
plan.regions[ 4].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 5].set_targets( true, 59.40, 64.67, 67.32, 72.60, 66.00, -40, 100);
plan.regions[ 6].set_targets( true, 53.46, 58.21, 60.59, 65.34, 59.40, -40, 100);
plan.regions[ 7].set_targets(false, -1, -1, -1, 60, 60, 10, 5);
plan.regions[ 8].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 9].set_targets(false, -1, -1, -1, 74.25, 74.25, 40, 5);
plan.regions[10].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
gurobi_avg_sll = -1;
gurobi_avg_slr = -1;
} else if (plan_n == 5) {
rid_sll = 3;
rid_slr = 4;
plan.regions[ 0].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 1].set_targets(false, -1, -1, -1, 74.25, 74.25, 40, 5);
plan.regions[ 2].set_targets(false, -1, -1, -1, 70, 70, 10, 5);
plan.regions[ 3].set_targets(false, -1, -1, 26, -1, 26, 1, 5);
plan.regions[ 4].set_targets(false, -1, -1, 26, -1, 26, 1, 5);
plan.regions[ 5].set_targets(false, -1, -1, -1, 50, 50, 10, 5);
plan.regions[ 6].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
plan.regions[ 7].set_targets(false, -1, -1, -1, 60, 60, 10, 5);
plan.regions[ 8].set_targets( true, 48.60, 52.92, 55.08, 59.40, 54.00, -100, 150);
plan.regions[ 9].set_targets( true, 54.00, 58.80, 61.20, 66.00, 60.00, -100, 150);
plan.regions[10].set_targets( true, 59.40, 64.67, 67.32, 72.60, 66.00, -60, 50);
plan.regions[11].set_targets(false, -1, -1, -1, -1, -1, 10, 5);
gurobi_avg_sll = -1;
gurobi_avg_slr = -1;
}
optimize_gpu(plan, rid_sll, rid_slr, gurobi_avg_sll, gurobi_avg_slr, stop_ratio);
cudaCheck(cudaMemcpy(plan.fluence, plan.d_fluence, plan.n_beamlets*sizeof(double), cudaMemcpyDeviceToHost));
FILE *f = fopen(out_path, "w");
for (int i = 0; i < plan.n_beamlets; i++) {
fprintf(f, "%.10e\n", plan.fluence[i]);
}
fclose(f);
printf("Last fluence written to %s\n", out_path);
}
|
92f81b354cbd1a75ccd81ac424aec0446a44949b.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 4, true>;
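// Explicit instantiation of the cutlass convolution wrapper for int8 data in NCxHWx<4> layout,
// SIMT math on SM61, 16x64x8 threadblock/warp tiles, and a bias + HSwish + clamp epilogue.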
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 92f81b354cbd1a75ccd81ac424aec0446a44949b.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 4, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
b205163d5e356909a1be3c74b0f9e8b9127aaae5.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <THH/THHGeneral.h>
#include <THH/THHApply.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
/**
* Note [Register spilling in hiprand call for CUDA < 10]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* For CUDA < 10, hiprandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth)
* when called to generate one random number at a time. This is because the line
* unsigned ret = (&state->output.x)[state->STATE++];
* in
* QUALIFIERS unsigned int hiprand(hiprandStatePhilox4_32_10_t *state)
* in hiprand/hiprand_kernel.h dynamically indexes into state.output, preventing the compiler from ever
* storing state.output in registers.
*
* CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels
* we are using hiprand distributions that utilize hiprand4 call. hiprand4 call doesn't have the
* register spilling problem.
*/
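// For example, a single hiprand4(&state) call (the counterpart of curand4) returns a
// uint4 carrying four samples at once, avoiding the one-value-at-a-time path that
// triggers the dynamic indexing described above.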
namespace {
template <typename scalar_t>
void poisson_cuda_kernel(
at::Tensor& ret,
const at::Tensor& lambda,
at::PhiloxCudaState philox_args) {
auto functor = [philox_args] __device__(
scalar_t & ret_val, const scalar_t& lambda) {
auto seeds = at::cuda::philox::unpack(philox_args);
hiprandStatePhilox4_32_10_t state;
hiprand_init(std::get<0>(seeds),
blockIdx.x * blockDim.x + threadIdx.x,
std::get<1>(seeds),
&state);
ret_val = static_cast<scalar_t>(hiprand_poisson(&state, lambda));
};
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t, decltype(functor),
/*max_threads_per_block=*/512,
/*min_blocks_per_sm=*/2>(ret, lambda, functor);
}
struct curand_uniform_wrapper {
hiprandStatePhilox4_32_10_t &state;
__device__ curand_uniform_wrapper(hiprandStatePhilox4_32_10_t &state): state(state) {}
__device__ float operator()() {
uint32_t val = hiprand(&state); //need just bits
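// MASK keeps the low 24 bits (the float significand width) and DIVISOR rescales
// them by 2^-24, mapping the raw bits to a uniform value in [0, 1).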
constexpr auto MASK = static_cast<uint32_t>((static_cast<uint64_t>(1) << std::numeric_limits<float>::digits) - 1);
constexpr auto DIVISOR = static_cast<float>(1) / (static_cast<uint32_t>(1) << std::numeric_limits<float>::digits);
return (val & MASK) * DIVISOR;
}
};
template <typename scalar_t>
void binomial_cuda_kernel(
at::Tensor& ret,
const at::Tensor& count,
const at::Tensor& prob,
at::PhiloxCudaState philox_args) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(count)
.add_input(prob)
.build();
at::native::distribution_binary_kernel(iter, philox_args,
[philox_args] GPU_LAMBDA (hiprandStatePhilox4_32_10_t& state, scalar_t count, scalar_t prob) {
#if defined(__CUDA_ARCH__) || defined(__HIP_PLATFORM_HCC__)
auto uniform_lambda = curand_uniform_wrapper(state);
BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda);
auto sample = sample_binomial<scalar_t, accscalar_t, decltype(uniform_lambda)>(count, prob, standard_uniform);
return static_cast<scalar_t>(sample);
#else
return count; // useless.
#endif
}
);
}
template <typename scalar_t>
void gamma_cuda_kernel(
at::Tensor& ret,
const at::Tensor& alpha,
at::PhiloxCudaState philox_args) {
using accscalar_t = at::acc_type<scalar_t, true>;
auto functor = [philox_args] __device__(
scalar_t & ret_val, const scalar_t& alpha) {
auto seeds = at::cuda::philox::unpack(philox_args);
hiprandStatePhilox4_32_10_t state;
hiprand_init(std::get<0>(seeds),
blockIdx.x * blockDim.x + threadIdx.x,
std::get<1>(seeds),
&state);
auto uniform_lambda = [&state] __device__ () {
return hiprand_uniform(&state);
};
BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda);
auto normal_lambda = [&state] __device__ () {
return hiprand_normal(&state);
};
BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda);
auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal);
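// std::numeric_limits<scalar_t>::min() is the smallest positive normalized value;
// the clamp below keeps an exact-zero sample out of later computations such as log().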
auto min_value = std::numeric_limits<scalar_t>::min();
ret_val = (min_value > sample) ? min_value : sample;
};
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t, decltype(functor),
/*max_threads_per_block=*/256,
/*min_blocks_per_sm=*/2>(ret, alpha, functor);
}
template<typename scalar_t>
void dirichlet_scalar_cuda_kernel(
at::Tensor& ret,
const at::Tensor& gamma) {
auto gamma_sum = gamma.sum(-1, true);
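// A Dirichlet sample is a vector of independent Gamma samples normalized by their
// sum; the clamps below keep every component strictly inside (0, 1).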
at::TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(gamma)
.add_input(gamma_sum)
.build();
at::native::gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t gamma, scalar_t gamma_sum) {
auto ret_val = gamma / gamma_sum;
auto min_value = std::numeric_limits<scalar_t>::min();
auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon();
ret_val = (min_value > ret_val) ? min_value : ret_val;
ret_val = (max_value < ret_val) ? max_value : ret_val;
return ret_val;
});
}
} // namespace
namespace at { namespace native {
Tensor _s_poisson_cuda(const Tensor& lambda, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(20);
}
Tensor ret = at::empty(lambda.sizes(), lambda.options());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "poisson_cuda", [&] {
poisson_cuda_kernel<scalar_t>(ret, lambda, rng_engine_inputs);
});
return ret;
}
Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(42);
}
Tensor ret = at::empty(count.sizes(), count.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "binomial_cuda", [&] {
binomial_cuda_kernel<scalar_t>(ret, count, prob, rng_engine_inputs);
});
return ret;
}
Tensor _s_gamma_cuda(const Tensor& alpha, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(10);
}
Tensor ret = at::empty(alpha.sizes(), alpha.options());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "gamma_cuda", [&] {
gamma_cuda_kernel<scalar_t>(ret, alpha, rng_engine_inputs);
});
return ret;
}
Tensor _s_dirichlet_cuda(const Tensor& alpha, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(10);
}
Tensor ret = at::empty(alpha.sizes(), alpha.options());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "dirichlet", [&] {
Tensor gamma = at::empty(alpha.sizes(), alpha.options());
gamma_cuda_kernel<scalar_t>(gamma, alpha, rng_engine_inputs);
dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma);
});
return ret;
}
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
Tensor ret = at::empty(self.sizes(), self.options());
TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(self)
.add_input(output)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "_standard_gamma_grad_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t self_val, scalar_t output_val) {
return standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
});
});
return ret;
}
Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) {
Tensor ret = at::empty(x.sizes(), x.options());
TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(x)
.add_input(alpha)
.add_input(total)
.build();
AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t x_val, scalar_t alpha_val, scalar_t total_val) -> scalar_t {
return dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val);
});
});
return ret;
}
}} // namespace at::native
| b205163d5e356909a1be3c74b0f9e8b9127aaae5.cu | #include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <THC/THCGeneral.h>
#include <THC/THCApply.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
/**
* Note [Register spilling in curand call for CUDA < 10]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* For CUDA < 10, curandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth)
* when called to generate one random number at a time. This is because the line
* unsigned ret = (&state->output.x)[state->STATE++];
* in
* QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state)
* in curand_kernel.h dynamically indexes into state.output, preventing the compiler from ever
* storing state.output in registers.
*
* CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels
* we are using curand distributions that utilize curand4 call. curand4 call doesn't have the
* register spilling problem.
*/
namespace {
template <typename scalar_t>
void poisson_cuda_kernel(
at::Tensor& ret,
const at::Tensor& lambda,
at::PhiloxCudaState philox_args) {
auto functor = [philox_args] __device__(
scalar_t & ret_val, const scalar_t& lambda) {
auto seeds = at::cuda::philox::unpack(philox_args);
curandStatePhilox4_32_10_t state;
curand_init(std::get<0>(seeds),
blockIdx.x * blockDim.x + threadIdx.x,
std::get<1>(seeds),
&state);
ret_val = static_cast<scalar_t>(curand_poisson(&state, lambda));
};
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t, decltype(functor),
/*max_threads_per_block=*/512,
/*min_blocks_per_sm=*/2>(ret, lambda, functor);
}
struct curand_uniform_wrapper {
curandStatePhilox4_32_10_t &state;
__device__ curand_uniform_wrapper(curandStatePhilox4_32_10_t &state): state(state) {}
__device__ float operator()() {
uint32_t val = curand(&state); //need just bits
constexpr auto MASK = static_cast<uint32_t>((static_cast<uint64_t>(1) << std::numeric_limits<float>::digits) - 1);
constexpr auto DIVISOR = static_cast<float>(1) / (static_cast<uint32_t>(1) << std::numeric_limits<float>::digits);
return (val & MASK) * DIVISOR;
}
};
template <typename scalar_t>
void binomial_cuda_kernel(
at::Tensor& ret,
const at::Tensor& count,
const at::Tensor& prob,
at::PhiloxCudaState philox_args) {
using accscalar_t = at::acc_type<scalar_t, true>;
at::TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(count)
.add_input(prob)
.build();
at::native::distribution_binary_kernel(iter, philox_args,
[philox_args] GPU_LAMBDA (curandStatePhilox4_32_10_t& state, scalar_t count, scalar_t prob) {
#if defined(__CUDA_ARCH__) || defined(__HIP_PLATFORM_HCC__)
auto uniform_lambda = curand_uniform_wrapper(state);
BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda);
auto sample = sample_binomial<scalar_t, accscalar_t, decltype(uniform_lambda)>(count, prob, standard_uniform);
return static_cast<scalar_t>(sample);
#else
return count; // useless.
#endif
}
);
}
template <typename scalar_t>
void gamma_cuda_kernel(
at::Tensor& ret,
const at::Tensor& alpha,
at::PhiloxCudaState philox_args) {
using accscalar_t = at::acc_type<scalar_t, true>;
auto functor = [philox_args] __device__(
scalar_t & ret_val, const scalar_t& alpha) {
auto seeds = at::cuda::philox::unpack(philox_args);
curandStatePhilox4_32_10_t state;
curand_init(std::get<0>(seeds),
blockIdx.x * blockDim.x + threadIdx.x,
std::get<1>(seeds),
&state);
auto uniform_lambda = [&state] __device__ () {
return curand_uniform(&state);
};
BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda);
auto normal_lambda = [&state] __device__ () {
return curand_normal(&state);
};
BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda);
auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal);
auto min_value = std::numeric_limits<scalar_t>::min();
ret_val = (min_value > sample) ? min_value : sample;
};
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t, decltype(functor),
/*max_threads_per_block=*/256,
/*min_blocks_per_sm=*/2>(ret, alpha, functor);
}
template<typename scalar_t>
void dirichlet_scalar_cuda_kernel(
at::Tensor& ret,
const at::Tensor& gamma) {
auto gamma_sum = gamma.sum(-1, true);
at::TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(gamma)
.add_input(gamma_sum)
.build();
at::native::gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t gamma, scalar_t gamma_sum) {
auto ret_val = gamma / gamma_sum;
auto min_value = std::numeric_limits<scalar_t>::min();
auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon();
ret_val = (min_value > ret_val) ? min_value : ret_val;
ret_val = (max_value < ret_val) ? max_value : ret_val;
return ret_val;
});
}
} // namespace
namespace at { namespace native {
Tensor _s_poisson_cuda(const Tensor& lambda, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(20);
}
Tensor ret = at::empty(lambda.sizes(), lambda.options());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "poisson_cuda", [&] {
poisson_cuda_kernel<scalar_t>(ret, lambda, rng_engine_inputs);
});
return ret;
}
Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(42);
}
Tensor ret = at::empty(count.sizes(), count.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "binomial_cuda", [&] {
binomial_cuda_kernel<scalar_t>(ret, count, prob, rng_engine_inputs);
});
return ret;
}
Tensor _s_gamma_cuda(const Tensor& alpha, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(10);
}
Tensor ret = at::empty(alpha.sizes(), alpha.options());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "gamma_cuda", [&] {
gamma_cuda_kernel<scalar_t>(ret, alpha, rng_engine_inputs);
});
return ret;
}
Tensor _s_dirichlet_cuda(const Tensor& alpha, c10::optional<Generator> gen_) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(10);
}
Tensor ret = at::empty(alpha.sizes(), alpha.options());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "dirichlet", [&] {
Tensor gamma = at::empty(alpha.sizes(), alpha.options());
gamma_cuda_kernel<scalar_t>(gamma, alpha, rng_engine_inputs);
dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma);
});
return ret;
}
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
Tensor ret = at::empty(self.sizes(), self.options());
TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(self)
.add_input(output)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "_standard_gamma_grad_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t self_val, scalar_t output_val) {
return standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
});
});
return ret;
}
Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) {
Tensor ret = at::empty(x.sizes(), x.options());
TensorIterator iter = at::TensorIteratorConfig()
.add_output(ret)
.add_input(x)
.add_input(alpha)
.add_input(total)
.build();
AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t x_val, scalar_t alpha_val, scalar_t total_val) -> scalar_t {
return dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val);
});
});
return ret;
}
}} // namespace at::native
|
272b1827a4d8eec990f07bef6e20fb4036a803b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "paddle/fluid/operators/sequence_ops/sequence_erase_op.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using phi::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void LabelErasedIdx(const T* in_dat,
const int64_t in_len,
const int* tokens,
const size_t tokens_len,
size_t* num_erased) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < in_len) {
for (size_t i = 0; i < tokens_len; ++i) {
if (in_dat[index] == tokens[i]) {
num_erased[index + 1] = 1;
break;
}
}
}
}
__global__ void GetOutLod(const size_t* num_erased,
const size_t* in_lod,
const size_t lod_len,
size_t* out_lod0) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < lod_len) {
out_lod0[index] = in_lod[index] - num_erased[in_lod[index]];
}
}
template <typename T>
__global__ void SetOutput(const T* in_dat,
const int64_t in_len,
const size_t* num_erased,
T* out_dat) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < in_len) {
if (num_erased[index] == num_erased[index + 1]) {
out_dat[index - num_erased[index]] = in_dat[index];
}
}
}
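// Pipeline, with a small worked example: for in = [1, 2, 3, 2] and tokens = {2},
// LabelErasedIdx marks num_erased = [0, 0, 1, 0, 1]; the inclusive scan in the kernel
// launcher turns this into [0, 0, 1, 1, 2]; SetOutput keeps every position where
// num_erased[i] == num_erased[i + 1] and shifts it left, giving out = [1, 3]; and
// GetOutLod moves each LoD boundary left by the number of elements erased before it.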
template <typename T, typename DeviceContext>
class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<phi::DenseTensor>("X");
auto* out = ctx.Output<phi::DenseTensor>("Out");
auto lod = in->lod();
PADDLE_ENFORCE_EQ(
lod[lod.size() - 1].back(),
(size_t)in->numel(),
platform::errors::InvalidArgument(
"The actual size mismatches with the LoD information."));
auto tokens = ctx.Attr<std::vector<int>>("tokens");
auto in_len = in->numel();
auto in_dat = in->data<T>();
// Copy tokens to GPU
thrust::device_vector<int> dev_tokens(tokens.begin(), tokens.end());
int* dev_tokens_ptr = thrust::raw_pointer_cast(dev_tokens.data());
// Count number of elements to be erased
thrust::device_vector<size_t> num_erased(in_len + 1, 0);
size_t* num_erased_ptr = thrust::raw_pointer_cast(num_erased.data());
auto stream = ctx.cuda_device_context().stream();
hipLaunchKernelGGL(( LabelErasedIdx), dim3((in_len - 1) / PADDLE_CUDA_NUM_THREADS + 1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
stream,
in_dat, in_len, dev_tokens_ptr, tokens.size(), num_erased_ptr);
thrust::inclusive_scan(
num_erased.begin() + 1, num_erased.end(), num_erased.begin() + 1);
// Copy LoD to GPU
auto last_lod = lod[lod.size() - 1];
auto lod_len = last_lod.size();
phi::MixVector<size_t> mixv_last_lod(&last_lod);
const size_t* dev_in_lod_ptr = mixv_last_lod.CUDAData(ctx.GetPlace());
// Calc output LoD
thrust::device_vector<size_t> dev_out_lod(lod_len);
size_t* dev_out_lod_ptr = thrust::raw_pointer_cast(dev_out_lod.data());
hipLaunchKernelGGL(( GetOutLod), dim3((lod_len - 1) / PADDLE_CUDA_NUM_THREADS + 1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
stream,
num_erased_ptr, dev_in_lod_ptr, lod_len, dev_out_lod_ptr);
// Set LoD for output
std::vector<size_t> out_last_lod(dev_out_lod.begin(), dev_out_lod.end());
framework::LoD out_lod;
for (size_t i = 0; i < lod.size() - 1; ++i) {
out_lod.push_back(lod[i]);
}
out_lod.push_back(out_last_lod);
out->set_lod(out_lod);
// Set output
out->Resize({static_cast<int64_t>(out_last_lod.back()), 1});
auto out_dat = out->mutable_data<T>(ctx.GetPlace());
hipLaunchKernelGGL(( SetOutput), dim3((in_len - 1) / PADDLE_CUDA_NUM_THREADS + 1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
stream, in_dat, in_len, num_erased_ptr, out_dat);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(sequence_erase,
GPU,
ALL_LAYOUT,
ops::SequenceEraseOpCUDAKernel,
int32_t,
int64_t) {}
| 272b1827a4d8eec990f07bef6e20fb4036a803b5.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "paddle/fluid/operators/sequence_ops/sequence_erase_op.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using phi::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void LabelErasedIdx(const T* in_dat,
const int64_t in_len,
const int* tokens,
const size_t tokens_len,
size_t* num_erased) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < in_len) {
for (size_t i = 0; i < tokens_len; ++i) {
if (in_dat[index] == tokens[i]) {
num_erased[index + 1] = 1;
break;
}
}
}
}
__global__ void GetOutLod(const size_t* num_erased,
const size_t* in_lod,
const size_t lod_len,
size_t* out_lod0) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < lod_len) {
out_lod0[index] = in_lod[index] - num_erased[in_lod[index]];
}
}
template <typename T>
__global__ void SetOutput(const T* in_dat,
const int64_t in_len,
const size_t* num_erased,
T* out_dat) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < in_len) {
if (num_erased[index] == num_erased[index + 1]) {
out_dat[index - num_erased[index]] = in_dat[index];
}
}
}
template <typename T, typename DeviceContext>
class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<phi::DenseTensor>("X");
auto* out = ctx.Output<phi::DenseTensor>("Out");
auto lod = in->lod();
PADDLE_ENFORCE_EQ(
lod[lod.size() - 1].back(),
(size_t)in->numel(),
platform::errors::InvalidArgument(
"The actual size mismatches with the LoD information."));
auto tokens = ctx.Attr<std::vector<int>>("tokens");
auto in_len = in->numel();
auto in_dat = in->data<T>();
// Copy tokens to GPU
thrust::device_vector<int> dev_tokens(tokens.begin(), tokens.end());
int* dev_tokens_ptr = thrust::raw_pointer_cast(dev_tokens.data());
// Count number of elements to be erased
thrust::device_vector<size_t> num_erased(in_len + 1, 0);
size_t* num_erased_ptr = thrust::raw_pointer_cast(num_erased.data());
auto stream = ctx.cuda_device_context().stream();
LabelErasedIdx<<<(in_len - 1) / PADDLE_CUDA_NUM_THREADS + 1,
PADDLE_CUDA_NUM_THREADS,
0,
stream>>>(
in_dat, in_len, dev_tokens_ptr, tokens.size(), num_erased_ptr);
thrust::inclusive_scan(
num_erased.begin() + 1, num_erased.end(), num_erased.begin() + 1);
// Copy LoD to GPU
auto last_lod = lod[lod.size() - 1];
auto lod_len = last_lod.size();
phi::MixVector<size_t> mixv_last_lod(&last_lod);
const size_t* dev_in_lod_ptr = mixv_last_lod.CUDAData(ctx.GetPlace());
// Calc output LoD
thrust::device_vector<size_t> dev_out_lod(lod_len);
size_t* dev_out_lod_ptr = thrust::raw_pointer_cast(dev_out_lod.data());
GetOutLod<<<(lod_len - 1) / PADDLE_CUDA_NUM_THREADS + 1,
PADDLE_CUDA_NUM_THREADS,
0,
stream>>>(
num_erased_ptr, dev_in_lod_ptr, lod_len, dev_out_lod_ptr);
// Set LoD for output
std::vector<size_t> out_last_lod(dev_out_lod.begin(), dev_out_lod.end());
framework::LoD out_lod;
for (size_t i = 0; i < lod.size() - 1; ++i) {
out_lod.push_back(lod[i]);
}
out_lod.push_back(out_last_lod);
out->set_lod(out_lod);
// Set output
out->Resize({static_cast<int64_t>(out_last_lod.back()), 1});
auto out_dat = out->mutable_data<T>(ctx.GetPlace());
SetOutput<<<(in_len - 1) / PADDLE_CUDA_NUM_THREADS + 1,
PADDLE_CUDA_NUM_THREADS,
0,
stream>>>(in_dat, in_len, num_erased_ptr, out_dat);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(sequence_erase,
GPU,
ALL_LAYOUT,
ops::SequenceEraseOpCUDAKernel,
int32_t,
int64_t) {}
|
079499925bf57cc26959f0d1440d86149d6f67ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "aes.h"
// state - array holding the intermediate results during decryption.
typedef uint8_t state_t[4][4];
// The array that stores the round keys.
//__device__ static const uint8_t* RoundKey;
__device__ uintmax_t get_global_index(void)
{
return blockIdx.x * blockDim.x + threadIdx.x;
}
// prints string as hex
__device__ static void phex(uint8_t* str) {
unsigned char i;
for (i = 0; i < 16; ++i)
printf("%.2x", str[i]);
printf("\n");
}
__device__ static void print_state(state_t* state, char message[]) {
uintmax_t idx = get_global_index();
uint8_t i, j;
//for (i = 0; i < 4; i++)
printf("[thread %lld] state %s\n%.2x %.2x %.2x %.2x\n%.2x %.2x %.2x %.2x\n%.2x %.2x %.2x %.2x\n%.2x %.2x %.2x %.2x\n", idx, message,
(*state)[0][0], (*state)[0][1], (*state)[0][2], (*state)[0][3],
(*state)[1][0], (*state)[1][1], (*state)[1][2], (*state)[1][3],
(*state)[2][0], (*state)[2][1], (*state)[2][2], (*state)[2][3],
(*state)[3][0], (*state)[3][1], (*state)[3][2], (*state)[3][3]);
}
//
//__device__ static void printKey() {
// printf("RoundKey:\n");
// unsigned char i, j;
// for (j = 0; j < ROUNDS + 1; ++j) {
// for (i = 0; i < KEYLENGTH; ++i)
// printf("%.2x", RoundKey[(j*KEYLENGTH) + i]);
// printf("\n");
// }
//}
// Lookup-tables
__device__ __constant__ uint8_t d_sbox[256] = {
//0 1 2 3 4 5 6 7 8 9 A B C D E F
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };
// XOR the round key on state.
__device__ void AddRoundKey(state_t* state, uint8_t* roundKey, uint8_t round) {
//uintmax_t idx = get_global_index();
//printf("[Thread %lld] roundKey: %.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x\n", idx,
// roundKey[round*BLOCKSIZE + 0], roundKey[round*BLOCKSIZE + 1], roundKey[round*BLOCKSIZE + 2], roundKey[round*BLOCKSIZE + 3],
// roundKey[round*BLOCKSIZE + 4], roundKey[round*BLOCKSIZE + 5], roundKey[round*BLOCKSIZE + 6], roundKey[round*BLOCKSIZE + 7],
// roundKey[round*BLOCKSIZE + 8], roundKey[round*BLOCKSIZE + 9], roundKey[round*BLOCKSIZE + 10], roundKey[round*BLOCKSIZE + 11],
// roundKey[round*BLOCKSIZE + 12], roundKey[round*BLOCKSIZE + 13], roundKey[round*BLOCKSIZE + 14], roundKey[round*BLOCKSIZE + 15]);
uint8_t i, j;
for (i = 0; i<4; ++i) {
for (j = 0; j < 4; ++j) {
//printf("[Thread %lld] (*state)[%d][%d] before: %.2x\n", idx, i, j, (*state)[i][j]);
(*state)[i][j] ^= roundKey[round * LANESIZE * 4 + i * LANESIZE + j];
//printf("[Thread %lld] (*state)[%d][%d] after: %.2x\n", idx, i, j, (*state)[i][j]);
}
}
}
// The SubBytes Function Substitutes the values in the
// state matrix with values in an S-box.
__device__ void SubBytes(state_t* state, uint8_t* s_sbox)
{
uint8_t i, j;
for (i = 0; i < 4; ++i)
{
for (j = 0; j < 4; ++j)
{
(*state)[j][i] = s_sbox[(*state)[j][i]];
}
}
}
// The ShiftRows() function shifts the rows in the state to the left.
// Each row is shifted with different offset.
// Offset = Row number. So the first row is not shifted.
__device__ void ShiftRows(state_t* state)
{
uint8_t temp;
// Rotate first row 1 columns to left
temp = (*state)[0][1];
(*state)[0][1] = (*state)[1][1];
(*state)[1][1] = (*state)[2][1];
(*state)[2][1] = (*state)[3][1];
(*state)[3][1] = temp;
// Rotate second row 2 columns to left
temp = (*state)[0][2];
(*state)[0][2] = (*state)[2][2];
(*state)[2][2] = temp;
temp = (*state)[1][2];
(*state)[1][2] = (*state)[3][2];
(*state)[3][2] = temp;
// Rotate third row 3 columns to left
temp = (*state)[0][3];
(*state)[0][3] = (*state)[3][3];
(*state)[3][3] = (*state)[2][3];
(*state)[2][3] = (*state)[1][3];
(*state)[1][3] = temp;
}
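// xtime multiplies a GF(2^8) element by x (i.e. by {02}); the conditional XOR with
// 0x1b reduces the result modulo the AES polynomial x^8 + x^4 + x^3 + x + 1.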
__device__ uint8_t xtime(uint8_t x)
{
return ((x << 1) ^ (((x >> 7) & 1) * 0x1b));
}
// MixColumns function mixes the columns of the state matrix
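// Each column is multiplied by the fixed matrix [02 03 01 01; 01 02 03 01;
// 01 01 02 03; 03 01 01 02] over GF(2^8), written below in terms of xtime and XORs.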
__device__ void MixColumns(state_t* state)
{
uint8_t i;
uint8_t Tmp, Tm, t;
for (i = 0; i < 4; ++i)
{
t = (*state)[i][0];
Tmp = (*state)[i][0] ^ (*state)[i][1] ^ (*state)[i][2] ^ (*state)[i][3];
Tm = (*state)[i][0] ^ (*state)[i][1]; Tm = xtime(Tm); (*state)[i][0] ^= Tm ^ Tmp;
Tm = (*state)[i][1] ^ (*state)[i][2]; Tm = xtime(Tm); (*state)[i][1] ^= Tm ^ Tmp;
Tm = (*state)[i][2] ^ (*state)[i][3]; Tm = xtime(Tm); (*state)[i][2] ^= Tm ^ Tmp;
Tm = (*state)[i][3] ^ t; Tm = xtime(Tm); (*state)[i][3] ^= Tm ^ Tmp;
}
}
// Cipher is the main function that encrypts the PlainText.
__device__ void Cipher(state_t* state, uint8_t* roundKey, uint8_t* s_sbox)
{
uint8_t round = 0;
// Add the First round key to the state before starting the rounds.
AddRoundKey(state, roundKey, round);
//print_state(state, "after first round key added");
// There will be ROUNDS rounds.
// The first ROUNDS-1 rounds are identical.
// These ROUNDS-1 rounds are executed in the loop below.
for (round = 1; round < ROUNDS; ++round)
{
SubBytes(state, s_sbox);
ShiftRows(state);
MixColumns(state);
AddRoundKey(state, roundKey, round);
//print_state(state, "after round key added");
}
// The last round is given below.
// The MixColumns function is not here in the last round.
SubBytes(state, s_sbox);
ShiftRows(state);
AddRoundKey(state, roundKey, ROUNDS);
//print_state(state, "after last round key added");
}
__device__ void AES128_ECB_encrypt(uint8_t* ciphertext_block, uint8_t* roundKey, uint8_t* s_sbox) {
state_t* state = (state_t*)ciphertext_block;
//print_state(state, "after init");
// The next function call encrypts the PlainText with the Key using AES algorithm.
Cipher(state, roundKey, s_sbox);
}
__global__ void cuda_encrypt_block(uint8_t* d_ciphertext, uint8_t* d_plaintext, uint8_t* d_roundKey, uintmax_t plaintext_blocks) {
uintmax_t idx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ uint8_t s_roundKey[BLOCKSIZE * (ROUNDS + 1)];
//__shared__ uint8_t s_ciphertext[BLOCKSIZE * THREADS_PER_BLOCK];
__shared__ uint8_t s_sbox[256];
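// The s-box is staged from constant into shared memory below: its lookups use
// data-dependent indices, which the broadcast-oriented constant cache serializes,
// while shared memory handles scattered per-thread accesses well.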
uintmax_t offset = idx*BLOCKSIZE;
uintmax_t block_offset = (idx % THREADS_PER_BLOCK) * BLOCKSIZE;
// if there are enough THREADS_PER_BLOCK, the round key allocation to shared memory is performed by (ROUNDS + 1) threads in parallel
if (THREADS_PER_BLOCK >= (ROUNDS + 1) && (idx % THREADS_PER_BLOCK) < (ROUNDS + 1)) {
memcpy(s_roundKey + block_offset, d_roundKey + block_offset, BLOCKSIZE);
}
// if not, this is done only by the first thread in a block
else if ((idx % THREADS_PER_BLOCK) == 0) {
memcpy(s_roundKey, d_roundKey, BLOCKSIZE*(ROUNDS + 1));
}
// first thread in a block copies sbox from constant to shared memory
if ((idx % THREADS_PER_BLOCK) == 0) {
memcpy(s_sbox, d_sbox, sizeof(uint8_t) * 256);
}
__syncthreads();
if (idx < plaintext_blocks) {
//memcpy(s_ciphertext + block_offset, d_plaintext + offset, BLOCKSIZE);
memcpy(d_ciphertext + offset, d_plaintext + offset, BLOCKSIZE);
// each plaintext block is encrypted by an individual thread
AES128_ECB_encrypt(d_ciphertext + offset, s_roundKey, s_sbox);
//memcpy(d_ciphertext + offset, s_ciphertext + block_offset, sizeof(uint8_t)*BLOCKSIZE);
}
} | 079499925bf57cc26959f0d1440d86149d6f67ed.cu | #include "aes.h"
// state - array holding the intermediate results during decryption.
typedef uint8_t state_t[4][4];
// The array that stores the round keys.
//__device__ static const uint8_t* RoundKey;
__device__ uintmax_t get_global_index(void)
{
return blockIdx.x * blockDim.x + threadIdx.x;
}
// prints string as hex
__device__ static void phex(uint8_t* str) {
unsigned char i;
for (i = 0; i < 16; ++i)
printf("%.2x", str[i]);
printf("\n");
}
__device__ static void print_state(state_t* state, char message[]) {
uintmax_t idx = get_global_index();
uint8_t i, j;
//for (i = 0; i < 4; i++)
printf("[thread %lld] state %s\n%.2x %.2x %.2x %.2x\n%.2x %.2x %.2x %.2x\n%.2x %.2x %.2x %.2x\n%.2x %.2x %.2x %.2x\n", idx, message,
(*state)[0][0], (*state)[0][1], (*state)[0][2], (*state)[0][3],
(*state)[1][0], (*state)[1][1], (*state)[1][2], (*state)[1][3],
(*state)[2][0], (*state)[2][1], (*state)[2][2], (*state)[2][3],
(*state)[3][0], (*state)[3][1], (*state)[3][2], (*state)[3][3]);
}
//
//__device__ static void printKey() {
// printf("RoundKey:\n");
// unsigned char i, j;
// for (j = 0; j < ROUNDS + 1; ++j) {
// for (i = 0; i < KEYLENGTH; ++i)
// printf("%.2x", RoundKey[(j*KEYLENGTH) + i]);
// printf("\n");
// }
//}
// Lookup-tables
__device__ __constant__ uint8_t d_sbox[256] = {
//0 1 2 3 4 5 6 7 8 9 A B C D E F
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };
// XOR the round key on state.
__device__ void AddRoundKey(state_t* state, uint8_t* roundKey, uint8_t round) {
//uintmax_t idx = get_global_index();
//printf("[Thread %lld] roundKey: %.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x\n", idx,
// roundKey[round*BLOCKSIZE + 0], roundKey[round*BLOCKSIZE + 1], roundKey[round*BLOCKSIZE + 2], roundKey[round*BLOCKSIZE + 3],
// roundKey[round*BLOCKSIZE + 4], roundKey[round*BLOCKSIZE + 5], roundKey[round*BLOCKSIZE + 6], roundKey[round*BLOCKSIZE + 7],
// roundKey[round*BLOCKSIZE + 8], roundKey[round*BLOCKSIZE + 9], roundKey[round*BLOCKSIZE + 10], roundKey[round*BLOCKSIZE + 11],
// roundKey[round*BLOCKSIZE + 12], roundKey[round*BLOCKSIZE + 13], roundKey[round*BLOCKSIZE + 14], roundKey[round*BLOCKSIZE + 15]);
uint8_t i, j;
for (i = 0; i<4; ++i) {
for (j = 0; j < 4; ++j) {
//printf("[Thread %lld] (*state)[%d][%d] before: %.2x\n", idx, i, j, (*state)[i][j]);
(*state)[i][j] ^= roundKey[round * LANESIZE * 4 + i * LANESIZE + j];
//printf("[Thread %lld] (*state)[%d][%d] after: %.2x\n", idx, i, j, (*state)[i][j]);
}
}
}
// The SubBytes Function Substitutes the values in the
// state matrix with values in an S-box.
__device__ void SubBytes(state_t* state, uint8_t* s_sbox)
{
uint8_t i, j;
for (i = 0; i < 4; ++i)
{
for (j = 0; j < 4; ++j)
{
(*state)[j][i] = s_sbox[(*state)[j][i]];
}
}
}
// The ShiftRows() function shifts the rows in the state to the left.
// Each row is shifted with different offset.
// Offset = Row number. So the first row is not shifted.
__device__ void ShiftRows(state_t* state)
{
uint8_t temp;
// Rotate first row 1 columns to left
temp = (*state)[0][1];
(*state)[0][1] = (*state)[1][1];
(*state)[1][1] = (*state)[2][1];
(*state)[2][1] = (*state)[3][1];
(*state)[3][1] = temp;
// Rotate second row 2 columns to left
temp = (*state)[0][2];
(*state)[0][2] = (*state)[2][2];
(*state)[2][2] = temp;
temp = (*state)[1][2];
(*state)[1][2] = (*state)[3][2];
(*state)[3][2] = temp;
// Rotate third row 3 columns to left
temp = (*state)[0][3];
(*state)[0][3] = (*state)[3][3];
(*state)[3][3] = (*state)[2][3];
(*state)[2][3] = (*state)[1][3];
(*state)[1][3] = temp;
}
__device__ uint8_t xtime(uint8_t x)
{
return ((x << 1) ^ (((x >> 7) & 1) * 0x1b));
}
// MixColumns function mixes the columns of the state matrix
__device__ void MixColumns(state_t* state)
{
uint8_t i;
uint8_t Tmp, Tm, t;
for (i = 0; i < 4; ++i)
{
t = (*state)[i][0];
Tmp = (*state)[i][0] ^ (*state)[i][1] ^ (*state)[i][2] ^ (*state)[i][3];
Tm = (*state)[i][0] ^ (*state)[i][1]; Tm = xtime(Tm); (*state)[i][0] ^= Tm ^ Tmp;
Tm = (*state)[i][1] ^ (*state)[i][2]; Tm = xtime(Tm); (*state)[i][1] ^= Tm ^ Tmp;
Tm = (*state)[i][2] ^ (*state)[i][3]; Tm = xtime(Tm); (*state)[i][2] ^= Tm ^ Tmp;
Tm = (*state)[i][3] ^ t; Tm = xtime(Tm); (*state)[i][3] ^= Tm ^ Tmp;
}
}
// Cipher is the main function that encrypts the PlainText.
__device__ void Cipher(state_t* state, uint8_t* roundKey, uint8_t* s_sbox)
{
uint8_t round = 0;
// Add the First round key to the state before starting the rounds.
AddRoundKey(state, roundKey, round);
//print_state(state, "after first round key added");
// There will be ROUNDS rounds.
// The first ROUNDS-1 rounds are identical.
// These ROUNDS-1 rounds are executed in the loop below.
for (round = 1; round < ROUNDS; ++round)
{
SubBytes(state, s_sbox);
ShiftRows(state);
MixColumns(state);
AddRoundKey(state, roundKey, round);
//print_state(state, "after round key added");
}
// The last round is given below.
// The MixColumns function is not here in the last round.
SubBytes(state, s_sbox);
ShiftRows(state);
AddRoundKey(state, roundKey, ROUNDS);
//print_state(state, "after last round key added");
}
__device__ void AES128_ECB_encrypt(uint8_t* ciphertext_block, uint8_t* roundKey, uint8_t* s_sbox) {
state_t* state = (state_t*)ciphertext_block;
//print_state(state, "after init");
// The next function call encrypts the PlainText with the Key using AES algorithm.
Cipher(state, roundKey, s_sbox);
}
__global__ void cuda_encrypt_block(uint8_t* d_ciphertext, uint8_t* d_plaintext, uint8_t* d_roundKey, uintmax_t plaintext_blocks) {
uintmax_t idx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ uint8_t s_roundKey[BLOCKSIZE * (ROUNDS + 1)];
//__shared__ uint8_t s_ciphertext[BLOCKSIZE * THREADS_PER_BLOCK];
__shared__ uint8_t s_sbox[256];
uintmax_t offset = idx*BLOCKSIZE;
uintmax_t block_offset = (idx % THREADS_PER_BLOCK) * BLOCKSIZE;
// if there are enough THREADS_PER_BLOCK, the round key allocation to shared memory is performed by (ROUNDS + 1) threads in parallel
if (THREADS_PER_BLOCK >= (ROUNDS + 1) && (idx % THREADS_PER_BLOCK) < (ROUNDS + 1)) {
memcpy(s_roundKey + block_offset, d_roundKey + block_offset, BLOCKSIZE);
}
// if not, this is done only by the first thread in a block
else if ((idx % THREADS_PER_BLOCK) == 0) {
memcpy(s_roundKey, d_roundKey, BLOCKSIZE*(ROUNDS + 1));
}
// first thread in a block copies sbox from constant to shared memory
if ((idx % THREADS_PER_BLOCK) == 0) {
memcpy(s_sbox, d_sbox, sizeof(uint8_t) * 256);
}
__syncthreads();
if (idx < plaintext_blocks) {
//memcpy(s_ciphertext + block_offset, d_plaintext + offset, BLOCKSIZE);
memcpy(d_ciphertext + offset, d_plaintext + offset, BLOCKSIZE);
// each plaintext block is encrypted by an individual thread
AES128_ECB_encrypt(d_ciphertext + offset, s_roundKey, s_sbox);
//memcpy(d_ciphertext + offset, s_ciphertext + block_offset, sizeof(uint8_t)*BLOCKSIZE);
}
} |
d9e97d3b7a97141103bc516dce14ee633bd78237.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file Correlation.cu
* \brief Correlation operator
* \author Xu Dong
*/
#include "./correlation-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "./mxnet_op.h"
#define ROUND_OFF 50000
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define CORRELATION_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// == Correlation Kernel
template <typename Dtype>
__global__ void CorrelateData(const int nthreads,
int num,
int topwidth,
int topheight,
int topchannels,
int topcount,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int kernel_size,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int bottomchannels,
const Dtype* bottom0,
const Dtype* bottom1,
Dtype* top) {
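// Work decomposition: each thread block handles one output position (blockIdx.x,
// blockIdx.y) of one sample (blockIdx.z). The image-1 patch is staged into shared
// memory once, the threads then split the channel loop for every displacement
// (top channel), and thread 0 reduces the per-thread partial sums.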
extern __shared__ char patch_data_char[];
Dtype* patch_data = reinterpret_cast<Dtype*>(patch_data_char);
// First (upper left) position of kernel upper-left corner
// in current center position of neighborhood in image 1
int x1 = blockIdx.x * stride1 + max_displacement;
int y1 = blockIdx.y * stride1 + max_displacement;
int item = blockIdx.z;
int ch_off = threadIdx.x;
// Load 3D patch into shared shared memory
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int idx1 = ((item * bottomheight + y1 + j) * bottomwidth + x1 + i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ Dtype sum[THREADS_PER_WARP * WARPS_PER_BLOCK];
// Compute correlation
for (int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int x2 = x1 + s2o;
int y2 = y1 + s2p;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + ch;
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if (ch_off == 0) {
Dtype total_sum = 0;
for (int idx = 0; idx < THREADS_PER_WARP * WARPS_PER_BLOCK; idx++) {
total_sum += sum[idx];
}
const int sumelems = kernel_size * kernel_size * bottomchannels;
const int index = ((top_channel * topheight + blockIdx.y) * topwidth) + blockIdx.x;
top[index + item * topcount] = total_sum / static_cast<float>(sumelems);
} // Aggregate result of different threads
}
}
// == Correlation Backward Pass Kernel (For data1)
template <typename Dtype>
__global__ void CorrelateDataBackward0(const int nthreads,
int num,
int item,
int topwidth,
int topheight,
int topchannels,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int pbottomwidth,
int pbottomheight,
int bottomchannels,
int bottomcount,
int pad_size,
Dtype* bottom0diff,
const Dtype* bottom1,
const Dtype* topdiff) {
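// Gradient w.r.t. the first input: for each bottom0 pixel, accumulate
// topdiff * bottom1 (shifted by the displacement s2o, s2p) over every top
// position whose kernel window covers this pixel, i.e. the xmin..xmax /
// ymin..ymax range computed below.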
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
// We add round_off_s1 before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2 * kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 -
round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2 * kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 -
round_off; // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
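// Worked example of the round-off trick: with stride1 == 2 and
// (l - max_displacement) == -3, plain integer division truncates toward zero and
// yields -1, while the intended floor is -2; adding round_off_s1 == 100000 first
// gives (-3 + 100000) / 2 - 50000 == 49998 - 50000 == -2.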
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth - 1, xmax);
ymin = max(0, ymin);
ymax = min(topheight - 1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 =
((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o)) * bottomchannels + n;
Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p + neighborhood_grid_radius) * neighborhood_grid_width +
(o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot1tmp;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1(const int nthreads,
int num,
int item,
int topwidth,
int topheight,
int topchannels,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int pbottomwidth,
int pbottomheight,
int bottomchannels,
int bottomcount,
int pad_size,
const Dtype* bottom0,
Dtype* bottom1diff,
const Dtype* topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
// We add round_off_s1 before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2 * kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 +
1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2 * kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 +
1 - round_off;
// ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth - 1, xmax);
ymin = max(0, ymin);
ymax = min(topheight - 1, ymax);
// Get bottom0 data:
int idxbot0 =
((item * pbottomheight + (m - s2p)) * pbottomwidth + (l - s2o)) * bottomchannels + n;
Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p + neighborhood_grid_radius) * neighborhood_grid_width +
(o + neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot0tmp;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Kernel Subtraction
template <typename Dtype>
__global__ void CorrelateDataSubtract(const int nthreads,
int num,
int item,
int topwidth,
int topheight,
int topchannels,
int topcount,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int bottomchannels,
const Dtype* bottom0,
const Dtype* bottom1,
Dtype* top) {
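// Subtraction variant: one thread per output element; instead of a dot product it
// accumulates absolute differences (an L1 distance) between the two patches.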
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % topwidth; // w-pos
int y = (index / topwidth) % topheight; // h-pos
int c = (index / topwidth / topheight) % topchannels; // channels
// Offset of patch in image 2
int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
// First (upper left) position of kernel center in current neighborhood in image 1
int x1 = x * stride1 + kernel_radius + max_displacement;
int y1 = y * stride1 + kernel_radius + max_displacement;
// Iterate through 3D patch
Dtype sum = 0;
for (int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT
for (int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH
for (int l = 0; l < bottomchannels; l++) { // CHANNELS
// Calculate position in image 2
int x2 = x1 + s2o;
int y2 = y1 + s2p;
// Indices in bottom data: (CH=l,W=x2,H=y2,N)
int idx1 = ((item * bottomheight + y1 + j) * bottomwidth + x1 + i) * bottomchannels + l;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + l;
// Do the correlation:
sum += fabsf(bottom0[idx1] - bottom1[idx2]);
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
top[index + item * topcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 0)
template <typename Dtype>
__global__ void CorrelateDataBackward0Subtract(const int nthreads,
int num,
int item,
int topwidth,
int topheight,
int topchannels,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int pbottomwidth,
int pbottomheight,
int bottomchannels,
int bottomcount,
int pad_size,
Dtype* bottom0diff,
const Dtype* bottom0,
const Dtype* bottom1,
const Dtype* topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + l) * bottomchannels + n;
// We add round_off_s1 before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2 * kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 -
round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2 * kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 -
round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth - 1, xmax);
ymin = max(0, ymin);
ymax = min(topheight - 1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 =
((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o)) * bottomchannels + n;
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(1.0) : Dtype(-1.0);
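          // Added note: this is the subgradient of |bottom0 - bottom1| with respect to bottom0;
          // the kink at bot0tmp == bot1tmp is resolved to +1 by the >= comparison.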
// Index offset for topdiff in following loops:
int op = (p + neighborhood_grid_radius) * neighborhood_grid_width +
(o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1Subtract(const int nthreads,
int num,
int item,
int topwidth,
int topheight,
int topchannels,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int pbottomwidth,
int pbottomheight,
int bottomchannels,
int bottomcount,
int pad_size,
const Dtype* bottom0,
const Dtype* bottom1,
Dtype* bottom1diff,
const Dtype* topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + l) * bottomchannels + n;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
        // We add round_off_s1 before the int division and subtract round_off after it,
        // to ensure the formula matches ceil behavior:
        int xmin = (l - 2 * kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 +
                   1 - round_off;
        // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
        int ymin = (m - 2 * kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 +
                   1 - round_off;
        // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth - 1, xmax);
ymin = max(0, ymin);
ymax = min(topheight - 1, ymax);
// Get bottom0 data:
int idxbot0 =
((item * pbottomheight + (m - s2p)) * pbottomwidth + (l - s2o)) * bottomchannels + n;
          // bottom0[l-s2o,m-s2p,n]
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(-1.0) : Dtype(1.0);
// Index offset for topdiff in following loops:
int op = (p + neighborhood_grid_radius) * neighborhood_grid_width +
(o + neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Forward
// == Dimension rearrangement Kernel
template <typename Dtype>
__global__ void blob_rearrange_kernel2(const Dtype* in,
Dtype* out,
int num,
int channels,
int width,
int height,
int widthheight,
int padding,
int pwidthheight) {
// change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel]
int xy = blockIdx.x * blockDim.x + threadIdx.x;
if (xy >= widthheight)
return;
int ch = blockIdx.y;
int n = blockIdx.z;
Dtype value = in[(n * channels + ch) * widthheight + xy];
__syncthreads();
int xpad = (xy % width + padding);
int ypad = (xy / width + padding);
int xypad = ypad * (width + 2 * padding) + xpad;
out[(n * pwidthheight + xypad) * channels + ch] = value;
}
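// Added worked example (illustrative values, not from the original source): the kernel above
// maps input element (n, ch, y, x), read at offset (n*channels + ch)*widthheight + y*width + x,
// to output offset (n*pwidthheight + (y + padding)*(width + 2*padding) + (x + padding))*channels + ch.
// E.g. for width = height = 4 and padding = 1 (so pwidthheight = 36), element (n=0, ch=2, y=1, x=3)
// has xy = 7, xpad = 4, ypad = 2, xypad = 16, and lands at out[(0*36 + 16)*channels + 2].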
template <typename Dtype>
void Forward_gpu(const Tensor<gpu, 4, Dtype>& out,
const Tensor<gpu, 4, Dtype>& data1,
const Tensor<gpu, 4, Dtype>& data2,
const Tensor<gpu, 4, Dtype>& tmp1,
const Tensor<gpu, 4, Dtype>& tmp2,
int top_channels_,
int top_height_,
int top_width_,
int pad_size_,
bool is_multiply,
int max_displacement_,
int kernel_size_,
int neighborhood_grid_radius_,
int neighborhood_grid_width_,
int kernel_radius_,
int stride1_,
int stride2_,
hipStream_t stream,
hipStream_t stream_tmp1,
hipStream_t stream_tmp2) {
const Dtype* bottom_data1 = data1.dptr_;
const Dtype* bottom_data2 = data2.dptr_;
Dtype* rbot1 = tmp1.dptr_;
Dtype* rbot2 = tmp2.dptr_;
Dtype* top = out.dptr_;
const int bnum = data1.size(0);
const int bchannels = data1.size(1);
const int bheight = data1.size(2);
const int bwidth = data1.size(3);
const int bwidthheight = bwidth * bheight;
const int topcount = top_width_ * top_height_ * top_channels_;
dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK);
int threads_per_block = 16;
dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum);
const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight + 2 * pad_size_);
hipLaunchKernelGGL(( blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp1,
bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
hipLaunchKernelGGL(( blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp2,
bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
const int num = bnum;
const int channels = bchannels;
const int height = bheight + 2 * pad_size_;
const int width = bwidth + 2 * pad_size_;
const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels;
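  // Added note: this is the element count of one rearranged image-1 patch
  // (kernel_size^2 * channels); below it is passed (times sizeof(Dtype)) as the dynamic
  // shared-memory size backing patch_data in CorrelateData.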
if (is_multiply == true) {
// CorrelationLayer
int topThreadCount = topcount;
dim3 totalBlocksCorr(top_width_, top_height_, num);
hipLaunchKernelGGL(( CorrelateData<Dtype>)
, dim3(totalBlocksCorr), dim3(threadsPerBlock), shared_memory_per_block * sizeof(Dtype), stream,
topThreadCount,
num,
top_width_,
top_height_,
top_channels_,
topcount,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
kernel_size_,
stride1_,
stride2_,
width,
height,
channels,
rbot1,
rbot2,
top);
CORRELATION_CUDA_CHECK(hipGetLastError());
} else {
// CorrelationLayer
for (int n = 0; n < num; n++) {
int topThreadCount = topcount;
const int gridSize = (topThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
hipLaunchKernelGGL(( CorrelateDataSubtract<Dtype>)
, dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream, topThreadCount,
num,
n,
top_width_,
top_height_,
top_channels_,
topcount,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
width,
height,
channels,
rbot1,
rbot2,
top);
CORRELATION_CUDA_CHECK(hipGetLastError());
}
}
}
template <typename Dtype>
void Backward_gpu(const Tensor<gpu, 4, Dtype>& out_grad,
const Tensor<gpu, 4, Dtype>& in_grad1,
const Tensor<gpu, 4, Dtype>& in_grad2,
const Tensor<gpu, 4, Dtype>& tmp1,
const Tensor<gpu, 4, Dtype>& tmp2,
int top_channels_,
int top_height_,
int top_width_,
int pad_size_,
bool is_multiply,
int max_displacement_,
int kernel_size_,
int neighborhood_grid_radius_,
int neighborhood_grid_width_,
int kernel_radius_,
int stride1_,
int stride2_,
hipStream_t stream0,
hipStream_t stream1,
int num,
int channels,
int height,
int width) {
// Get top diff, compute bottom diff
const Dtype* top_diff = out_grad.dptr_;
Dtype* bottom0_diff = in_grad1.dptr_;
Dtype* bottom1_diff = in_grad2.dptr_;
const Dtype* rbot1 = tmp1.dptr_;
const Dtype* rbot2 = tmp2.dptr_;
const int paddedheight = height + 2 * pad_size_;
const int paddedwidth = width + 2 * pad_size_;
const int bottomcount = channels * height * width;
int botThreadCount = bottomcount;
const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
// CorrelationLayerBackward
if (is_multiply == true) {
// == Run kernel Backward 0
dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest
const int buffer_size_backw0 = (static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_) /
static_cast<float>(stride1_))) +
1) *
top_channels_;
// == Run kernel Backward 0
for (int n = 0; n < num; n++) {
hipLaunchKernelGGL(( CorrelateDataBackward0<Dtype>)
, dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream0, botThreadCount,
num,
n,
top_width_,
top_height_,
top_channels_,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
width,
height,
paddedwidth,
paddedheight,
channels,
bottomcount,
pad_size_,
bottom0_diff,
rbot2,
top_diff);
CORRELATION_CUDA_CHECK(hipGetLastError());
}
// == Run kernel Backward 1
for (int n = 0; n < num; n++) {
hipLaunchKernelGGL(( CorrelateDataBackward1<Dtype>)
, dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream1, botThreadCount,
num,
n,
top_width_,
top_height_,
top_channels_,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
width,
height,
paddedwidth,
paddedheight,
channels,
bottomcount,
pad_size_,
rbot1,
bottom1_diff,
top_diff);
CORRELATION_CUDA_CHECK(hipGetLastError());
}
} else {
for (int n = 0; n < num; n++) {
// Bottom0:
hipLaunchKernelGGL(( CorrelateDataBackward0Subtract<Dtype>)
, dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream0, botThreadCount,
num,
n,
top_width_,
top_height_,
top_channels_,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
width,
height,
paddedwidth,
paddedheight,
channels,
bottomcount,
pad_size_,
bottom0_diff,
rbot1,
rbot2,
top_diff);
CORRELATION_CUDA_CHECK(hipGetLastError());
}
for (int n = 0; n < num; n++) {
// Bottom1:
hipLaunchKernelGGL(( CorrelateDataBackward1Subtract<Dtype>)
, dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream1, botThreadCount,
num,
n,
top_width_,
top_height_,
top_channels_,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
width,
height,
paddedwidth,
paddedheight,
channels,
bottomcount,
pad_size_,
rbot1,
rbot2,
bottom1_diff,
top_diff);
CORRELATION_CUDA_CHECK(hipGetLastError());
}
}
}
} // namespace cuda
template <typename Dtype>
inline void CorrelationForward(const Tensor<gpu, 4, Dtype>& out,
const Tensor<gpu, 4, Dtype>& data1,
const Tensor<gpu, 4, Dtype>& data2,
const Tensor<gpu, 4, Dtype>& tmp1,
const Tensor<gpu, 4, Dtype>& tmp2,
int top_channels_,
int top_height_,
int top_width_,
int pad_size_,
bool is_multiply,
int max_displacement_,
int kernel_size_,
int neighborhood_grid_radius_,
int neighborhood_grid_width_,
int kernel_radius_,
int stride1_,
int stride2_) {
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
hipStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_);
hipStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_);
cuda::Forward_gpu(out,
data1,
data2,
tmp1,
tmp2,
top_channels_,
top_height_,
top_width_,
pad_size_,
is_multiply,
max_displacement_,
kernel_size_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
stream,
stream_tmp1,
stream_tmp2);
}
template <typename Dtype>
inline void CorrelationBackward(const Tensor<gpu, 4, Dtype>& out_grad,
const Tensor<gpu, 4, Dtype>& in_grad1,
const Tensor<gpu, 4, Dtype>& in_grad2,
const Tensor<gpu, 4, Dtype>& tmp1,
const Tensor<gpu, 4, Dtype>& tmp2,
int top_channels_,
int top_height_,
int top_width_,
int pad_size_,
bool is_multiply,
int max_displacement_,
int kernel_size_,
int neighborhood_grid_radius_,
int neighborhood_grid_width_,
int kernel_radius_,
int stride1_,
int stride2_,
int num,
int channels,
int height,
int width) {
hipStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_);
hipStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_);
cuda::Backward_gpu(out_grad,
in_grad1,
in_grad2,
tmp1,
tmp2,
top_channels_,
top_height_,
top_width_,
pad_size_,
is_multiply,
max_displacement_,
kernel_size_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
stream0,
stream1,
num,
channels,
height,
width);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template <>
Operator* CreateOp<gpu>(CorrelationParam param, int dtype) {
Operator* op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new CorrelationOp<gpu, DType>(param); });
return op;
}
} // namespace op
} // namespace mxnet
| d9e97d3b7a97141103bc516dce14ee633bd78237.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file Correation.cu
* \brief Correlation operator
* \author Xu Dong
*/
#include "./correlation-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "./mxnet_op.h"
#define ROUND_OFF 50000
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define CORRELATION_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// == Correlation Kernel
template <typename Dtype>
__global__ void CorrelateData(const int nthreads,
int num,
int topwidth,
int topheight,
int topchannels,
int topcount,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int kernel_size,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int bottomchannels,
const Dtype* bottom0,
const Dtype* bottom1,
Dtype* top) {
extern __shared__ char patch_data_char[];
Dtype* patch_data = reinterpret_cast<Dtype*>(patch_data_char);
  // Upper-left corner of the image-1 patch (kernel window) for this output position
int x1 = blockIdx.x * stride1 + max_displacement;
int y1 = blockIdx.y * stride1 + max_displacement;
int item = blockIdx.z;
int ch_off = threadIdx.x;
// Load 3D patch into shared shared memory
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int idx1 = ((item * bottomheight + y1 + j) * bottomwidth + x1 + i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ Dtype sum[THREADS_PER_WARP * WARPS_PER_BLOCK];
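  // Added note: each of the THREADS_PER_WARP * WARPS_PER_BLOCK threads accumulates a partial
  // dot product over the channels it owns (ch stepping by the block size); thread 0 then
  // reduces these partial sums into the final output value below.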
// Compute correlation
for (int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int x2 = x1 + s2o;
int y2 = y1 + s2p;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + ch;
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if (ch_off == 0) {
Dtype total_sum = 0;
for (int idx = 0; idx < THREADS_PER_WARP * WARPS_PER_BLOCK; idx++) {
total_sum += sum[idx];
}
const int sumelems = kernel_size * kernel_size * bottomchannels;
const int index = ((top_channel * topheight + blockIdx.y) * topwidth) + blockIdx.x;
top[index + item * topcount] = total_sum / static_cast<float>(sumelems);
} // Aggregate result of different threads
}
}
// == Correlation Backward Pass Kernel (For data1)
template <typename Dtype>
__global__ void CorrelateDataBackward0(const int nthreads,
int num,
int item,
int topwidth,
int topheight,
int topchannels,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int pbottomwidth,
int pbottomheight,
int bottomchannels,
int bottomcount,
int pad_size,
Dtype* bottom0diff,
const Dtype* bottom1,
const Dtype* topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
    // We add round_off_s1 before the int division and subtract round_off after it,
    // to ensure the formula matches ceil behavior:
    int xmin = (l - 2 * kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 -
               round_off;  // ceil (l - 2*kernel_radius - max_displacement) / stride1
    int ymin = (m - 2 * kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 -
               round_off;  // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
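    // Added note: [xmin, xmax] x [ymin, ymax] (before clamping) is exactly the set of top
    // positions whose kernel window covered bottom pixel (l, m) in the forward pass, i.e. the
    // positions that receive a contribution from this pixel and therefore send gradient back.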
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth - 1, xmax);
ymin = max(0, ymin);
ymax = min(topheight - 1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 =
((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o)) * bottomchannels + n;
Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p + neighborhood_grid_radius) * neighborhood_grid_width +
(o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot1tmp;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1(const int nthreads,
int num,
int item,
int topwidth,
int topheight,
int topchannels,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int pbottomwidth,
int pbottomheight,
int bottomchannels,
int bottomcount,
int pad_size,
const Dtype* bottom0,
Dtype* bottom1diff,
const Dtype* topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
        // We add round_off_s1 before the int division and subtract round_off after it,
        // to ensure the formula matches ceil behavior:
        int xmin = (l - 2 * kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 +
                   1 - round_off;
        // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
        int ymin = (m - 2 * kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 +
                   1 - round_off;
        // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth - 1, xmax);
ymin = max(0, ymin);
ymax = min(topheight - 1, ymax);
// Get bottom0 data:
int idxbot0 =
((item * pbottomheight + (m - s2p)) * pbottomwidth + (l - s2o)) * bottomchannels + n;
          Dtype bot0tmp = bottom0[idxbot0];  // bottom0[l-s2o,m-s2p,n]
// Index offset for topdiff in following loops:
int op = (p + neighborhood_grid_radius) * neighborhood_grid_width +
(o + neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot0tmp;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Kernel Subtraction
template <typename Dtype>
__global__ void CorrelateDataSubtract(const int nthreads,
int num,
int item,
int topwidth,
int topheight,
int topchannels,
int topcount,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int bottomchannels,
const Dtype* bottom0,
const Dtype* bottom1,
Dtype* top) {
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % topwidth; // w-pos
int y = (index / topwidth) % topheight; // h-pos
int c = (index / topwidth / topheight) % topchannels; // channels
// Offset of patch in image 2
int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
// First (upper left) position of kernel center in current neighborhood in image 1
int x1 = x * stride1 + kernel_radius + max_displacement;
int y1 = y * stride1 + kernel_radius + max_displacement;
// Iterate through 3D patch
Dtype sum = 0;
for (int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT
for (int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH
for (int l = 0; l < bottomchannels; l++) { // CHANNELS
// Calculate position in image 2
int x2 = x1 + s2o;
int y2 = y1 + s2p;
// Indices in bottom data: (CH=l,W=x2,H=y2,N)
int idx1 = ((item * bottomheight + y1 + j) * bottomwidth + x1 + i) * bottomchannels + l;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + l;
// Do the correlation:
sum += fabsf(bottom0[idx1] - bottom1[idx2]);
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
top[index + item * topcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 0)
template <typename Dtype>
__global__ void CorrelateDataBackward0Subtract(const int nthreads,
int num,
int item,
int topwidth,
int topheight,
int topchannels,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int pbottomwidth,
int pbottomheight,
int bottomchannels,
int bottomcount,
int pad_size,
Dtype* bottom0diff,
const Dtype* bottom0,
const Dtype* bottom1,
const Dtype* topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + l) * bottomchannels + n;
    // We add round_off_s1 before the int division and subtract round_off after it,
    // to ensure the formula matches ceil behavior:
    int xmin = (l - 2 * kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 -
               round_off;  // ceil (l - 2*kernel_radius - max_displacement) / stride1
    int ymin = (m - 2 * kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 -
               round_off;  // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth - 1, xmax);
ymin = max(0, ymin);
ymax = min(topheight - 1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 =
((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o)) * bottomchannels + n;
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(1.0) : Dtype(-1.0);
// Index offset for topdiff in following loops:
int op = (p + neighborhood_grid_radius) * neighborhood_grid_width +
(o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1Subtract(const int nthreads,
int num,
int item,
int topwidth,
int topheight,
int topchannels,
int max_displacement,
int neighborhood_grid_radius,
int neighborhood_grid_width,
int kernel_radius,
int stride1,
int stride2,
int bottomwidth,
int bottomheight,
int pbottomwidth,
int pbottomheight,
int bottomchannels,
int bottomcount,
int pad_size,
const Dtype* bottom0,
const Dtype* bottom1,
Dtype* bottom1diff,
const Dtype* topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + l) * bottomchannels + n;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
        // We add round_off_s1 before the int division and subtract round_off after it,
        // to ensure the formula matches ceil behavior:
        int xmin = (l - 2 * kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 +
                   1 - round_off;
        // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
        int ymin = (m - 2 * kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 +
                   1 - round_off;
        // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth - 1, xmax);
ymin = max(0, ymin);
ymax = min(topheight - 1, ymax);
// Get bottom0 data:
int idxbot0 =
((item * pbottomheight + (m - s2p)) * pbottomwidth + (l - s2o)) * bottomchannels + n;
          // bottom0[l-s2o,m-s2p,n]
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(-1.0) : Dtype(1.0);
// Index offset for topdiff in following loops:
int op = (p + neighborhood_grid_radius) * neighborhood_grid_width +
(o + neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Forward
// == Dimension rearrangement Kernel
template <typename Dtype>
__global__ void blob_rearrange_kernel2(const Dtype* in,
Dtype* out,
int num,
int channels,
int width,
int height,
int widthheight,
int padding,
int pwidthheight) {
// change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel]
int xy = blockIdx.x * blockDim.x + threadIdx.x;
if (xy >= widthheight)
return;
int ch = blockIdx.y;
int n = blockIdx.z;
Dtype value = in[(n * channels + ch) * widthheight + xy];
__syncthreads();
int xpad = (xy % width + padding);
int ypad = (xy / width + padding);
int xypad = ypad * (width + 2 * padding) + xpad;
out[(n * pwidthheight + xypad) * channels + ch] = value;
}
template <typename Dtype>
void Forward_gpu(const Tensor<gpu, 4, Dtype>& out,
const Tensor<gpu, 4, Dtype>& data1,
const Tensor<gpu, 4, Dtype>& data2,
const Tensor<gpu, 4, Dtype>& tmp1,
const Tensor<gpu, 4, Dtype>& tmp2,
int top_channels_,
int top_height_,
int top_width_,
int pad_size_,
bool is_multiply,
int max_displacement_,
int kernel_size_,
int neighborhood_grid_radius_,
int neighborhood_grid_width_,
int kernel_radius_,
int stride1_,
int stride2_,
cudaStream_t stream,
cudaStream_t stream_tmp1,
cudaStream_t stream_tmp2) {
const Dtype* bottom_data1 = data1.dptr_;
const Dtype* bottom_data2 = data2.dptr_;
Dtype* rbot1 = tmp1.dptr_;
Dtype* rbot2 = tmp2.dptr_;
Dtype* top = out.dptr_;
const int bnum = data1.size(0);
const int bchannels = data1.size(1);
const int bheight = data1.size(2);
const int bwidth = data1.size(3);
const int bwidthheight = bwidth * bheight;
const int topcount = top_width_ * top_height_ * top_channels_;
dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK);
int threads_per_block = 16;
dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum);
const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight + 2 * pad_size_);
blob_rearrange_kernel2<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp1>>>(
bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
blob_rearrange_kernel2<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp2>>>(
bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
const int num = bnum;
const int channels = bchannels;
const int height = bheight + 2 * pad_size_;
const int width = bwidth + 2 * pad_size_;
const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels;
if (is_multiply == true) {
// CorrelationLayer
int topThreadCount = topcount;
dim3 totalBlocksCorr(top_width_, top_height_, num);
CorrelateData<Dtype>
<<<totalBlocksCorr, threadsPerBlock, shared_memory_per_block * sizeof(Dtype), stream>>>(
topThreadCount,
num,
top_width_,
top_height_,
top_channels_,
topcount,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
kernel_size_,
stride1_,
stride2_,
width,
height,
channels,
rbot1,
rbot2,
top);
CORRELATION_CUDA_CHECK(cudaGetLastError());
} else {
// CorrelationLayer
for (int n = 0; n < num; n++) {
int topThreadCount = topcount;
const int gridSize = (topThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CorrelateDataSubtract<Dtype>
<<<gridSize, kMaxThreadsPerBlock, 0, stream>>>(topThreadCount,
num,
n,
top_width_,
top_height_,
top_channels_,
topcount,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
width,
height,
channels,
rbot1,
rbot2,
top);
CORRELATION_CUDA_CHECK(cudaGetLastError());
}
}
}
template <typename Dtype>
void Backward_gpu(const Tensor<gpu, 4, Dtype>& out_grad,
const Tensor<gpu, 4, Dtype>& in_grad1,
const Tensor<gpu, 4, Dtype>& in_grad2,
const Tensor<gpu, 4, Dtype>& tmp1,
const Tensor<gpu, 4, Dtype>& tmp2,
int top_channels_,
int top_height_,
int top_width_,
int pad_size_,
bool is_multiply,
int max_displacement_,
int kernel_size_,
int neighborhood_grid_radius_,
int neighborhood_grid_width_,
int kernel_radius_,
int stride1_,
int stride2_,
cudaStream_t stream0,
cudaStream_t stream1,
int num,
int channels,
int height,
int width) {
// Get top diff, compute bottom diff
const Dtype* top_diff = out_grad.dptr_;
Dtype* bottom0_diff = in_grad1.dptr_;
Dtype* bottom1_diff = in_grad2.dptr_;
const Dtype* rbot1 = tmp1.dptr_;
const Dtype* rbot2 = tmp2.dptr_;
const int paddedheight = height + 2 * pad_size_;
const int paddedwidth = width + 2 * pad_size_;
const int bottomcount = channels * height * width;
int botThreadCount = bottomcount;
const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
// CorrelationLayerBackward
if (is_multiply == true) {
// == Run kernel Backward 0
dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest
const int buffer_size_backw0 = (static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_) /
static_cast<float>(stride1_))) +
1) *
top_channels_;
// == Run kernel Backward 0
for (int n = 0; n < num; n++) {
CorrelateDataBackward0<Dtype>
<<<gridSize, kMaxThreadsPerBlock, 0, stream0>>>(botThreadCount,
num,
n,
top_width_,
top_height_,
top_channels_,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
width,
height,
paddedwidth,
paddedheight,
channels,
bottomcount,
pad_size_,
bottom0_diff,
rbot2,
top_diff);
CORRELATION_CUDA_CHECK(cudaGetLastError());
}
// == Run kernel Backward 1
for (int n = 0; n < num; n++) {
CorrelateDataBackward1<Dtype>
<<<gridSize, kMaxThreadsPerBlock, 0, stream1>>>(botThreadCount,
num,
n,
top_width_,
top_height_,
top_channels_,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
width,
height,
paddedwidth,
paddedheight,
channels,
bottomcount,
pad_size_,
rbot1,
bottom1_diff,
top_diff);
CORRELATION_CUDA_CHECK(cudaGetLastError());
}
} else {
for (int n = 0; n < num; n++) {
// Bottom0:
CorrelateDataBackward0Subtract<Dtype>
<<<gridSize, kMaxThreadsPerBlock, 0, stream0>>>(botThreadCount,
num,
n,
top_width_,
top_height_,
top_channels_,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
width,
height,
paddedwidth,
paddedheight,
channels,
bottomcount,
pad_size_,
bottom0_diff,
rbot1,
rbot2,
top_diff);
CORRELATION_CUDA_CHECK(cudaGetLastError());
}
for (int n = 0; n < num; n++) {
// Bottom1:
CorrelateDataBackward1Subtract<Dtype>
<<<gridSize, kMaxThreadsPerBlock, 0, stream1>>>(botThreadCount,
num,
n,
top_width_,
top_height_,
top_channels_,
max_displacement_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
width,
height,
paddedwidth,
paddedheight,
channels,
bottomcount,
pad_size_,
rbot1,
rbot2,
bottom1_diff,
top_diff);
CORRELATION_CUDA_CHECK(cudaGetLastError());
}
}
}
} // namespace cuda
template <typename Dtype>
inline void CorrelationForward(const Tensor<gpu, 4, Dtype>& out,
const Tensor<gpu, 4, Dtype>& data1,
const Tensor<gpu, 4, Dtype>& data2,
const Tensor<gpu, 4, Dtype>& tmp1,
const Tensor<gpu, 4, Dtype>& tmp2,
int top_channels_,
int top_height_,
int top_width_,
int pad_size_,
bool is_multiply,
int max_displacement_,
int kernel_size_,
int neighborhood_grid_radius_,
int neighborhood_grid_width_,
int kernel_radius_,
int stride1_,
int stride2_) {
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
cudaStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_);
cudaStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_);
cuda::Forward_gpu(out,
data1,
data2,
tmp1,
tmp2,
top_channels_,
top_height_,
top_width_,
pad_size_,
is_multiply,
max_displacement_,
kernel_size_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
stream,
stream_tmp1,
stream_tmp2);
}
template <typename Dtype>
inline void CorrelationBackward(const Tensor<gpu, 4, Dtype>& out_grad,
const Tensor<gpu, 4, Dtype>& in_grad1,
const Tensor<gpu, 4, Dtype>& in_grad2,
const Tensor<gpu, 4, Dtype>& tmp1,
const Tensor<gpu, 4, Dtype>& tmp2,
int top_channels_,
int top_height_,
int top_width_,
int pad_size_,
bool is_multiply,
int max_displacement_,
int kernel_size_,
int neighborhood_grid_radius_,
int neighborhood_grid_width_,
int kernel_radius_,
int stride1_,
int stride2_,
int num,
int channels,
int height,
int width) {
cudaStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_);
cudaStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_);
cuda::Backward_gpu(out_grad,
in_grad1,
in_grad2,
tmp1,
tmp2,
top_channels_,
top_height_,
top_width_,
pad_size_,
is_multiply,
max_displacement_,
kernel_size_,
neighborhood_grid_radius_,
neighborhood_grid_width_,
kernel_radius_,
stride1_,
stride2_,
stream0,
stream1,
num,
channels,
height,
width);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template <>
Operator* CreateOp<gpu>(CorrelationParam param, int dtype) {
Operator* op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new CorrelationOp<gpu, DType>(param); });
return op;
}
} // namespace op
} // namespace mxnet
|
e15c9514455564bb5a6d302b146a8c8373804265.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <df/optimization/deformationGraphRegularization.h>
#include <df/util/cudaHelpers.h>
#include <sophus/se3.hpp> // TODO
#include <df/util/dualQuaternion.h> // TODO
namespace df {
//template <typename Scalar, int K>
//struct InsertionIndexSearchUnroller {
// typedef Eigen::Matrix<int,K,1,Eigen::DontAlign> NNVec;
// typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
// __host__ __device__
// static inline void search(int & insertionIndex,
// const NNVec & currentNeighbors,
// const Vec3 & voxelCenter,
// const Scalar distance,
// const DeviceTensor1<Vec3> & deformationGraphVertices) {
// const int neighborIndex = currentNeighbors(K-1);
// if (neighborIndex >= 0) {
// const Vec3 & neighborVertex = deformationGraphVertices(neighborIndex);
// const Vec3 neighborDiff = voxelCenter - neighborVertex;
// const Scalar neighborDistance = neighborDiff.squaredNorm();
// if (neighborDistance > distance) {
// insertionIndex = K-1;
// } else {
// return;
// }
// } else {
// insertionIndex = K-1;
// }
// InsertionIndexSearchUnroller<Scalar,K-1>::search(insertionIndex, currentNeighbors.template head<K-1>(),
// voxelCenter, distance, deformationGraphVertices);
// }
//};
//template <typename Scalar>
//struct InsertionIndexSearchUnroller<Scalar, 0> {
// typedef Eigen::Matrix<int,0,1,Eigen::DontAlign> NNVec;
// typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
// __host__ __device__
// static inline void search(int & /*insertionIndex*/,
// const NNVec & /*currentNeighbors*/,
// const Vec3 & /*voxelCenter*/,
// const Scalar /*distance*/,
// const DeviceTensor1<Vec3> & /*deformationGraphVertices*/) { }
//};
//// TODO: make work for anisotropic grids
//template <typename Scalar, int K>
//__global__ void updateDeformationGraphNearestNeighborsKernel(Tensor<3,Eigen::Matrix<int,K,1,Eigen::DontAlign>,DeviceResident> nearestNeighborGrid,
// const DeviceTensor1<Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> > deformationGraphVertices,
// const Eigen::Matrix<int,3,1,Eigen::DontAlign> offset,
// const Eigen::Matrix<int,3,1,Eigen::DontAlign> max,
// const Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> vertex, const int index) {
// typedef Eigen::Matrix<int,K,1,Eigen::DontAlign> NNVec;
// typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
// const int x = offset(0) + threadIdx.x + blockDim.x * blockIdx.x;
// const int y = offset(1) + threadIdx.y + blockDim.y * blockIdx.y;
// const int z = offset(2) + threadIdx.z + blockDim.z * blockIdx.z;
// if (x < max(0) && y < max(1) && z < max(2)) {
//// printf("checking %d,%d,%d\n",x,y,z);
// const Vec3 diff = Vec3(x,y,z) - vertex;
// const Scalar distance = diff.squaredNorm();
//// printf("distance = %f\n",distance);
// NNVec & currentNeighbors = nearestNeighborGrid(x,y,z);
//// printf("current = %d %d %d %d\n",currentNeighbors(0),currentNeighbors(1),currentNeighbors(2),currentNeighbors(3));
// int insertionIndex = -1;
// InsertionIndexSearchUnroller<Scalar,K>::search(insertionIndex,currentNeighbors,Vec3(x,y,z),
// distance, deformationGraphVertices);
//// for (int k = K-1; k >= 0; --k) {
//// const int neighborIndex = currentNeighbors(k);
//// if ( neighborIndex >= 0 ) {
//// const Eigen::Map<const Vec3> neighborVertex(&deformationGraphVertices(0,neighborIndex));
//// const Vec3 neighborDiff = Vec3(x,y,z) - neighborVertex;
//// const Scalar neighborDistance = neighborDiff.squaredNorm();
//// if (neighborDistance > distance) {
//// // inserted index is closer
//// insertionIndex = k;
//// } else {
//// // inserted index is farther, the search ends
//// break;
//// }
//// } else {
//// insertionIndex = k;
//// }
//// }
// // check if the inserted vertex belongs in the updated nearest neighbor list
// if (insertionIndex >= 0) {
// for (int k = K-1; k > insertionIndex; --k) {
// currentNeighbors(k) = currentNeighbors(k-1);
// }
// currentNeighbors(insertionIndex) = index;
// }
// }
//}
//template <typename Scalar, int K>
//void updateDeformationGraphNearestNeighbors(VoxelGrid<Scalar,Eigen::Matrix<int,K,1,Eigen::DontAlign>,DeviceResident> & nearestNeighborGrid,
// const DeviceTensor1<Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> > & deformationGraphVertices,
// const Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> & vertex, const int index,
// const Scalar nearestNeighborSigma, const int nSigmas = 3) {
// typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
// typedef Eigen::Matrix<int,3,1,Eigen::DontAlign> Vec3i;
// const Vec3 nSigmaExtent = nearestNeighborSigma * nSigmas * nearestNeighborGrid.worldToGridScale();
// const Vec3 vertexInGridCoords = nearestNeighborGrid.worldToGrid(vertex);
// const Vec3i boundingBoxMin = (vertexInGridCoords - nSigmaExtent).template cast<int>().cwiseMax(Vec3i(0,0,0));
// const Vec3i boundingBoxMax = (vertexInGridCoords + nSigmaExtent + Scalar(0.99999)*Vec3::Ones()).template cast<int>()
// .cwiseMin(nearestNeighborGrid.dimensions().template cast<int>() - Vec3i::Ones());
// std::cout << vertex.transpose() << std::endl;
// std::cout << boundingBoxMin.transpose() << " -> " << boundingBoxMax.transpose() << std::endl;
// const Vec3i boundingBoxSize = boundingBoxMax - boundingBoxMin;
// const dim3 block(16,16,4);
// const dim3 grid(intDivideAndCeil(boundingBoxSize(0),(int)block.x),
// intDivideAndCeil(boundingBoxSize(1),(int)block.y),
// intDivideAndCeil(boundingBoxSize(2),(int)block.z));
// updateDeformationGraphNearestNeighborsKernel<<<grid,block>>>(nearestNeighborGrid.grid(),deformationGraphVertices,boundingBoxMin,boundingBoxMax,vertex,index);
// hipDeviceSynchronize();
// CheckCudaDieOnError();
//}
//template <typename Scalar, template <typename,int...> class TransformT, int K>
//void computeDeformationGraphNearestNeighbors(VoxelGrid<Scalar,Eigen::Matrix<int,K,1,Eigen::DontAlign>,DeviceResident> & nearestNeighborGrid,
// const NonrigidTransformer<Scalar,TransformT> & transformer,
// const Scalar nearestNeighborSigma) {
// typedef Eigen::Matrix<int,K,1,Eigen::DontAlign> NNVec;
// typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
// NNVec initialNearestNeighborList = -1 * NNVec::Ones();
// nearestNeighborGrid.fill(initialNearestNeighborList);
// const uint numBaseLevelVertices = transformer.numVerticesAtLevel(0);
// ManagedDeviceTensor1<Vec3> baseLevelVertices( numBaseLevelVertices );
// ConstHostTensor1<Vec3> hostBaseLevelVertices(baseLevelVertices.length(), transformer.deformationGraphVertices(0) );
// baseLevelVertices.copyFrom(hostBaseLevelVertices);
// for (uint index = 0; index < numBaseLevelVertices; ++index) {
// const Vec3 & vertex = transformer.deformationGraphVertices(0)[index];
// updateDeformationGraphNearestNeighbors(nearestNeighborGrid,baseLevelVertices,vertex,index,nearestNeighborSigma);
// }
//}
//template void computeDeformationGraphNearestNeighbors(VoxelGrid<float,Eigen::Matrix<int,4,1,Eigen::DontAlign>,DeviceResident> &,
// const NonrigidTransformer<float,Sophus::SE3Group> &,
// const float);
//template void computeDeformationGraphNearestNeighbors(VoxelGrid<float,Eigen::Matrix<int,4,1,Eigen::DontAlign>,DeviceResident> &,
// const NonrigidTransformer<float,DualQuaternion> &,
// const float);
} // namespace df
| e15c9514455564bb5a6d302b146a8c8373804265.cu | #include <df/optimization/deformationGraphRegularization.h>
#include <df/util/cudaHelpers.h>
#include <sophus/se3.hpp> // TODO
#include <df/util/dualQuaternion.h> // TODO
namespace df {
//template <typename Scalar, int K>
//struct InsertionIndexSearchUnroller {
// typedef Eigen::Matrix<int,K,1,Eigen::DontAlign> NNVec;
// typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
// __host__ __device__
// static inline void search(int & insertionIndex,
// const NNVec & currentNeighbors,
// const Vec3 & voxelCenter,
// const Scalar distance,
// const DeviceTensor1<Vec3> & deformationGraphVertices) {
// const int neighborIndex = currentNeighbors(K-1);
// if (neighborIndex >= 0) {
// const Vec3 & neighborVertex = deformationGraphVertices(neighborIndex);
// const Vec3 neighborDiff = voxelCenter - neighborVertex;
// const Scalar neighborDistance = neighborDiff.squaredNorm();
// if (neighborDistance > distance) {
// insertionIndex = K-1;
// } else {
// return;
// }
// } else {
// insertionIndex = K-1;
// }
// InsertionIndexSearchUnroller<Scalar,K-1>::search(insertionIndex, currentNeighbors.template head<K-1>(),
// voxelCenter, distance, deformationGraphVertices);
// }
//};
//template <typename Scalar>
//struct InsertionIndexSearchUnroller<Scalar, 0> {
// typedef Eigen::Matrix<int,0,1,Eigen::DontAlign> NNVec;
// typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
// __host__ __device__
// static inline void search(int & /*insertionIndex*/,
// const NNVec & /*currentNeighbors*/,
// const Vec3 & /*voxelCenter*/,
// const Scalar /*distance*/,
// const DeviceTensor1<Vec3> & /*deformationGraphVertices*/) { }
//};
//// TODO: make work for anisotropic grids
//template <typename Scalar, int K>
//__global__ void updateDeformationGraphNearestNeighborsKernel(Tensor<3,Eigen::Matrix<int,K,1,Eigen::DontAlign>,DeviceResident> nearestNeighborGrid,
// const DeviceTensor1<Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> > deformationGraphVertices,
// const Eigen::Matrix<int,3,1,Eigen::DontAlign> offset,
// const Eigen::Matrix<int,3,1,Eigen::DontAlign> max,
// const Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> vertex, const int index) {
// typedef Eigen::Matrix<int,K,1,Eigen::DontAlign> NNVec;
// typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
// const int x = offset(0) + threadIdx.x + blockDim.x * blockIdx.x;
// const int y = offset(1) + threadIdx.y + blockDim.y * blockIdx.y;
// const int z = offset(2) + threadIdx.z + blockDim.z * blockIdx.z;
// if (x < max(0) && y < max(1) && z < max(2)) {
//// printf("checking %d,%d,%d\n",x,y,z);
// const Vec3 diff = Vec3(x,y,z) - vertex;
// const Scalar distance = diff.squaredNorm();
//// printf("distance = %f\n",distance);
// NNVec & currentNeighbors = nearestNeighborGrid(x,y,z);
//// printf("current = %d %d %d %d\n",currentNeighbors(0),currentNeighbors(1),currentNeighbors(2),currentNeighbors(3));
// int insertionIndex = -1;
// InsertionIndexSearchUnroller<Scalar,K>::search(insertionIndex,currentNeighbors,Vec3(x,y,z),
// distance, deformationGraphVertices);
//// for (int k = K-1; k >= 0; --k) {
//// const int neighborIndex = currentNeighbors(k);
//// if ( neighborIndex >= 0 ) {
//// const Eigen::Map<const Vec3> neighborVertex(&deformationGraphVertices(0,neighborIndex));
//// const Vec3 neighborDiff = Vec3(x,y,z) - neighborVertex;
//// const Scalar neighborDistance = neighborDiff.squaredNorm();
//// if (neighborDistance > distance) {
//// // inserted index is closer
//// insertionIndex = k;
//// } else {
//// // inserted index is farther, the search ends
//// break;
//// }
//// } else {
//// insertionIndex = k;
//// }
//// }
// // check if the inserted vertex belongs in the updated nearest neighbor list
// if (insertionIndex >= 0) {
// for (int k = K-1; k > insertionIndex; --k) {
// currentNeighbors(k) = currentNeighbors(k-1);
// }
// currentNeighbors(insertionIndex) = index;
// }
// }
//}
//template <typename Scalar, int K>
//void updateDeformationGraphNearestNeighbors(VoxelGrid<Scalar,Eigen::Matrix<int,K,1,Eigen::DontAlign>,DeviceResident> & nearestNeighborGrid,
// const DeviceTensor1<Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> > & deformationGraphVertices,
// const Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> & vertex, const int index,
// const Scalar nearestNeighborSigma, const int nSigmas = 3) {
// typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
// typedef Eigen::Matrix<int,3,1,Eigen::DontAlign> Vec3i;
// const Vec3 nSigmaExtent = nearestNeighborSigma * nSigmas * nearestNeighborGrid.worldToGridScale();
// const Vec3 vertexInGridCoords = nearestNeighborGrid.worldToGrid(vertex);
// const Vec3i boundingBoxMin = (vertexInGridCoords - nSigmaExtent).template cast<int>().cwiseMax(Vec3i(0,0,0));
// const Vec3i boundingBoxMax = (vertexInGridCoords + nSigmaExtent + Scalar(0.99999)*Vec3::Ones()).template cast<int>()
// .cwiseMin(nearestNeighborGrid.dimensions().template cast<int>() - Vec3i::Ones());
// std::cout << vertex.transpose() << std::endl;
// std::cout << boundingBoxMin.transpose() << " -> " << boundingBoxMax.transpose() << std::endl;
// const Vec3i boundingBoxSize = boundingBoxMax - boundingBoxMin;
// const dim3 block(16,16,4);
// const dim3 grid(intDivideAndCeil(boundingBoxSize(0),(int)block.x),
// intDivideAndCeil(boundingBoxSize(1),(int)block.y),
// intDivideAndCeil(boundingBoxSize(2),(int)block.z));
// updateDeformationGraphNearestNeighborsKernel<<<grid,block>>>(nearestNeighborGrid.grid(),deformationGraphVertices,boundingBoxMin,boundingBoxMax,vertex,index);
// cudaDeviceSynchronize();
// CheckCudaDieOnError();
//}
//template <typename Scalar, template <typename,int...> class TransformT, int K>
//void computeDeformationGraphNearestNeighbors(VoxelGrid<Scalar,Eigen::Matrix<int,K,1,Eigen::DontAlign>,DeviceResident> & nearestNeighborGrid,
// const NonrigidTransformer<Scalar,TransformT> & transformer,
// const Scalar nearestNeighborSigma) {
// typedef Eigen::Matrix<int,K,1,Eigen::DontAlign> NNVec;
// typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
// NNVec initialNearestNeighborList = -1 * NNVec::Ones();
// nearestNeighborGrid.fill(initialNearestNeighborList);
// const uint numBaseLevelVertices = transformer.numVerticesAtLevel(0);
// ManagedDeviceTensor1<Vec3> baseLevelVertices( numBaseLevelVertices );
// ConstHostTensor1<Vec3> hostBaseLevelVertices(baseLevelVertices.length(), transformer.deformationGraphVertices(0) );
// baseLevelVertices.copyFrom(hostBaseLevelVertices);
// for (uint index = 0; index < numBaseLevelVertices; ++index) {
// const Vec3 & vertex = transformer.deformationGraphVertices(0)[index];
// updateDeformationGraphNearestNeighbors(nearestNeighborGrid,baseLevelVertices,vertex,index,nearestNeighborSigma);
// }
//}
//template void computeDeformationGraphNearestNeighbors(VoxelGrid<float,Eigen::Matrix<int,4,1,Eigen::DontAlign>,DeviceResident> &,
// const NonrigidTransformer<float,Sophus::SE3Group> &,
// const float);
//template void computeDeformationGraphNearestNeighbors(VoxelGrid<float,Eigen::Matrix<int,4,1,Eigen::DontAlign>,DeviceResident> &,
// const NonrigidTransformer<float,DualQuaternion> &,
// const float);
} // namespace df
|
da726d40717396d7ea008eeabbbaa36fe5ee3421.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#define DOPRINT
// A is the matrix, i is the column index, and m is the # of rows
#define COL(A, i, m) (A + (i)*m)
// A is the matrix, i is the row index, and n is the # of columns
#define ROW(A, i) (A + i)
// A is the matrix, i is the row number, j is the column number, and m is the # of rows
#define ENTRY(A, i, j, m) (A + (j)*m + i)
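// Usage example (added for clarity): for a column-major m x n matrix A with leading dimension m,
// ENTRY(A, i, j, m) == A + i + j*m points at A(i,j), COL(A, j, m) at the start of column j, and
// ROW(A, i) at row i (whose elements are then strided by m) -- the cuBLAS storage convention.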
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <float.h>
#include "common.h"
static float *V;
float *H, *p;
// defined in eq 2.4, p. 3
// never gets past the if statement in our examples
float pi_epsilon(const float y, const float ep) {
if (y >= ep) return y;
if (y <= -ep) return 0;
float c0, c1, c2, c4, c6, c8;
float e2 = ep*ep;
float e3 = e2*ep;
c0 = 35./256*ep;
c1 = 0.5;
c2 = 35./(64*ep);
c4 = -35./(128*e3);
c6 = 7./(64*e3*e2);
c8 = -5./(256*e3*e3*ep);
float y2 = y*y;
float y4 = y2*y2;
float y6 = y4*y2;
float y8 = y4*y4;
return c0 + c1*y + c2*y2 + c4*y4 + c6*y6 + c8*y8;
}
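// note: pi_epsilon is a smooth version of the ramp max(y, 0): it returns y for y >= ep,
// 0 for y <= -ep, and a degree-8 polynomial blend inside (-ep, ep); it is used below to
// smooth the payoff kink at the strike when the first row of U is initialized.
// e.g. with the ep = .0001 used in main, pi_epsilon(1.0f, ep) is exactly 1.0f and
// pi_epsilon(-1.0f, ep) is exactly 0.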
// volatility; given the time t and the spatial point x, returns the local
// volatility at (t, x)
inline float sigma(const float t, const float x) {
return 0.2 + 0.2*(1 - t)*((x/25 - 1.2)*(x/25 - 1.2)/((x/25)*(x/25) + 1.44));
}
// returns beta; fills in H and V
// I'll assume that these are all device pointers and that cuda has been initialized properly,
// etc: except for H, which is a host pointer, for convenience (to me, but probably not to anyone
// else reading this)
// Also, I'm assuming that V is zeroed out; it will mess me up if it isn't
float arnoldi(hipblasHandle_t handle, const float *A, const float *v, const int n, const int m, float *H_h) {
hipblasStatus_t status;
hipError_t error;
float beta, beta_inv, h_ij, one = 1, zero = 0;
int i, j, k;
status = hipblasSnrm2(handle, n, v, 1, &beta);
checkCublasStatus(status);
// so beta is correct
beta_inv = 1./beta;
// V.col(0) = v/beta;
status = hipblasSaxpy(handle, n, &beta_inv, v, 1, V, 1);
checkCublasStatus(status);
for (j = 0; j < m - 1; j++) {
// p = A*V.col(j);
status = hipblasSgemv(handle, HIPBLAS_OP_N, n, n, &one, A, n, COL(V, j, n), 1, &zero, p, 1);
checkCublasStatus(status);
for (i = 0; i <= j; i++) {
// H(i, j) = cdot(V.col(i), p);
status = hipblasSdot(handle, n, p, 1, COL(V, i, n), 1, ENTRY(H_h, i, j, m));
checkCublasStatus(status);
// p -= H(i, j)*V.col(i);
h_ij = -(*ENTRY(H_h, i, j, m));
status = hipblasSaxpy(handle, n, &h_ij, COL(V, i, n), 1, p, 1);
checkCublasStatus(status);
}
// so p is correct when j == 0, as is norm(p)
// H(j + 1, j) = norm(p);
status = hipblasSnrm2(handle, n, p, 1, ENTRY(H_h, j + 1, j, m));
checkCublasStatus(status);
h_ij = 1./(*(ENTRY(H_h, j + 1, j, m)));
// V.col(j + 1) = p/H(j + 1, j);
status = hipblasSaxpy(handle, n, &h_ij, p, 1, COL(V, j + 1, n), 1);
checkCublasStatus(status);
}
// p = A*V.col(m - 1);
status = hipblasSgemv(handle, HIPBLAS_OP_N, n, n, &one, A, n, COL(V, m - 1, n), 1, &zero, p, 1);
checkCublasStatus(status);
for (i = 0; i <= m - 1; i++) {
// H(i, m - 1) = cdot(V.col(i), p);
status = hipblasSdot(handle, n, p, 1, COL(V, i, n), 1, ENTRY(H_h, i, m - 1, m));
checkCublasStatus(status);
// p -= H(i, m - 1)*V.col(i);
h_ij = -(*ENTRY(H_h, i, m - 1, m));
status = hipblasSaxpy(handle, n, &h_ij, COL(V, i, n), 1, p, 1);
checkCublasStatus(status);
}
return beta;
}
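// in short: arnoldi builds an orthonormal basis V (n x m, column major) of the Krylov
// subspace span{v, A*v, ..., A^(m-1)*v} together with the small Hessenberg matrix
// H = V^T*A*V (m x m), so that a matrix function times a vector can be approximated as
// f(A)*v ~= beta * V * f(H) * e_0 with beta = ||v||; krylov() below uses this with
// f = exp (expo == 1) or f = phi (expo == 0).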
// TODO: Can overwrite H with E in expcuda.c, since we only need it as input to that function
// actually not anymore, with phi as it is
// MAKE SURE THAT I KEEP TRACK OF DEVICE/HOST PTRS
// all the pointers passed here are device pointers
void krylov(hipblasHandle_t handle, float *w, int m, float l, const float *A, int n, const float *v, int expo) {
// remember to zero out V
hipError_t error;
hipblasStatus_t status;
float *H_h = 0, *E = 0, zero = 0, beta;
int i, j;
error = hipMemset((void*)V, 0, n*m*sizeof(V[0]));
checkError(error);
H_h = (float *)malloc(m*m*sizeof(H_h[0]));
if (H_h == 0) {
fprintf(stderr, "Malloc of H failed\n");
exit(1);
}
memset((void*)H_h, 0, m*m*sizeof(H_h[0]));
beta = arnoldi(handle, A, v, n, m, H_h);
error = hipMalloc((void**)&E, m*m*sizeof(E[0]));
checkError(error);
error = hipMemcpy(H, H_h, m*m*sizeof(H_h[0]), hipMemcpyHostToDevice);
checkError(error);
// scale H by l
status = hipblasSscal(handle, m*m, &l, H, 1);
checkCublasStatus(status);
if (expo == 1) padeExp(handle, H, E, m);
else phi(handle, H, E, m);
// w = beta*V*matrix_exp(l*H)*e_0;
// so instead of having e_0, I can calculate the product w/o it, and
// copy the first column of it into w
status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, m, &beta, V, n, E, m, &zero, V, n);
checkCublasStatus(status);
// get first column of bVe and copy it into w
status = hipblasScopy(handle, n, V, 1, w, 1);
checkCublasStatus(status);
// free everything
free(H_h);
error = hipFree(E);
checkError(error);
}
int main(int argc, char *argv[]) {
// we are going to need a vector of x_i's
// which depend on S_max, K, and N
// we'll hardcode those here
hipError_t error;
hipblasStatus_t status;
hipblasHandle_t handle;
culaStatus culaS;
//clock_t begin, end;
//double time_spent;
// k, confusingly named, is the dimension of the Krylov subspace to project onto
FILE *fp;
int k = 50;
float K = 25;
float S_max = 4*K;
int N = atoi(argv[1]);
int M = 200;
float epsilon = .0001;
float r = .06;
char filename[100];
sprintf(filename,"outputs/cuda%d_%d.txt", M, N);
// l is the time step
float l = 1./(M - 1);
// h is the x step
float h = S_max/(N - 1);
float *U = 0, *U_h, *A;
float zero = 0;
int i, j;
float t, *a, *b, *c, *f, *v, *w1, *w2;
//begin = clock();
status = hipblasCreate(&handle);
checkCublasStatus(status);
culaS = culaInitialize();
checkCulaStatus(culaS);
// we will have a matrix U, where the ij^th entry is the value of the option at time t = t_i
// and x = x_j
// the dimensions of this matrix will be M*N
// M is the time axis
// N is the x axis
// We keep U as device storage, because we don't need it till the end
error = hipMalloc((void **)&U, M*N*sizeof(U[0]));
checkError(error);
U_h = (float*)malloc(M*N*sizeof(U_h[0]));
if (U_h == 0) {
fprintf(stderr, "Error with malloc of U_h\n");
exit(1);
}
error = hipMalloc((void **)&p, N*sizeof(p[0]));
checkError(error);
error = hipMalloc((void **)&H, k*k*sizeof(H[0]));
checkError(error);
error = hipMalloc((void **)&V, k*N*sizeof(V[0]));
checkError(error);
// fill with zeros
error = hipMemset(U, 0, M*N*sizeof(U[0]));
checkError(error);
// we can determine the values of the U_0j^th entries
// This could be done in parallel, but it's only done once so it's
// probably not worth it
for (j = 0; j < N; j++) {
*ENTRY(U_h, 0, j, M) = pi_epsilon(h*j - K, epsilon);
}
// copy row of U_h to U
status = hipblasSetVector(N, sizeof(U_h[0]), U_h, M, U, M);
checkCublasStatus(status);
// now we need to fill in the A matrix, which is a function of t
// so let's loop over t
// we'll allocate A only once, same for a, b, and c, except we don't have
// to zero those out
error = hipMalloc((void **)&A, N*N*sizeof(A[0]));
checkError(error);
error = hipMemset((void*)A, 0, N*N*sizeof(A[0]));
checkError(error);
error = hipMalloc((void **)&f, N*sizeof(f[0]));
checkError(error);
error = hipMemset((void*)f, 0, N*sizeof(f[0]));
checkError(error);
error = hipMalloc((void **)&a, N*N*sizeof(a[0]));
checkError(error);
error = hipMalloc((void **)&b, N*N*sizeof(b[0]));
checkError(error);
error = hipMalloc((void **)&c, N*N*sizeof(c[0]));
checkError(error);
error = hipMalloc((void **)&v, N*sizeof(v[0]));
checkError(error);
error = hipMalloc((void **)&w1, N*sizeof(w1[0]));
checkError(error);
error = hipMalloc((void **)&w2, N*sizeof(w2[0]));
checkError(error);
int blockSize = 16;
int threadsPerBlock = blockSize*blockSize;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
dim3 threads(blockSize, blockSize);
dim3 grid((N + blockSize - 1)/blockSize, (N + blockSize - 1)/blockSize);
for (i = 0; i < M - 1; i++) {
t = i*l;
hipLaunchKernelGGL(( initabc), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, a, b, c, N, t, h);
hipLaunchKernelGGL(( initA), dim3(grid), dim3(threads), 0, 0, a, b, c, A, N);
// now we need f(t) at x_0 and x_N
// see 2.14
float sigSq = sigma(t, (N-1)*h);
sigSq *= sigSq;
float cN = sigSq*(N-1)*(N-1)/2. + r*(N-1)/2.;
float f_N = (S_max - K*exp(-t*r))*cN;
status = hipblasSetVector(1, sizeof(f_N), &f_N, 1, f + N - 1, 1);
checkCublasStatus(status);
status = hipblasScopy(handle, N, ROW(U, i), M, v, 1);
checkCublasStatus(status);
// so A and v are correct; the issue is something inside krylov
krylov(handle, w1, k, l, A, N, v, 1);
krylov(handle, w2, k, l, A, N, f, 0);
status = hipblasSaxpy(handle, N, &l, w2, 1, w1, 1);
checkCublasStatus(status);
status = hipblasScopy(handle, N, w1, 1, ROW(U, i + 1), M);
checkCublasStatus(status);
}
#ifdef DOPRINT
// copy to U_h
status = hipblasGetVector(M*N, sizeof(U[0]), U, 1, U_h, 1);
fp = fopen(filename, "w");
for (i = 0; i < M; i++) {
float t = i*l;
for (j = 0; j < N; j++) {
float x = j*h;
fprintf(fp, "%f %f %f\n", t, x, *ENTRY(U_h, i, j, M));
}
}
#endif
// cleanup
free(U_h);
error = hipFree(p);
checkError(error);
error = hipFree(f);
checkError(error);
error = hipFree(A);
checkError(error);
error = hipFree(V);
checkError(error);
error = hipFree(H);
checkError(error);
error = hipFree(U);
checkError(error);
error = hipFree(v);
checkError(error);
error = hipFree(w1);
checkError(error);
error = hipFree(w2);
checkError(error);
error = hipFree(a);
checkError(error);
error = hipFree(b);
checkError(error);
error = hipFree(c);
checkError(error);
status = hipblasDestroy(handle);
checkCublasStatus(status);
culaShutdown();
//end = clock();
//time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
//printf("%d %f\n", N, time_spent);
return 0;
}
| da726d40717396d7ea008eeabbbaa36fe5ee3421.cu | //#define DOPRINT
// A is the matrix, i is the column index, and m is the # of rows
#define COL(A, i, m) (A + (i)*m)
// A is the matrix, i is the row index, and n is the # of columns
#define ROW(A, i) (A + i)
// A is the matrix, i is the row number, j is the column number, and m is the # of rows
#define ENTRY(A, i, j, m) (A + (j)*m + i)
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <float.h>
#include "common.h"
static float *V;
float *H, *p;
// defined in eq 2.4, p. 3
// never gets past the if statement in our examples
float pi_epsilon(const float y, const float ep) {
if (y >= ep) return y;
if (y <= -ep) return 0;
float c0, c1, c2, c4, c6, c8;
float e2 = ep*ep;
float e3 = e2*ep;
c0 = 35./256*ep;
c1 = 0.5;
c2 = 35./(64*ep);
c4 = -35./(128*e3);
c6 = 7./(64*e3*e2);
c8 = -5./(256*e3*e3*ep);
float y2 = y*y;
float y4 = y2*y2;
float y6 = y4*y2;
float y8 = y4*y4;
return c0 + c1*y + c2*y2 + c4*y4 + c6*y6 + c8*y8;
}
// volatility; given t, returns a vector containing the volatility at that time
// and xi
inline float sigma(const float t, const float x) {
return 0.2 + 0.2*(1 - t)*((x/25 - 1.2)*(x/25 - 1.2)/((x/25)*(x/25) + 1.44));
}
// returns beta; fills in H and V
// I'll assume that these are all device pointers and that cuda has been initialized properly,
// etc: except for H, which is a host pointer, for convenience (to me, but probably not to anyone
// else reading this)
// Also, I'm assuming that V is zeroed out; it will mess me up if it isn't
float arnoldi(cublasHandle_t handle, const float *A, const float *v, const int n, const int m, float *H_h) {
cublasStatus_t status;
cudaError_t error;
float beta, beta_inv, h_ij, one = 1, zero = 0;
int i, j, k;
status = cublasSnrm2(handle, n, v, 1, &beta);
checkCublasStatus(status);
// so beta is correct
beta_inv = 1./beta;
// V.col(0) = v/beta;
status = cublasSaxpy(handle, n, &beta_inv, v, 1, V, 1);
checkCublasStatus(status);
for (j = 0; j < m - 1; j++) {
// p = A*V.col(j);
status = cublasSgemv(handle, CUBLAS_OP_N, n, n, &one, A, n, COL(V, j, n), 1, &zero, p, 1);
checkCublasStatus(status);
for (i = 0; i <= j; i++) {
// H(i, j) = cdot(V.col(i), p);
status = cublasSdot(handle, n, p, 1, COL(V, i, n), 1, ENTRY(H_h, i, j, m));
checkCublasStatus(status);
// p -= H(i, j)*V.col(i);
h_ij = -(*ENTRY(H_h, i, j, m));
status = cublasSaxpy(handle, n, &h_ij, COL(V, i, n), 1, p, 1);
checkCublasStatus(status);
}
// so p is correct when j == 0, as is norm(p)
// H(j + 1, j) = norm(p);
status = cublasSnrm2(handle, n, p, 1, ENTRY(H_h, j + 1, j, m));
checkCublasStatus(status);
h_ij = 1./(*(ENTRY(H_h, j + 1, j, m)));
// V.col(j + 1) = p/H(j + 1, j);
status = cublasSaxpy(handle, n, &h_ij, p, 1, COL(V, j + 1, n), 1);
checkCublasStatus(status);
}
// p = A*V.col(m - 1);
status = cublasSgemv(handle, CUBLAS_OP_N, n, n, &one, A, n, COL(V, m - 1, n), 1, &zero, p, 1);
checkCublasStatus(status);
for (i = 0; i <= m - 1; i++) {
// H(i, m - 1) = cdot(V.col(i), p);
status = cublasSdot(handle, n, p, 1, COL(V, i, n), 1, ENTRY(H_h, i, m - 1, m));
checkCublasStatus(status);
// p -= H(i, m - 1)*V.col(i);
h_ij = -(*ENTRY(H_h, i, m - 1, m));
status = cublasSaxpy(handle, n, &h_ij, COL(V, i, n), 1, p, 1);
checkCublasStatus(status);
}
return beta;
}
// TODO: Can overwrite H with E in expcuda.c, since we only need it as input to that function
// actually not anymore, with phi as it is
// MAKE SURE THAT I KEEP TRACK OF DEVICE/HOST PTRS
// all the pointers passed here are device pointers
void krylov(cublasHandle_t handle, float *w, int m, float l, const float *A, int n, const float *v, int expo) {
// remember to zero out V
cudaError_t error;
cublasStatus_t status;
float *H_h = 0, *E = 0, zero = 0, beta;
int i, j;
error = cudaMemset((void*)V, 0, n*m*sizeof(V[0]));
checkError(error);
H_h = (float *)malloc(m*m*sizeof(H_h[0]));
if (H_h == 0) {
fprintf(stderr, "Malloc of H failed\n");
exit(1);
}
memset((void*)H_h, 0, m*m*sizeof(H_h[0]));
beta = arnoldi(handle, A, v, n, m, H_h);
error = cudaMalloc((void**)&E, m*m*sizeof(E[0]));
checkError(error);
error = cudaMemcpy(H, H_h, m*m*sizeof(H_h[0]), cudaMemcpyHostToDevice);
checkError(error);
// scale H by l
status = cublasSscal(handle, m*m, &l, H, 1);
checkCublasStatus(status);
if (expo == 1) padeExp(handle, H, E, m);
else phi(handle, H, E, m);
// w = beta*V*matrix_exp(l*H)*e_0;
// so instead of having e_0, I can calculate the product w/o it, and
// copy the first column of it into w
status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, m, &beta, V, n, E, m, &zero, V, n);
checkCublasStatus(status);
// get first column of bVe and copy it into w
status = cublasScopy(handle, n, V, 1, w, 1);
checkCublasStatus(status);
// free everything
free(H_h);
error = cudaFree(E);
checkError(error);
}
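// the time stepping in main() below calls this twice per step: once with expo == 1 to
// get w1 ~= exp(l*A)*u_i and once with expo == 0 to get w2 ~= phi(l*A)*f, and then
// combines them as u_{i+1} = w1 + l*w2 (an exponential-integrator style update).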
int main(int argc, char *argv[]) {
// we are going to need a vector of x_i's
// which depend on S_max, K, and N
// we'll hardcode those here
cudaError_t error;
cublasStatus_t status;
cublasHandle_t handle;
culaStatus culaS;
//clock_t begin, end;
//double time_spent;
// k, confusingly named, is the dimension of the Krylov subspace to project onto
FILE *fp;
int k = 50;
float K = 25;
float S_max = 4*K;
int N = atoi(argv[1]);
int M = 200;
float epsilon = .0001;
float r = .06;
char filename[100];
sprintf(filename,"outputs/cuda%d_%d.txt", M, N);
// l is the time step
float l = 1./(M - 1);
// h is the x step
float h = S_max/(N - 1);
float *U = 0, *U_h, *A;
float zero = 0;
int i, j;
float t, *a, *b, *c, *f, *v, *w1, *w2;
//begin = clock();
status = cublasCreate(&handle);
checkCublasStatus(status);
culaS = culaInitialize();
checkCulaStatus(culaS);
// we will have a matrix U, where the ij^th entry is the value of the option at time t = t_i
// and x = x_j
// the dimensions of this matrix will be M*N
// M is the time axis
// N is the x axis
// We keep U as device storage, because we don't need it till the end
error = cudaMalloc((void **)&U, M*N*sizeof(U[0]));
checkError(error);
U_h = (float*)malloc(M*N*sizeof(U_h[0]));
if (U_h == 0) {
fprintf(stderr, "Error with malloc of U_h\n");
exit(1);
}
error = cudaMalloc((void **)&p, N*sizeof(p[0]));
checkError(error);
error = cudaMalloc((void **)&H, k*k*sizeof(H[0]));
checkError(error);
error = cudaMalloc((void **)&V, k*N*sizeof(V[0]));
checkError(error);
// fill with zeros
error = cudaMemset(U, 0, M*N*sizeof(U[0]));
checkError(error);
// we can determine the values of the U_0j^th entries
// This could be done in parallel, but it's only done once so it's
// probably not worth it
for (j = 0; j < N; j++) {
*ENTRY(U_h, 0, j, M) = pi_epsilon(h*j - K, epsilon);
}
// copy row of U_h to U
status = cublasSetVector(N, sizeof(U_h[0]), U_h, M, U, M);
checkCublasStatus(status);
// now we need to fill in the A matrix, which is a function of t
// so let's loop over t
// we'll allocate A only once, same for a, b, and c, except we don't have
// to zero those out
error = cudaMalloc((void **)&A, N*N*sizeof(A[0]));
checkError(error);
error = cudaMemset((void*)A, 0, N*N*sizeof(A[0]));
checkError(error);
error = cudaMalloc((void **)&f, N*sizeof(f[0]));
checkError(error);
error = cudaMemset((void*)f, 0, N*sizeof(f[0]));
checkError(error);
error = cudaMalloc((void **)&a, N*N*sizeof(a[0]));
checkError(error);
error = cudaMalloc((void **)&b, N*N*sizeof(b[0]));
checkError(error);
error = cudaMalloc((void **)&c, N*N*sizeof(c[0]));
checkError(error);
error = cudaMalloc((void **)&v, N*sizeof(v[0]));
checkError(error);
error = cudaMalloc((void **)&w1, N*sizeof(w1[0]));
checkError(error);
error = cudaMalloc((void **)&w2, N*sizeof(w2[0]));
checkError(error);
int blockSize = 16;
int threadsPerBlock = blockSize*blockSize;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
dim3 threads(blockSize, blockSize);
dim3 grid((N + blockSize - 1)/blockSize, (N + blockSize - 1)/blockSize);
for (i = 0; i < M - 1; i++) {
t = i*l;
initabc<<<blocksPerGrid, threadsPerBlock>>>(a, b, c, N, t, h);
initA<<<grid, threads>>>(a, b, c, A, N);
// now we need f(t) at x_0 and x_N
// see 2.14
float sigSq = sigma(t, (N-1)*h);
sigSq *= sigSq;
float cN = sigSq*(N-1)*(N-1)/2. + r*(N-1)/2.;
float f_N = (S_max - K*exp(-t*r))*cN;
status = cublasSetVector(1, sizeof(f_N), &f_N, 1, f + N - 1, 1);
checkCublasStatus(status);
status = cublasScopy(handle, N, ROW(U, i), M, v, 1);
checkCublasStatus(status);
// so A and v are correct; the issue is something inside krylov
krylov(handle, w1, k, l, A, N, v, 1);
krylov(handle, w2, k, l, A, N, f, 0);
status = cublasSaxpy(handle, N, &l, w2, 1, w1, 1);
checkCublasStatus(status);
status = cublasScopy(handle, N, w1, 1, ROW(U, i + 1), M);
checkCublasStatus(status);
}
#ifdef DOPRINT
// copy to U_h
status = cublasGetVector(M*N, sizeof(U[0]), U, 1, U_h, 1);
fp = fopen(filename, "w");
for (i = 0; i < M; i++) {
float t = i*l;
for (j = 0; j < N; j++) {
float x = j*h;
fprintf(fp, "%f %f %f\n", t, x, *ENTRY(U_h, i, j, M));
}
}
#endif
// cleanup
free(U_h);
error = cudaFree(p);
checkError(error);
error = cudaFree(f);
checkError(error);
error = cudaFree(A);
checkError(error);
error = cudaFree(V);
checkError(error);
error = cudaFree(H);
checkError(error);
error = cudaFree(U);
checkError(error);
error = cudaFree(v);
checkError(error);
error = cudaFree(w1);
checkError(error);
error = cudaFree(w2);
checkError(error);
error = cudaFree(a);
checkError(error);
error = cudaFree(b);
checkError(error);
error = cudaFree(c);
checkError(error);
status = cublasDestroy(handle);
checkCublasStatus(status);
culaShutdown();
//end = clock();
//time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
//printf("%d %f\n", N, time_spent);
return 0;
}
|
9a36cb98c69243972937bf3afcf4fea4bf98552f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/elementwise_op.h"
#include "caffe2/operators/logit_op.h"
namespace caffe2 {
template <typename T>
__global__ void LogitKernel(const int N, const T* X, const float eps, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = fminf(X[i], (1.0 - eps));
Y[i] = fmaxf(Y[i], eps);
Y[i] = logf(Y[i] / (1.0 - Y[i]));
}
}
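// i.e. Y = log(X / (1 - X)) with X first clamped into [eps, 1 - eps] so the log and the
// division stay finite at the ends of the unit interval.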
__global__ void LogitGradientKernel(
const int N,
const float* X,
const float* dY,
const float eps,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = (X[i] < eps || X[i] > 1.0 - eps) ? 0 : (dY[i] / X[i] / (1 - X[i]));
}
}
struct LogitCUDAFunctor {
explicit LogitCUDAFunctor(OperatorBase& op)
: eps_(op.GetSingleArgument<float>("eps", 1e-6)) {
CAFFE_ENFORCE_GT(eps_, 0.0);
CAFFE_ENFORCE_LT(eps_, 0.5);
}
template <typename T>
inline void
operator()(const int n, const T* x, T* y, CUDAContext* device_context) {
hipLaunchKernelGGL(( LogitKernel<T>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
device_context->cuda_stream(), n, x, eps_, y);
return;
}
private:
float eps_;
};
template <>
bool LogitGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
int n = X.size();
hipLaunchKernelGGL(( LogitGradientKernel),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
n, X.data<float>(), dY.data<float>(), eps_, dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
Logit,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
CUDAContext,
LogitCUDAFunctor>);
REGISTER_CUDA_OPERATOR(LogitGradient, LogitGradientOp<float, CUDAContext>);
} // namespace caffe2
| 9a36cb98c69243972937bf3afcf4fea4bf98552f.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/elementwise_op.h"
#include "caffe2/operators/logit_op.h"
namespace caffe2 {
template <typename T>
__global__ void LogitKernel(const int N, const T* X, const float eps, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = fminf(X[i], (1.0 - eps));
Y[i] = fmaxf(Y[i], eps);
Y[i] = logf(Y[i] / (1.0 - Y[i]));
}
}
__global__ void LogitGradientKernel(
const int N,
const float* X,
const float* dY,
const float eps,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = (X[i] < eps || X[i] > 1.0 - eps) ? 0 : (dY[i] / X[i] / (1 - X[i]));
}
}
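// since d/dx log(x/(1-x)) = 1/(x*(1-x)), the gradient inside the band is dY/(X*(1-X));
// outside [eps, 1 - eps] the forward pass was clamped, so the gradient is set to 0.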
struct LogitCUDAFunctor {
explicit LogitCUDAFunctor(OperatorBase& op)
: eps_(op.GetSingleArgument<float>("eps", 1e-6)) {
CAFFE_ENFORCE_GT(eps_, 0.0);
CAFFE_ENFORCE_LT(eps_, 0.5);
}
template <typename T>
inline void
operator()(const int n, const T* x, T* y, CUDAContext* device_context) {
LogitKernel<T>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
device_context->cuda_stream()>>>(n, x, eps_, y);
return;
}
private:
float eps_;
};
template <>
bool LogitGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
int n = X.size();
LogitGradientKernel<<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
n, X.data<float>(), dY.data<float>(), eps_, dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
Logit,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
CUDAContext,
LogitCUDAFunctor>);
REGISTER_CUDA_OPERATOR(LogitGradient, LogitGradientOp<float, CUDAContext>);
} // namespace caffe2
|
7ad4fa056b4112cf3f23aae2ef7a715864998a63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <ctime>
#include <math.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#define N (500)
#define k (20)
const int threadsPerBlock = 1024;
__global__ void sortKernel(int* dev_arr, int* helper)
{
int schet = 0;
int powtor = 0;
__shared__ int temp[1024];
if (blockIdx.x * blockDim.x + threadIdx.x < N)
{
temp[threadIdx.x] = dev_arr[blockIdx.x * blockDim.x + threadIdx.x];
for (int i = 0; i < N; ++i) {
if (dev_arr[i] == temp[threadIdx.x])
        ++powtor; // position in the result
}
helper[dev_arr[blockIdx.x * blockDim.x + threadIdx.x]] = powtor;
// dev_arr[schet] = temp[threadIdx.x];
}
}
__global__ void sortKernelFinal(int* dev_arr, int* helper)
{
int b = 0;
for (int i = 0; i < k + 1; ++i) {
for (int j = 0; j < helper[i]; ++j) {
dev_arr[b++] = i;
}
}
}
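// together the two kernels amount to a counting sort over the value range [0, k):
// sortKernel stores in helper[value] the number of elements equal to that value, and
// sortKernelFinal rewrites dev_arr in ascending order by expanding those counts.
// sortKernelFinal has no thread indexing, so every launched thread repeats the same
// full rewrite.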
__global__ void get_arr(int* dev_arr) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i = 0;
if (idx < N) {
if (idx < k - 2)
{
if (idx % 2 == 0)
dev_arr[idx] = idx;
else
dev_arr[idx] = idx + 2;
}
else
dev_arr[idx] = idx % k;
printf(" %d, ", dev_arr[idx]);
}
/* if (idx < N) {
dev_arr[idx] = N - idx;
printf(" %d, ", dev_arr[idx]);
}*/
//
}
__global__ void get_arr_zero(int* dev_zero) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < k) {
dev_zero[idx] = 0;
}
//printf(" %d, ", dev_zero[idx]);
}
void get_array_for_CPU(int* mas) {
for (int i = 0; i < N; i++) {
mas[i] = rand() % k;;
}
for (int i = 0; i < N - 1; i++)
printf(" %d, ", mas[i]);
printf("\n");
}
void sort_for_CPU(int* mas, int* masHelper)
{
for (int i = 0; i < N - 1; ++i) {
++masHelper[mas[i]];
}
int b = 0;
for (int i = 0; i < k + 1; ++i) {
for (int j = 0; j < masHelper[i]; ++j) {
mas[b++] = i;
}
}
printf("after sort \n");
for (int i = 0; i < N - 1; i++)
printf(" %d, ", mas[i]);
}
int main() {
//GPU
int* host_arr = new int[N];
int* dev_arr = new int[N];
// int* dev_res = new int[N];
int* dev_help = new int[k];
float elapsedTimeInMs = 0.0f;
hipDeviceReset();
hipMalloc((void**)&dev_arr, N * sizeof(int));
// hipMalloc((void**)&dev_res, N * sizeof(int));
hipMalloc((void**)&dev_help, N * sizeof(int));
get_arr << <dim3(((N + 511) / 512), 1), dim3(threadsPerBlock, 1) >> > (dev_arr);
// get_arr_zero << <dim3(((N + 511) / 512), 1), dim3(threadsPerBlock, 1) >> > (dev_help);
// printf(" -------------------------------------------- \n");
//show_arr << <dim3(((N + 511) / 512), 1), dim3(threadsPerBlock, 1) >> > (dev_arr);
printf("\n");
printf(" -------------------------------------------- \n");
printf(" GPU \n");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipEventSynchronize(start);
hipDeviceSynchronize();
sortKernel << <dim3(((N + 511) / 512), 1), dim3(threadsPerBlock, 1) >> > (dev_arr, dev_help);
sortKernelFinal << <dim3(((N + 511) / 512), 1), dim3(threadsPerBlock, 1) >> > (dev_arr, dev_help);
hipDeviceSynchronize();
hipMemcpy(host_arr, dev_arr, N * sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
printf(" \n");
printf(" -------------------------------------------- \n");
for (int i = 0; i < N - 1; i++)
printf(" %d, ", host_arr[i]);
//for (int i = 0; i < N; i++)
// printf("=>%d", host_arr[i]);
hipFree(dev_arr);
delete[]host_arr;
printf("Time in GPU %f\n", elapsedTimeInMs / 1000);
printf(" -------------------------------------------- \n");
printf(" -------------------------------------------- \n");
printf(" CPU \n");
printf(" -------------------------------------------- \n");
printf(" -------------------------------------------- \n");
//CPU
int* a = new int[N];
int masHelper[k] = { 0 };
clock_t start2;
double time2;
start2 = clock();
get_array_for_CPU(a);
sort_for_CPU(a, masHelper);
time2 = (double)(clock() - start2) / CLOCKS_PER_SEC;
printf("Time in CPU %f\n", time2);
return 0;
} | 7ad4fa056b4112cf3f23aae2ef7a715864998a63.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <ctime>
#include <math.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#define N (500)
#define k (20)
const int threadsPerBlock = 1024;
__global__ void sortKernel(int* dev_arr, int* helper)
{
int schet = 0;
int powtor = 0;
__shared__ int temp[1024];
if (blockIdx.x * blockDim.x + threadIdx.x < N)
{
temp[threadIdx.x] = dev_arr[blockIdx.x * blockDim.x + threadIdx.x];
for (int i = 0; i < N; ++i) {
if (dev_arr[i] == temp[threadIdx.x])
        ++powtor; // position in the result
}
helper[dev_arr[blockIdx.x * blockDim.x + threadIdx.x]] = powtor;
// dev_arr[schet] = temp[threadIdx.x];
}
}
__global__ void sortKernelFinal(int* dev_arr, int* helper)
{
int b = 0;
for (int i = 0; i < k + 1; ++i) {
for (int j = 0; j < helper[i]; ++j) {
dev_arr[b++] = i;
}
}
}
__global__ void get_arr(int* dev_arr) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i = 0;
if (idx < N) {
if (idx < k - 2)
{
if (idx % 2 == 0)
dev_arr[idx] = idx;
else
dev_arr[idx] = idx + 2;
}
else
dev_arr[idx] = idx % k;
printf(" %d, ", dev_arr[idx]);
}
/* if (idx < N) {
dev_arr[idx] = N - idx;
printf(" %d, ", dev_arr[idx]);
}*/
//
}
__global__ void get_arr_zero(int* dev_zero) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < k) {
dev_zero[idx] = 0;
}
//printf(" %d, ", dev_zero[idx]);
}
void get_array_for_CPU(int* mas) {
for (int i = 0; i < N; i++) {
mas[i] = rand() % k;;
}
for (int i = 0; i < N - 1; i++)
printf(" %d, ", mas[i]);
printf("\n");
}
void sort_for_CPU(int* mas, int* masHelper)
{
for (int i = 0; i < N - 1; ++i) {
++masHelper[mas[i]];
}
int b = 0;
for (int i = 0; i < k + 1; ++i) {
for (int j = 0; j < masHelper[i]; ++j) {
mas[b++] = i;
}
}
printf("after sort \n");
for (int i = 0; i < N - 1; i++)
printf(" %d, ", mas[i]);
}
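// sort_for_CPU is the reference counting sort: masHelper[v] counts how often value v
// occurs (the loop stops at N - 1, so the last element is not counted) and the nested
// loops then write each value back out that many times in ascending order.
// the same pattern as a standalone sketch (illustrative only; it assumes <vector>,
// which this file does not include):
// void counting_sort(int* a, int n, int range) {
//     std::vector<int> count(range, 0);
//     for (int i = 0; i < n; ++i) ++count[a[i]];
//     int out = 0;
//     for (int v = 0; v < range; ++v)
//         for (int c = 0; c < count[v]; ++c) a[out++] = v;
// }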
int main() {
//GPU
int* host_arr = new int[N];
int* dev_arr = new int[N];
// int* dev_res = new int[N];
int* dev_help = new int[k];
float elapsedTimeInMs = 0.0f;
cudaDeviceReset();
cudaMalloc((void**)&dev_arr, N * sizeof(int));
// cudaMalloc((void**)&dev_res, N * sizeof(int));
cudaMalloc((void**)&dev_help, N * sizeof(int));
get_arr << <dim3(((N + 511) / 512), 1), dim3(threadsPerBlock, 1) >> > (dev_arr);
// get_arr_zero << <dim3(((N + 511) / 512), 1), dim3(threadsPerBlock, 1) >> > (dev_help);
// printf(" -------------------------------------------- \n");
//show_arr << <dim3(((N + 511) / 512), 1), dim3(threadsPerBlock, 1) >> > (dev_arr);
printf("\n");
printf(" -------------------------------------------- \n");
printf(" GPU \n");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
cudaThreadSynchronize();
sortKernel << <dim3(((N + 511) / 512), 1), dim3(threadsPerBlock, 1) >> > (dev_arr, dev_help);
sortKernelFinal << <dim3(((N + 511) / 512), 1), dim3(threadsPerBlock, 1) >> > (dev_arr, dev_help);
cudaThreadSynchronize();
cudaMemcpy(host_arr, dev_arr, N * sizeof(int), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
printf(" \n");
printf(" -------------------------------------------- \n");
for (int i = 0; i < N - 1; i++)
printf(" %d, ", host_arr[i]);
//for (int i = 0; i < N; i++)
// printf("=>%d", host_arr[i]);
cudaFree(dev_arr);
delete[]host_arr;
printf("Time in GPU %f\n", elapsedTimeInMs / 1000);
printf(" -------------------------------------------- \n");
printf(" -------------------------------------------- \n");
printf(" CPU \n");
printf(" -------------------------------------------- \n");
printf(" -------------------------------------------- \n");
//CPU
int* a = new int[N];
int masHelper[k] = { 0 };
clock_t start2;
double time2;
start2 = clock();
get_array_for_CPU(a);
sort_for_CPU(a, masHelper);
time2 = (double)(clock() - start2) / CLOCKS_PER_SEC;
printf("Time in CPU %f\n", time2);
return 0;
} |
4846fc4c26ee2693cf71a81bec764806fcb31553.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <string>
#include <stdlib.h>
#include <hip/hip_runtime.h>
using namespace std;
#define THREAD_SIZE 8
#define FUNC(a,b) ((a) + (b) - 1)/(b)
#define HD2(x0,y0) ((x0) + (y0) * data->x)
#define HD2_1(x0,y0) ((x0) + (y0) * data.x)
#define HD3(x0,y0,z0) ((x0) + (y0) * data->x + (z0) * data->x * data->y)
#define HD3_1(x0,y0,z0) ((x0) + (y0) * data.x + (z0) * data.x * data.y)
struct BLOCK
{
int x;
int y;
int z;
int width;
int height;
int depth;
float temp;
};
struct CONF
{
int dimension;
float n;
int num_step;
int x;
int y;
int z;
float def_temp; //default starting temperature for nodes
vector<BLOCK> heat_block;
};
void read(string & path, CONF & data) {
fstream theconf(path, ios_base::in);
string s;
    int s_num = 0;
if (!theconf.is_open()) {
cerr << "file cannot be opened.\n";
}
while (getline(theconf, s)) {
if (s[0] == '#' || s[0] == ' ') {
continue;
}
if (s_num == 0) {
if (s[0] == '2') {
data.dimension = 2;
} else {
data.dimension = 3;
}
} else if (s_num == 1) {
data.n = stof(s);
cout << "n = " << data.n << endl;
} else if (s_num == 2) {
data.num_step = stoi(s);
cout << "timestep = " << data.num_step << endl;
} else if (s_num == 3) {
stringstream ss(s);
ss.str(s);
char c;
if (data.dimension == 2) {
ss >> data.x >> c >> data.y;
data.z = 1;
cout << "width = " << data.x << endl;
cout << "height = " << data.y << endl;
cout << "depth = " << data.z << endl;
}
else if (data.dimension == 3) {
ss >> data.x >> c >> data.y >> c >> data.z;
cout << "width = " << data.x << endl;
cout << "height = " << data.y << endl;
cout << "depth = " << data.z << endl;
}
}
else if (s_num == 4) {
data.def_temp = stof(s);
}
else{
stringstream ss(s);
ss.str(s);
char c;
BLOCK b;
if (data.dimension == 2) {
ss >> b.x >> c >> b.y >> c >> b.width >> c >> b.height >> c >> b.temp;
b.z = 0; b.depth = 1;
cout << "fixed blocks: " << endl;
cout << "(" << b.x << ',' << b.y << ',' << b.z << "): width = " << b.width << " height = " << b.height << " depth = " << b.depth << " temp = " << b.temp << endl;
}
else if (data.dimension == 3) {
ss >> b.x >> c >> b.y >> c >> b.z >> c >> b.width >> c >> b.height >> c >> b.depth >> c >> b.temp;
cout << "fixed blocks: " << endl;
cout << "(" << b.x << ',' << b.y << ',' << b.z << "): width = " << b.width << " height = " << b.height << " depth = " << b.depth << " temp = " << b.temp << endl;
}
data.heat_block.push_back(b);
}
s_num++;
}
theconf.close();
}
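// layout of the config file parsed above (lines starting with '#' or a space are
// skipped): the first non-comment line gives the dimension (2 or 3), then come the
// coefficient n, the number of timesteps, the grid size as "x,y" or "x,y,z", the
// default node temperature, and finally one line per fixed block, either
// "x,y,width,height,temp" (2D) or "x,y,z,width,height,depth,temp" (3D).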
// set every node to the default temperature, then apply the fixed heater block temperatures
void initialize(CONF &data, vector<float> &u) {
u.assign(data.x * data.y * data.z, data.def_temp);
for (int i = 0; i < data.heat_block.size(); i++) {
BLOCK b = data.heat_block[i];
for (int m = b.z; m < b.z + b.depth; m++) {
for (int j = b.y; j < b.y + b.height; j++) {
for (int n = b.x; n < b.x + b.width; n++) {
u[HD3_1(n, j, m)] = b.temp;
}
}
}
}
}
//output csv file
void print(const CONF &data, const vector<float> &u) {
char filename[] = "heat2D3Doutput.csv";
ofstream fout(filename);
for (int m = 0; m < data.z; m++) {
for (int j = 0; j < data.y; j++) {
for (int n = 0; n < data.x - 1; n++) {
fout << u[HD3_1(n, j, m)] << ',';
}
fout << u[HD3_1(data.x - 1, j, m)] << endl;
}
fout << endl;
}
fout.close();
}
// Decide whether the node sits inside a fixed heater block: true means hold it fixed, false means update it
__device__ bool changeBLO(int x0, int y0, BLOCK* &heat_block, int count) {
for (int i = 0; i < count; i++) {
if (x0 >= heat_block[i].x && x0 < heat_block[i].x + heat_block[i].width &&
y0 >= heat_block[i].y && y0 < heat_block[i].y + heat_block[i].height)
//z0 >= heat_block[i].z && z0 < heat_block[i].z + heat_block[i].depth
return true;
}
return false;
}
__device__ bool changeBLO(int x0, int y0, int z0, BLOCK* &heat_block, int count) {
for (int i = 0; i < count; i++) {
if (x0 >= heat_block[i].x && x0 < heat_block[i].x + heat_block[i].width &&
y0 >= heat_block[i].y && y0 < heat_block[i].y + heat_block[i].height &&
z0 >= heat_block[i].z && z0 < heat_block[i].z + heat_block[i].depth)
return true;
}
return false;
}
__global__ void heat2D(float *u, float *u_new, CONF *data, BLOCK *pHeater, int count){
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
//int z = blockDim.z * blockIdx.z;
if (x < data->x && y < data->y) {
if (changeBLO(x, y, pHeater, count)) {
// fixed blocks, u_new = u
u_new[HD2(x, y)] = u[HD2(x, y)];
}
else
{
if (x == 0 && y == 0) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x + 1, y)] + u[HD2(x, y + 1)] - 2 * u[HD2(x, y)]);
else if (x == 0 && y == data->y - 1) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x + 1, y)] + u[HD2(x, y - 1)] - 2 * u[HD2(x, y)]);
else if (x == 0) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x + 1, y)] + u[HD2(x, y - 1)] + u[HD2(x, y + 1)] - 3 * u[HD2(x, y)]);
else if (x == data->x - 1 && y == 0) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x, y + 1)] - 2 * u[HD2(x, y)]);
else if (x == data->x - 1 && y == data->y - 1) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x, y - 1)] - 2 * u[HD2(x, y)]);
else if (x == data->x - 1) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x, y - 1)] + u[HD2(x, y + 1)] - 3 * u[HD2(x, y)]);
else if (y == 0) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x + 1, y)] + u[HD2(x, y + 1)] - 3 * u[HD2(x, y)]);
else if (y == data->y - 1) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x + 1, y)] + u[HD2(x, y - 1)] - 3 * u[HD2(x, y)]);
else u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x + 1, y)] + u[HD2(x, y - 1)] + u[HD2(x, y + 1)] - 4 * u[HD2(x, y)]);
}
}
float *temp = u_new;
u_new = u;
u = temp;
}
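// explicit finite-difference step: u_new = u + n*(sum of in-range neighbors - (#neighbors)*u),
// with heater nodes copied through unchanged. the swap at the end only exchanges the
// kernel-local copies of the pointers; the buffers that persist between steps are
// swapped on the host in the loop in main().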
__global__ void heat3D(float *u, float *u_new, CONF *data, BLOCK *pHeater, int count) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int z = blockDim.z * blockIdx.z;
if (x < data->x && y < data->y && z < data->z) {
if (changeBLO(x, y, z, pHeater, count)) {
u_new[HD3(x, y, z)] = u[HD3(x, y, z)];
}
else {
if (x == 0 && y == 0 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 3 * u[HD3(x, y, z)]);
else if (x == 0 && y == 0 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 3 * u[HD3(x, y, z)]);
else if (x == 0 && y == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == 0 && y == data->y - 1 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z + 1)] - 3 * u[HD3(x, y, z)]);
else if (x == 0 && y == data->y - 1 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] - 3 * u[HD3(x, y, z)]);
else if (x == 0 && y == data->y - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == 0 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == 0 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 4 * u[HD3(x, y, z)]);
else if (x == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 5 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == 0 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 3 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == 0 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 3 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == data->y - 1 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z + 1)] - 3 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == data->y - 1 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] - 3 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == data->y - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 4 * u[HD3(x, y, z)]);
else if (x == data->x - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 5 * u[HD3(x, y, z)]);
else if (y == 0 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (y == 0 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 4 * u[HD3(x, y, z)]);
else if (y == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 5 * u[HD3(x, y, z)]);
else if (y == data->y - 1 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (y == data->y - 1 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] - 4 * u[HD3(x, y, z)]);
else if (y == data->y - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 5 * u[HD3(x, y, z)]);
else if (z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 5 * u[HD3(x, y, z)]);
else if (z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 5 * u[HD3(x, y, z)]);
else u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 6 * u[HD3(x, y, z)]);
}
}
float *temp = u_new;
u_new = u;
u = temp;
}
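// the 3D kernel is the same explicit scheme with up to 6 face neighbors per node,
// u_new = u + n*(sum of neighbors - (#neighbors)*u); the long if/else ladder only drops
// the out-of-range neighbors (and lowers the multiplier accordingly) on the faces,
// edges and corners of the domain.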
//global variable
CONF data;
int main(int argc, char* argv[]) {
if (argc != 2) {
cout << "Input error." << endl;
return -1;
}
dim3 numBlocks, threadsPerBlock;
//read file
string path = argv[1];
read(path, data);
//initialize
vector<float> u;
vector<float> u_new;
initialize(data, u);
u_new = u;
//set threads and blocks
threadsPerBlock = dim3(THREAD_SIZE, THREAD_SIZE);
numBlocks = dim3(FUNC(data.x, THREAD_SIZE), FUNC(data.y, THREAD_SIZE), data.z);
//allocate temperature array space on device
float *d_u, *d_u_new, *temp;
CONF* d_data;
int size = data.x * data.y * data.z;
BLOCK* d_heater;
int count = data.heat_block.size();
hipMalloc((void **)&d_u, size * sizeof(float));
hipMalloc((void **)&d_u_new, size * sizeof(float));
hipMalloc((void **)&d_data, sizeof(CONF));
hipMalloc((void **)&d_heater, count * sizeof(BLOCK));
//copy inputs to device
hipMemcpy(d_u_new, &u_new[0], size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_data, &data, sizeof(CONF), hipMemcpyHostToDevice);
hipMemcpy(d_heater, data.heat_block.data(), count * sizeof(BLOCK), hipMemcpyHostToDevice);
hipMemcpy(d_u, &u[0], size * sizeof(float), hipMemcpyHostToDevice);
//gpu loop
for (int i = 0; i < data.num_step; i++) {
if (data.dimension == 2){
hipLaunchKernelGGL(( heat2D) , dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_u, d_u_new, d_data, d_heater, count);
}
else{
hipLaunchKernelGGL(( heat3D) , dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_u, d_u_new, d_data, d_heater, count);
}
temp = d_u_new;
d_u_new = d_u;
d_u = temp;
}
//copy temperature array from device to host
hipMemcpy(&u[0], d_u, size * sizeof(float), hipMemcpyDeviceToHost);
print(data, u);
//free memory
hipFree(d_u);
hipFree(d_u_new);
hipFree(d_data);
hipFree(d_heater);
return 0;
} | 4846fc4c26ee2693cf71a81bec764806fcb31553.cu | #include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <string>
#include <stdlib.h>
#include <cuda.h>
using namespace std;
#define THREAD_SIZE 8
#define FUNC(a,b) ((a) + (b) - 1)/(b)
#define HD2(x0,y0) ((x0) + (y0) * data->x)
#define HD2_1(x0,y0) ((x0) + (y0) * data.x)
#define HD3(x0,y0,z0) ((x0) + (y0) * data->x + (z0) * data->x * data->y)
#define HD3_1(x0,y0,z0) ((x0) + (y0) * data.x + (z0) * data.x * data.y)
struct BLOCK
{
int x;
int y;
int z;
int width;
int height;
int depth;
float temp;
};
struct CONF
{
int dimension;
float n;
int num_step;
int x;
int y;
int z;
float def_temp; //default starting temperature for nodes
vector<BLOCK> heat_block;
};
void read(string & path, CONF & data) {
fstream theconf(path, ios_base::in);
string s;
    int s_num = 0;
if (!theconf.is_open()) {
cerr << "file cannot be opened.\n";
}
while (getline(theconf, s)) {
if (s[0] == '#' || s[0] == ' ') {
continue;
}
if (s_num == 0) {
if (s[0] == '2') {
data.dimension = 2;
} else {
data.dimension = 3;
}
} else if (s_num == 1) {
data.n = stof(s);
cout << "n = " << data.n << endl;
} else if (s_num == 2) {
data.num_step = stoi(s);
cout << "timestep = " << data.num_step << endl;
} else if (s_num == 3) {
stringstream ss(s);
ss.str(s);
char c;
if (data.dimension == 2) {
ss >> data.x >> c >> data.y;
data.z = 1;
cout << "width = " << data.x << endl;
cout << "height = " << data.y << endl;
cout << "depth = " << data.z << endl;
}
else if (data.dimension == 3) {
ss >> data.x >> c >> data.y >> c >> data.z;
cout << "width = " << data.x << endl;
cout << "height = " << data.y << endl;
cout << "depth = " << data.z << endl;
}
}
else if (s_num == 4) {
data.def_temp = stof(s);
}
else{
stringstream ss(s);
ss.str(s);
char c;
BLOCK b;
if (data.dimension == 2) {
ss >> b.x >> c >> b.y >> c >> b.width >> c >> b.height >> c >> b.temp;
b.z = 0; b.depth = 1;
cout << "fixed blocks: " << endl;
cout << "(" << b.x << ',' << b.y << ',' << b.z << "): width = " << b.width << " height = " << b.height << " depth = " << b.depth << " temp = " << b.temp << endl;
}
else if (data.dimension == 3) {
ss >> b.x >> c >> b.y >> c >> b.z >> c >> b.width >> c >> b.height >> c >> b.depth >> c >> b.temp;
cout << "fixed blocks: " << endl;
cout << "(" << b.x << ',' << b.y << ',' << b.z << "): width = " << b.width << " height = " << b.height << " depth = " << b.depth << " temp = " << b.temp << endl;
}
data.heat_block.push_back(b);
}
s_num++;
}
theconf.close();
}
//default temperature in heat blocks
void initialize(CONF &data, vector<float> &u) {
u.assign(data.x * data.y * data.z, data.def_temp);
for (int i = 0; i < data.heat_block.size(); i++) {
BLOCK b = data.heat_block[i];
for (int m = b.z; m < b.z + b.depth; m++) {
for (int j = b.y; j < b.y + b.height; j++) {
for (int n = b.x; n < b.x + b.width; n++) {
u[HD3_1(n, j, m)] = b.temp;
}
}
}
}
}
//output csv file
void print(const CONF &data, const vector<float> &u) {
char filename[] = "heat2D3Doutput.csv";
ofstream fout(filename);
for (int m = 0; m < data.z; m++) {
for (int j = 0; j < data.y; j++) {
for (int n = 0; n < data.x - 1; n++) {
fout << u[HD3_1(n, j, m)] << ',';
}
fout << u[HD3_1(data.x - 1, j, m)] << endl;
}
fout << endl;
}
fout.close();
}
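// output format: one CSV row per y line (data.x comma-separated values), rows grouped
// by z slice with a blank line after each slice, written to "heat2D3Doutput.csv".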
// Decide whether the node sits inside a fixed heater block: true means hold it fixed, false means update it
__device__ bool changeBLO(int x0, int y0, BLOCK* &heat_block, int count) {
for (int i = 0; i < count; i++) {
if (x0 >= heat_block[i].x && x0 < heat_block[i].x + heat_block[i].width &&
y0 >= heat_block[i].y && y0 < heat_block[i].y + heat_block[i].height)
//z0 >= heat_block[i].z && z0 < heat_block[i].z + heat_block[i].depth
return true;
}
return false;
}
__device__ bool changeBLO(int x0, int y0, int z0, BLOCK* &heat_block, int count) {
for (int i = 0; i < count; i++) {
if (x0 >= heat_block[i].x && x0 < heat_block[i].x + heat_block[i].width &&
y0 >= heat_block[i].y && y0 < heat_block[i].y + heat_block[i].height &&
z0 >= heat_block[i].z && z0 < heat_block[i].z + heat_block[i].depth)
return true;
}
return false;
}
__global__ void heat2D(float *u, float *u_new, CONF *data, BLOCK *pHeater, int count){
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
//int z = blockDim.z * blockIdx.z;
if (x < data->x && y < data->y) {
if (changeBLO(x, y, pHeater, count)) {
// fixed blocks, u_new = u
u_new[HD2(x, y)] = u[HD2(x, y)];
}
else
{
if (x == 0 && y == 0) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x + 1, y)] + u[HD2(x, y + 1)] - 2 * u[HD2(x, y)]);
else if (x == 0 && y == data->y - 1) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x + 1, y)] + u[HD2(x, y - 1)] - 2 * u[HD2(x, y)]);
else if (x == 0) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x + 1, y)] + u[HD2(x, y - 1)] + u[HD2(x, y + 1)] - 3 * u[HD2(x, y)]);
else if (x == data->x - 1 && y == 0) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x, y + 1)] - 2 * u[HD2(x, y)]);
else if (x == data->x - 1 && y == data->y - 1) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x, y - 1)] - 2 * u[HD2(x, y)]);
else if (x == data->x - 1) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x, y - 1)] + u[HD2(x, y + 1)] - 3 * u[HD2(x, y)]);
else if (y == 0) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x + 1, y)] + u[HD2(x, y + 1)] - 3 * u[HD2(x, y)]);
else if (y == data->y - 1) u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x + 1, y)] + u[HD2(x, y - 1)] - 3 * u[HD2(x, y)]);
else u_new[HD2(x, y)] = u[HD2(x, y)] + data->n * (u[HD2(x - 1, y)] + u[HD2(x + 1, y)] + u[HD2(x, y - 1)] + u[HD2(x, y + 1)] - 4 * u[HD2(x, y)]);
}
}
float *temp = u_new;
u_new = u;
u = temp;
}
__global__ void heat3D(float *u, float *u_new, CONF *data, BLOCK *pHeater, int count) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int z = blockDim.z * blockIdx.z;
if (x < data->x && y < data->y && z < data->z) {
if (changeBLO(x, y, z, pHeater, count)) {
u_new[HD3(x, y, z)] = u[HD3(x, y, z)];
}
else {
if (x == 0 && y == 0 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 3 * u[HD3(x, y, z)]);
else if (x == 0 && y == 0 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 3 * u[HD3(x, y, z)]);
else if (x == 0 && y == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == 0 && y == data->y - 1 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z + 1)] - 3 * u[HD3(x, y, z)]);
else if (x == 0 && y == data->y - 1 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] - 3 * u[HD3(x, y, z)]);
else if (x == 0 && y == data->y - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == 0 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == 0 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 4 * u[HD3(x, y, z)]);
else if (x == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 5 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == 0 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 3 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == 0 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 3 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == data->y - 1 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z + 1)] - 3 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == data->y - 1 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] - 3 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && y == data->y - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (x == data->x - 1 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 4 * u[HD3(x, y, z)]);
else if (x == data->x - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 5 * u[HD3(x, y, z)]);
else if (y == 0 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (y == 0 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 4 * u[HD3(x, y, z)]);
else if (y == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 5 * u[HD3(x, y, z)]);
else if (y == data->y - 1 && z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z + 1)] - 4 * u[HD3(x, y, z)]);
else if (y == data->y - 1 && z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] - 4 * u[HD3(x, y, z)]);
else if (y == data->y - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 5 * u[HD3(x, y, z)]);
else if (z == 0) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z + 1)] - 5 * u[HD3(x, y, z)]);
else if (z == data->z - 1) u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] - 5 * u[HD3(x, y, z)]);
else u_new[HD3(x, y, z)] = u[HD3(x, y, z)] + data->n * (u[HD3(x - 1, y, z)] + u[HD3(x + 1, y, z)] + u[HD3(x, y - 1, z)] + u[HD3(x, y + 1, z)] + u[HD3(x, y, z - 1)] + u[HD3(x, y, z + 1)] - 6 * u[HD3(x, y, z)]);
}
}
float *temp = u_new;
u_new = u;
u = temp;
}
//global variable
CONF data;
int main(int argc, char* argv[]) {
if (argc != 2) {
cout << "Input error." << endl;
return -1;
}
dim3 numBlocks, threadsPerBlock;
//read file
string path = argv[1];
read(path, data);
//initialize
vector<float> u;
vector<float> u_new;
initialize(data, u);
u_new = u;
//set threads and blocks
threadsPerBlock = dim3(THREAD_SIZE, THREAD_SIZE);
numBlocks = dim3(FUNC(data.x, THREAD_SIZE), FUNC(data.y, THREAD_SIZE), data.z);
//allocate temperature array space on device
float *d_u, *d_u_new, *temp;
CONF* d_data;
int size = data.x * data.y * data.z;
BLOCK* d_heater;
int count = data.heat_block.size();
cudaMalloc((void **)&d_u, size * sizeof(float));
cudaMalloc((void **)&d_u_new, size * sizeof(float));
cudaMalloc((void **)&d_data, sizeof(CONF));
cudaMalloc((void **)&d_heater, count * sizeof(BLOCK));
//copy inputs to device
cudaMemcpy(d_u_new, &u_new[0], size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_data, &data, sizeof(CONF), cudaMemcpyHostToDevice);
cudaMemcpy(d_heater, data.heat_block.data(), count * sizeof(BLOCK), cudaMemcpyHostToDevice);
cudaMemcpy(d_u, &u[0], size * sizeof(float), cudaMemcpyHostToDevice);
//gpu loop
for (int i = 0; i < data.num_step; i++) {
if (data.dimension == 2){
heat2D <<< numBlocks, threadsPerBlock>>>(d_u, d_u_new, d_data, d_heater, count);
}
else{
heat3D <<< numBlocks, threadsPerBlock>>>(d_u, d_u_new, d_data, d_heater, count);
}
temp = d_u_new;
d_u_new = d_u;
d_u = temp;
}
//copy temperature array from device to host
cudaMemcpy(&u[0], d_u, size * sizeof(float), cudaMemcpyDeviceToHost);
print(data, u);
//free memory
cudaFree(d_u);
cudaFree(d_u_new);
cudaFree(d_data);
cudaFree(d_heater);
return 0;
} |
1f83cd6d273edc17b570e3b666caeb63d0a06166.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
/*
This code is copied/adapted from
https://devblogs.nvidia.com/how-query-device-properties-and-handle-errors-cuda-cc/
*/
using namespace std;
int main(int argc, char const *argv[]) {
/* code */
int nDevices = 0;
hipGetDeviceCount(&nDevices);
//sets nDevices to the number of CUDA capable devices (GPUs)
cout <<"Total Devices: "<< nDevices << '\n';
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
cout << "Device Numer: " << i <<"\n";
cout <<"\t"<< "Device Name:"<<prop.name<<"\n";
cout <<"\t"<< "Clock Rate(KHz):"<<prop.memoryClockRate<<"\n";
cout <<"\t"<< "But Width(bits):"<<prop.memoryBusWidth<<"\n";
cout << "\t" << "Memory Bandwidth(GB/s):" <<
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6 << "\n";
}
return 0;
}
| 1f83cd6d273edc17b570e3b666caeb63d0a06166.cu | #include <iostream>
/*
This code is copied/adapted from
https://devblogs.nvidia.com/how-query-device-properties-and-handle-errors-cuda-cc/
*/
using namespace std;
int main(int argc, char const *argv[]) {
/* code */
int nDevices = 0;
cudaGetDeviceCount(&nDevices);
//sets nDevices to the number of CUDA capable devices (GPUs)
cout <<"Total Devices: "<< nDevices << '\n';
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
cout << "Device Numer: " << i <<"\n";
cout <<"\t"<< "Device Name:"<<prop.name<<"\n";
cout <<"\t"<< "Clock Rate(KHz):"<<prop.memoryClockRate<<"\n";
cout <<"\t"<< "But Width(bits):"<<prop.memoryBusWidth<<"\n";
cout << "\t" << "Memory Bandwidth(GB/s):" <<
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6 << "\n";
}
return 0;
}
|
b3c70949f4bad879fd20ebf93e597f32ece51914.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdiocu.h>
#include <stringcu.h>
#include <pwdcu.h>
#include <assert.h>
static __global__ void g_pwd_test1() {
printf("pwd_test1\n");
//// GETPWUID, GETPWNAM, GETPWENT, ENDPWENT, SETPWENT ////
//extern __device__ struct passwd *getpwuid_(uid_t uid);
//extern __device__ struct passwd *getpwnam_(const char *name);
//extern __device__ struct passwd *getpwent_();
//extern __device__ void endpwent_();
//#define setpwent
struct passwd *a0a = getpwuid(0); struct passwd *a0b = getpwuid(1); bool a0c = !strcmp(a0b->pw_name, "std"); assert(!a0a && a0b && a0c);
struct passwd *b0a = getpwnam(nullptr); struct passwd *b0b = getpwnam(""); struct passwd *b0c = getpwnam("abc"); struct passwd *b0d = getpwnam("std"); bool b0e = !strcmp(b0d->pw_name, "std"); assert(!b0a && !b0b && !b0c && b0d && b0e);
struct passwd *c0a = getpwent(); assert(c0a); int c0b = 1; while ((c0a = getpwent()) != nullptr) c0b++; assert(c0b == 1);
struct passwd *d0a = getpwent(); setpwent(); struct passwd *d0b = getpwent(); endpwent(); struct passwd *d0c = getpwent(); assert(!d0a && d0b && d0c);
}
hipError_t pwd_test1() {hipLaunchKernelGGL(( g_pwd_test1), dim3(1), dim3(1), 0, 0, ); return hipDeviceSynchronize(); }
| b3c70949f4bad879fd20ebf93e597f32ece51914.cu | #include <stdiocu.h>
#include <stringcu.h>
#include <pwdcu.h>
#include <assert.h>
static __global__ void g_pwd_test1() {
printf("pwd_test1\n");
//// GETPWUID, GETPWNAM, GETPWENT, ENDPWENT, SETPWENT ////
//extern __device__ struct passwd *getpwuid_(uid_t uid);
//extern __device__ struct passwd *getpwnam_(const char *name);
//extern __device__ struct passwd *getpwent_();
//extern __device__ void endpwent_();
//#define setpwent
struct passwd *a0a = getpwuid(0); struct passwd *a0b = getpwuid(1); bool a0c = !strcmp(a0b->pw_name, "std"); assert(!a0a && a0b && a0c);
struct passwd *b0a = getpwnam(nullptr); struct passwd *b0b = getpwnam(""); struct passwd *b0c = getpwnam("abc"); struct passwd *b0d = getpwnam("std"); bool b0e = !strcmp(b0d->pw_name, "std"); assert(!b0a && !b0b && !b0c && b0d && b0e);
struct passwd *c0a = getpwent(); assert(c0a); int c0b = 1; while ((c0a = getpwent()) != nullptr) c0b++; assert(c0b == 1);
struct passwd *d0a = getpwent(); setpwent(); struct passwd *d0b = getpwent(); endpwent(); struct passwd *d0c = getpwent(); assert(!d0a && d0b && d0c);
}
cudaError_t pwd_test1() { g_pwd_test1<<<1, 1>>>(); return cudaDeviceSynchronize(); }
|
809b14761e3373c1a3942d93a63c6690f6eee7f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void square(float *d_out, float *d_in) {
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
}
int main(int argc, char **argv) {
const int ARRAY_SIZE = 1024;
const int ARRRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = (float) i;
}
float h_out[ARRAY_SIZE];
float *d_in;
float *d_out;
hipMalloc((void **) &d_in, ARRRAY_BYTES);
hipMalloc((void **) &d_out, ARRRAY_BYTES);
hipMemcpy(d_in, h_in, ARRRAY_BYTES, hipMemcpyHostToDevice);
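// launch a single block of ARRAY_SIZE (1024) threads, the per-block limit on most GPUs; each thread squares one element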
hipLaunchKernelGGL(( square), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
hipMemcpy(h_out, d_out, ARRRAY_BYTES, hipMemcpyDeviceToHost);
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
} | 809b14761e3373c1a3942d93a63c6690f6eee7f7.cu | #include <stdio.h>
__global__ void square(float *d_out, float *d_in) {
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
}
int main(int argc, char **argv) {
const int ARRAY_SIZE = 1024;
const int ARRRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = (float) i;
}
float h_out[ARRAY_SIZE];
float *d_in;
float *d_out;
cudaMalloc((void **) &d_in, ARRRAY_BYTES);
cudaMalloc((void **) &d_out, ARRRAY_BYTES);
cudaMemcpy(d_in, h_in, ARRRAY_BYTES, cudaMemcpyHostToDevice);
square<<<1, ARRAY_SIZE>>>(d_out, d_in);
cudaMemcpy(h_out, d_out, ARRRAY_BYTES, cudaMemcpyDeviceToHost);
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
} |
7220760e533d3215de7586ced60c4fbd3484fbfe.hip | // !!! This is a file automatically generated by hipify!!!
/*
* FluxFunctions.cu
*
* Created on: Oct 22, 2015
* Author: bazow
*/
#include <stdlib.h>
#include <stdio.h> // for printf
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "edu/osu/rhic/trunk/hydro/FluxFunctions.cuh"
#include "edu/osu/rhic/trunk/hydro/EnergyMomentumTensor.cuh"
#include "edu/osu/rhic/trunk/hydro/DynamicalVariables.cuh"
__device__
PRECISION Fx(PRECISION q, PRECISION ut, PRECISION ux, PRECISION uy, PRECISION un) {
return ux * q / ut;
}
__device__
PRECISION Fy(PRECISION q, PRECISION ut, PRECISION ux, PRECISION uy, PRECISION un) {
return uy * q / ut;
}
__device__
PRECISION Fz(PRECISION q, PRECISION ut, PRECISION ux, PRECISION uy, PRECISION un) {
return un * q / ut;
}
| 7220760e533d3215de7586ced60c4fbd3484fbfe.cu | /*
* FluxFunctions.cu
*
* Created on: Oct 22, 2015
* Author: bazow
*/
#include <stdlib.h>
#include <stdio.h> // for printf
#include <cuda.h>
#include <cuda_runtime.h>
#include "edu/osu/rhic/trunk/hydro/FluxFunctions.cuh"
#include "edu/osu/rhic/trunk/hydro/EnergyMomentumTensor.cuh"
#include "edu/osu/rhic/trunk/hydro/DynamicalVariables.cuh"
__device__
PRECISION Fx(PRECISION q, PRECISION ut, PRECISION ux, PRECISION uy, PRECISION un) {
return ux * q / ut;
}
__device__
PRECISION Fy(PRECISION q, PRECISION ut, PRECISION ux, PRECISION uy, PRECISION un) {
return uy * q / ut;
}
__device__
PRECISION Fz(PRECISION q, PRECISION ut, PRECISION ux, PRECISION uy, PRECISION un) {
return un * q / ut;
}
|
65e8813816e2ed152a0337c5f18b1192be62b8cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/plugin/recover_padding_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
__global__ void RecoverPaddingKernel(const float* input0, const int32_t* input1,
float* output) {
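// grid layout: blockIdx.x = sequence index in the batch, blockIdx.y = token position within the
// padded max length, and blockIdx.z * blockDim.x + threadIdx.x = index in the hidden dimension;
// input1 (pos_id) holds cumulative sequence offsets, so input1[b + 1] - input1[b] is the real
// length of sequence b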
int word_id = blockIdx.x * gridDim.y + blockIdx.y;
int32_t seqence_length = input1[blockIdx.x + 1] - input1[blockIdx.x];
if (blockIdx.y < seqence_length) {
output[word_id * gridDim.z * blockDim.x + blockIdx.z * blockDim.x +
threadIdx.x] =
input0[(input1[blockIdx.x] + blockIdx.y) * gridDim.z * blockDim.x +
blockIdx.z * blockDim.x + threadIdx.x];
} else {
output[word_id * gridDim.z * blockDim.x + blockIdx.z * blockDim.x +
threadIdx.x] = 0;
}
}
nvinfer1::DataType RecoverPaddingPlugin::getOutputDataType(
int index, const nvinfer1::DataType* input_types,
int nb_inputs) const TRT_NOEXCEPT {
return input_types[0];
}
nvinfer1::DimsExprs RecoverPaddingPlugin::getOutputDimensions(
int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
nvinfer1::DimsExprs output_dims{};
output_dims.nbDims = 3;
const auto* one = exprBuilder.constant(1);
output_dims.d[0] = exprBuilder.operation(nvinfer1::DimensionOperation::kSUB,
*inputs[1].d[0], *one);
output_dims.d[1] = inputs[2].d[1];
output_dims.d[2] = inputs[0].d[1];
return output_dims;
}
bool RecoverPaddingPlugin::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs,
int nbOutputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(nbInputs, 3,
platform::errors::InvalidArgument("Must have 3 inputs, "
"but got %d input(s). ",
nbInputs));
PADDLE_ENFORCE_EQ(nbOutputs, getNbOutputs(),
platform::errors::InvalidArgument("Must have 1 output, "
"but got %d output(s). ",
nbOutputs));
if (pos == 1) { // PosId, MaxSeqlen
return inOut[pos].type == nvinfer1::DataType::kINT32 &&
inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
}
return inOut[pos].type == nvinfer1::DataType::kFLOAT &&
inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
// return (inOut[pos].type == nvinfer1::DataType::kFLOAT && inOut[pos].format
// == nvinfer1::TensorFormat::kLINEAR)||
// (inOut[pos].type == nvinfer1::DataType::kHALF && inOut[pos].format ==
// nvinfer1::TensorFormat::kLINEAR)||
// (inOut[pos].type == nvinfer1::DataType::kINT8 && inOut[pos].format ==
// nvinfer1::TensorFormat::kCHW32);
}
void RecoverPaddingPlugin::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* inputs, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* outputs,
int nbOutputs) TRT_NOEXCEPT {}
void RecoverPaddingPlugin::attachToContext(
cudnnContext* cudnnContext, cublasContext* cublasContext,
nvinfer1::IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {}
void RecoverPaddingPlugin::detachFromContext() TRT_NOEXCEPT {}
void RecoverPaddingPlugin::terminate() TRT_NOEXCEPT {}
int RecoverPaddingPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs,
void* const* outputs, void* workspace,
hipStream_t stream) TRT_NOEXCEPT {
const auto input0_desc = inputDesc[0];
const auto input1_desc = inputDesc[1];
const auto input2_desc = inputDesc[2];
const float* input0 = static_cast<const float*>(inputs[0]);
const int32_t* input1 =
static_cast<const int32_t*>(inputs[1]); // pos_id_tensor
float* output = static_cast<float*>(outputs[0]);
const int32_t num_threads = 256;
const dim3 num_blocks(
input1_desc.dims.d[0] - 1, input2_desc.dims.d[1],
input0_desc.dims.d[1] / num_threads); // batches, max sequence length
// (mask_id.dims.d[1]),
// input.dims.d[1]/256
hipLaunchKernelGGL(( RecoverPaddingKernel), dim3(num_blocks), dim3(num_threads), 0, stream, input0, input1,
output);
return hipGetLastError() != hipSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 65e8813816e2ed152a0337c5f18b1192be62b8cd.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/plugin/recover_padding_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
__global__ void RecoverPaddingKernel(const float* input0, const int32_t* input1,
float* output) {
int word_id = blockIdx.x * gridDim.y + blockIdx.y;
int32_t seqence_length = input1[blockIdx.x + 1] - input1[blockIdx.x];
if (blockIdx.y < seqence_length) {
output[word_id * gridDim.z * blockDim.x + blockIdx.z * blockDim.x +
threadIdx.x] =
input0[(input1[blockIdx.x] + blockIdx.y) * gridDim.z * blockDim.x +
blockIdx.z * blockDim.x + threadIdx.x];
} else {
output[word_id * gridDim.z * blockDim.x + blockIdx.z * blockDim.x +
threadIdx.x] = 0;
}
}
nvinfer1::DataType RecoverPaddingPlugin::getOutputDataType(
int index, const nvinfer1::DataType* input_types,
int nb_inputs) const TRT_NOEXCEPT {
return input_types[0];
}
nvinfer1::DimsExprs RecoverPaddingPlugin::getOutputDimensions(
int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
nvinfer1::DimsExprs output_dims{};
output_dims.nbDims = 3;
const auto* one = exprBuilder.constant(1);
output_dims.d[0] = exprBuilder.operation(nvinfer1::DimensionOperation::kSUB,
*inputs[1].d[0], *one);
output_dims.d[1] = inputs[2].d[1];
output_dims.d[2] = inputs[0].d[1];
return output_dims;
}
bool RecoverPaddingPlugin::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs,
int nbOutputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(nbInputs, 3,
platform::errors::InvalidArgument("Must have 3 inputs, "
"but got %d input(s). ",
nbInputs));
PADDLE_ENFORCE_EQ(nbOutputs, getNbOutputs(),
platform::errors::InvalidArgument("Must have 1 output, "
"but got %d output(s). ",
nbOutputs));
if (pos == 1) { // PosId, MaxSeqlen
return inOut[pos].type == nvinfer1::DataType::kINT32 &&
inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
}
return inOut[pos].type == nvinfer1::DataType::kFLOAT &&
inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
// return (inOut[pos].type == nvinfer1::DataType::kFLOAT && inOut[pos].format
// == nvinfer1::TensorFormat::kLINEAR)||
// (inOut[pos].type == nvinfer1::DataType::kHALF && inOut[pos].format ==
// nvinfer1::TensorFormat::kLINEAR)||
// (inOut[pos].type == nvinfer1::DataType::kINT8 && inOut[pos].format ==
// nvinfer1::TensorFormat::kCHW32);
}
void RecoverPaddingPlugin::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* inputs, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* outputs,
int nbOutputs) TRT_NOEXCEPT {}
void RecoverPaddingPlugin::attachToContext(
cudnnContext* cudnnContext, cublasContext* cublasContext,
nvinfer1::IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {}
void RecoverPaddingPlugin::detachFromContext() TRT_NOEXCEPT {}
void RecoverPaddingPlugin::terminate() TRT_NOEXCEPT {}
int RecoverPaddingPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs,
void* const* outputs, void* workspace,
cudaStream_t stream) TRT_NOEXCEPT {
const auto input0_desc = inputDesc[0];
const auto input1_desc = inputDesc[1];
const auto input2_desc = inputDesc[2];
const float* input0 = static_cast<const float*>(inputs[0]);
const int32_t* input1 =
static_cast<const int32_t*>(inputs[1]); // pos_id_tensor
float* output = static_cast<float*>(outputs[0]);
const int32_t num_threads = 256;
const dim3 num_blocks(
input1_desc.dims.d[0] - 1, input2_desc.dims.d[1],
input0_desc.dims.d[1] / num_threads); // batches, max sequence length
// (mask_id.dims.d[1]),
// input.dims.d[1]/256
RecoverPaddingKernel<<<num_blocks, num_threads, 0, stream>>>(input0, input1,
output);
return cudaGetLastError() != cudaSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
de64e80010e1d6e08c6604da6d71638f430b2bd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S1_14.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
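// the state vector is stored in a pitched 2D layout: one row per equation (NEQ rows in total) and
// one column per cell, so state i of a cell is read as *((real*)((char*)sv + pitch * i) + cell)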
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
check_cuda_error(hipFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(hipFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.7583701946981,0.00123873000842469,0.784340161140820,0.784215606615357,0.000169930428482162,0.487069003179197,0.00290075487318139,0.999998410707334,1.87318094176279e-08,1.84373365407432e-05,0.999775582067229,1.00670303904820,0.999986058074727,5.42971721960811e-05,0.634327980617936,8.41115593615013,141.093709811985};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
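// gates are advanced with the closed-form exponential (Rush-Larsen) update: y_new = y_inf - (y_inf - y_old) * exp(-dt/tau)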
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
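// fCa and g are only allowed to decrease while the membrane is depolarized above -37 mV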
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={13.1091507301611,0.000227256021779263,0.000160878689291275,0.000673024254981826,0.282780279127790,0.152968591524632,0.160618022632678,3.47494398199649,0.0192974956098871,3.38770971695943,1099.68930724138,0.000549392613760007,0.234906883379890,0.0197346252955553,0.00432868966873845,5.16755137958392e-05};
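// calibrated parameter set for this scenario: the first entries override the default epicardium
// conductances set above, and the last three (arel, crel, Vleak) feed the Irel and Ileak terms below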
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| de64e80010e1d6e08c6604da6d71638f430b2bd0.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S1_14.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
check_cuda_error(cudaFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(cudaFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.7583701946981,0.00123873000842469,0.784340161140820,0.784215606615357,0.000169930428482162,0.487069003179197,0.00290075487318139,0.999998410707334,1.87318094176279e-08,1.84373365407432e-05,0.999775582067229,1.00670303904820,0.999986058074727,5.42971721960811e-05,0.634327980617936,8.41115593615013,141.093709811985};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
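// Fitted parameter set; it overrides the default conductances and the uptake/release constants assigned above.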
real parameters []={13.1091507301611, 0.000227256021779263, 0.000160878689291275, 0.000673024254981826,
0.282780279127790, 0.152968591524632, 0.160618022632678, 3.47494398199649,
0.0192974956098871, 3.38770971695943, 1099.68930724138, 0.000549392613760007,
0.234906883379890, 0.0197346252955553, 0.00432868966873845, 5.16755137958392e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
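// The fCa and g gates have fixed time constants (taufca, taug), so their exponential decay factors can be precomputed once per step.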
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
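// Reversal (Nernst) potentials and the rectification factors used by IK1, INaK and IpK.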
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
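// SR and cytosolic Ca2+ are advanced assuming instantaneous buffering: each new concentration is the positive root of a quadratic in the total (free + buffered) calcium.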
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
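// Exponential (Rush-Larsen style) update: each gate relaxes toward its steady-state value with its voltage-dependent time constant.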
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
d54adaa5a77e0899ce0f2e41d93d6c9b01f0c90d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
using namespace std;
int main()
{
cout << "Hello CMake." << endl;
return 0;
}
| d54adaa5a77e0899ce0f2e41d93d6c9b01f0c90d.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
using namespace std;
int main()
{
cout << "Hello CMake." << endl;
return 0;
}
|
d721d025dff044f085dee857e3a6022ed4d25275.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
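// Minimal example: an empty kernel is launched only to exercise the runtime; all output comes from the host printf.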
__global__ void kernel( void ) {
}
int main( void ) {
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, );
printf( "Hello, World!\n" );
return 0;
}
| d721d025dff044f085dee857e3a6022ed4d25275.cu | #include <stdio.h>
__global__ void kernel( void ) {
}
int main( void ) {
kernel<<<1, 1>>>();
printf( "Hello, World!\n" );
return 0;
}
|
dd9dd896add7590ca58df211f80244d93c49ac5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include "support.cuh"
#include <stdlib.h>
#include "htpykinematics.cuh"
__device__ void FSYS(Vect<nV + 1> *Z, Vect<nP> *q, Vect<nV> *eval){
Cplx A = Z[0].vals[0];
Cplx B = Z[0].vals[1];
Cplx C = Z[0].vals[2];
Cplx D = Z[0].vals[3];
Cplx F = Z[0].vals[4];
Cplx G = Z[0].vals[5];
Cplx H = Z[0].vals[6];
Cplx Ac = Z[0].vals[7];
Cplx Bc = Z[0].vals[8];
Cplx Cc = Z[0].vals[9];
Cplx Dc = Z[0].vals[10];
Cplx Fc = Z[0].vals[11];
Cplx Gc = Z[0].vals[12];
Cplx Hc = Z[0].vals[13];
Cplx *U = &Z[0].vals[14-1]; // subtract 1 for indexing 1 to 7
Cplx Z0 = Z[0].vals[21];
Cplx *P = &q[0].vals[0]; // indexed from 0 to 7
Cplx *Pc = &q[0].vals[8]; // indexed from 0 to 7
Cplx *Q = &q[0].vals[16-1]; // subtract 1 for indexing 1 to 7
Cplx *Qc = &q[0].vals[23-1]; // subtract 1 for indexing 1 to 7
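// The residuals below are long symbolic expansions written with the cplxAdd/cplxSub/cplxMul/cplxDiv/cplxPow helpers; the trailing backslashes are line continuations.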
#pragma unroll
for (int j = 1; j <= 7; ++j){
eval[0].vals[j - 1] = cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(\
cplxMul({ -1, 0 }, cplxMul(cplxAdd(cplxMul(P[j], Pc[j]), cplxMul(\
P[0], Pc[0])), cplxPow(Z0, 3))), cplxMul(cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j]), Z0), cplxAdd(cplxMul({ -1, 0 }, A \
), cplxMul(P[j], Z0)))), cplxMul(cplxMul(\
Ac, Z0), cplxAdd(cplxAdd(cplxMul({ -1, 0 }, A), C), cplxMul(P[j], Z0)) \
)), cplxMul(cplxMul(Hc, Z0), cplxAdd(cplxMul({ -1, 0 }, C), cplxMul(\
P[0], Z0)))), cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), \
C), Q[j]), Z0), cplxAdd(cplxMul({ -1, 0 }, Ac), cplxMul(Pc[j], Z0)) \
)), cplxMul(cplxMul(A, Z0), cplxAdd(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc \
), cplxMul(Pc[j], Z0)))), cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(Pc[j], Z0)) \
)), cplxMul(cplxMul(H, Z0), cplxAdd(cplxMul({ -1, 0 }, Cc), cplxMul(\
Pc[0], Z0)))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, 2), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(Pc[0], Z0))), U[j]));
eval[0].vals[j + 6] = cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxSub(cplxAdd(cplxMul(cplxMul(Bc, P[j])\
, cplxPow(Z0, { 2, 0 })), cplxMul(cplxMul(B, Pc[j]), cplxPow(Z0, { 2, 0 }))), cplxMul(\
cplxAdd(cplxMul(P[j], Pc[j]), cplxMul(P[0], Pc[0])), cplxPow(Z0, { 3, 0 } \
))), cplxMul(cplxMul(Fc, Z0), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[0], Z0)))), cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(Pc[j], Z0)) \
)), cplxMul(cplxMul(F, Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[0], Z0)))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(Pc[0], Z0))), U[j]));
eval[0].vals[j + 13] = cplxSub(cplxAdd(cplxAdd(cplxSub(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, \
0 }, D), G), cplxAdd(cplxMul({ -1, 0 }, Dc), Gc)), Z0 \
), cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxAdd(cplxSub(A, B \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
U[j], cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j] \
)), cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
}
}
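// JVAR: fills eval with the nV x (nV+1) Jacobian of FSYS with respect to the unknowns (the dI/d*, dII/d*, dIII/d* blocks labelled below).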
__device__ void JVAR(Vect<nV+1> *Z, Vect<nP> *q, Matr<nV,nV+1> *eval){
Cplx A = Z[0].vals[0];
Cplx B = Z[0].vals[1];
Cplx C = Z[0].vals[2];
Cplx D = Z[0].vals[3];
Cplx F = Z[0].vals[4];
Cplx G = Z[0].vals[5];
Cplx H = Z[0].vals[6];
Cplx Ac = Z[0].vals[7];
Cplx Bc = Z[0].vals[8];
Cplx Cc = Z[0].vals[9];
Cplx Dc = Z[0].vals[10];
Cplx Fc = Z[0].vals[11];
Cplx Gc = Z[0].vals[12];
Cplx Hc = Z[0].vals[13];
Cplx *U = &Z[0].vals[14 - 1]; // subtract 1 for indexing 1 to 7
Cplx Z0 = Z[0].vals[21];
Cplx *P = &q[0].vals[0]; // indexed from 0 to 7
Cplx *Pc = &q[0].vals[8]; // indexed from 0 to 7
Cplx *Q = &q[0].vals[16 - 1]; // subtract 1 for indexing 1 to 7
Cplx *Qc = &q[0].vals[23 - 1]; // subtract 1 for indexing 1 to 7
memset(eval[0].vals, 0, sizeof(Matr<nV, nV+1>));
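// Zero the full Jacobian, then fill it row by row (22 columns per equation); entries written as {0,0} are structural zeros.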
#pragma unroll
for (int j = 1; j <= 7; ++j){
/* eqnsI derivatives */
/* dI/dA */
eval[0].vals[(j - 1) * 22 + 0] = cplxAdd(cplxAdd(cplxSub(cplxSub(cplxMul({ -1, 0 }, cplxMul(Ac, Z0) \
), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j]), Z0 \
)), cplxMul(cplxMul(Q[j], Z0), cplxAdd(cplxMul({ -1, 0 }, Ac), cplxMul(\
Pc[j], Z0)))), cplxMul(Z0, cplxAdd(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc \
), cplxMul(Pc[j], Z0)))), cplxDiv(cplxMul(cplxMul(cplxAdd({ -1, 0 }, \
Q[j]), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), U[j]));
/* dI/dB */
eval[0].vals[(j - 1) * 22 + 1] = { 0, 0 };
/* dI/dC */
eval[0].vals[(j - 1) * 22 + 2] = cplxSub(cplxAdd(cplxSub(cplxMul(Ac, Z0), cplxMul(Hc, Z0 \
)), cplxMul(cplxMul(Q[j], Z0), cplxAdd(cplxMul({ -1, 0 }, Ac), cplxMul(\
Pc[j], Z0)))), cplxDiv(\
cplxMul(cplxMul(Q[j], cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), U[j]));
/* dI/dD */
eval[0].vals[(j - 1) * 22 + 3] = { 0, 0 };
/* dI/dF */
eval[0].vals[(j - 1) * 22 + 4] = { 0, 0 };
/* dI/dG */
eval[0].vals[(j - 1) * 22 + 5] = { 0, 0 };
/* dI/dH */
eval[0].vals[(j - 1) * 22 + 6] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(U[j], cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(Pc[j], Z0))) \
), cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, Cc), cplxMul(Pc[0], Z0))));
/* dI/dAc */
eval[0].vals[(j - 1) * 22 + 7] = cplxAdd(cplxAdd(cplxSub(cplxSub(cplxMul({ -1, 0 }, cplxMul(A, Z0) \
), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j]), Z0 \
)), cplxMul(cplxMul(Qc[j], Z0), cplxAdd(cplxMul({ -1, 0 }, A), cplxMul(\
P[j], Z0)))), cplxMul(Z0, cplxAdd(cplxAdd(cplxMul({ -1, 0 }, A), C \
), cplxMul(P[j], Z0)))), cplxMul(cplxMul(cplxAdd({ -1, 0 }, \
Qc[j]), U[j]), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(P[0], Z0))));
/* dI/dBc */
eval[0].vals[(j - 1) * 22 + 8] = { 0, 0 };
/* dI/dCc */
eval[0].vals[(j - 1) * 22 + 9] = cplxSub(cplxAdd(cplxSub(cplxMul(A, Z0), cplxMul(H, Z0 \
)), cplxMul(cplxMul(Qc[j], Z0), cplxAdd(cplxMul({ -1, 0 }, A), cplxMul(\
P[j], Z0)))), cplxMul(cplxMul(Qc[j], U[j]), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))));
/* dI/dDc */
eval[0].vals[(j - 1) * 22 + 10] = { 0, 0 };
/* dI/dFc */
eval[0].vals[(j - 1) * 22 + 11] = { 0, 0 };
/* dI/dGc */
eval[0].vals[(j - 1) * 22 + 12] = { 0, 0 };
/* dI/dHc */
eval[0].vals[(j - 1) * 22 + 13] = cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(P[j], Z0))), U[j]) \
), cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, C), cplxMul(P[0], Z0))));
/* dI/dU */
eval[0].vals[(j - 1) * 22 + 13 + j] = cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0)), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(Pc[j], Z0))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), cplxPow(U[j], { 2, 0 })));
/* dI/dZ0 */
eval[0].vals[j * 22 - 1] = cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(\
cplxAdd(cplxAdd(cplxAdd(cplxSub(cplxAdd(cplxAdd(cplxAdd(cplxAdd(\
cplxAdd(cplxMul(cplxMul(Ac, P[j]), Z0), cplxMul(cplxMul(Hc, P[0]), Z0 \
)), cplxMul(cplxMul(A, Pc[j]), Z0)), cplxMul(cplxMul(H, Pc[0]), Z0 \
)), cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), \
C), Pc[j]), Q[j]), Z0)), cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac \
), Cc), P[j]), Qc[j]), Z0)), cplxMul(cplxMul({ 3, 0 }, cplxAdd(cplxMul(P[j], Pc[j] \
), cplxMul(P[0], Pc[0]))), cplxPow(Z0, { 2, 0 }))), cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j]), cplxAdd(cplxMul({ -1, 0 }, A \
), cplxMul(P[j], Z0)))), cplxMul(Ac, cplxAdd(cplxAdd(cplxMul({ -1, 0 }, A), \
C), cplxMul(P[j], Z0)))), cplxDiv(\
cplxMul(cplxMul(Pc[0], cplxPow(Z0, { 2, 0 })), cplxAdd(cplxSub(cplxMul({ -1, 0 }, \
A), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), U[j])), cplxMul(Hc, cplxAdd(cplxMul({ -1, 0 }, C), cplxMul(\
P[0], Z0)))), cplxMul(cplxMul(Pc[j], U[j]), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0)))), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), \
C), Q[j]), cplxAdd(cplxMul({ -1, 0 }, Ac), cplxMul(Pc[j], Z0)))), cplxMul(\
A, cplxAdd(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), cplxMul(Pc[j], Z0)) \
)), cplxMul(cplxMul(P[0], U[j]), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(Pc[j], Z0)) \
)), cplxMul(H, cplxAdd(cplxMul({ -1, 0 }, Cc), cplxMul(Pc[0], Z0)) \
)), cplxDiv(\
cplxMul(cplxMul(P[j], cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), U[j])), cplxDiv(\
cplxMul(cplxMul(cplxMul({ 2, 0 }, Z0), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(Pc[0], Z0))), U[j]));
/* eqnsII derivatives */
/* dII/dA */
eval[0].vals[(7 + j - 1) * 22 + 0] = { 0, 0 };
/* dII/dB */
eval[0].vals[(7 + j - 1) * 22 + 1] = cplxSub(cplxAdd(cplxMul({ -1, 0 }, cplxMul(Fc, Z0)), cplxMul(\
Pc[j], cplxPow(Z0, { 2, 0 }))), cplxDiv(\
cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), U[j]));
/* dII/dC */
eval[0].vals[(7 + j - 1) * 22 + 2] = { 0, 0 };
/* dII/dD */
eval[0].vals[(7 + j - 1) * 22 + 3] = { 0, 0 };
/* dII/dF */
eval[0].vals[(7 + j - 1) * 22 + 4] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0)))), cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[0], Z0))));
/* dII/dG */
eval[0].vals[(7 + j - 1) * 22 + 5] = { 0, 0 };
/* dII/dH */
eval[0].vals[(7 + j - 1) * 22 + 6] = { 0, 0 };
/* dII/dAc */
eval[0].vals[(7 + j - 1) * 22 + 7] = { 0, 0 };
/* dII/dBc */
eval[0].vals[(7 + j - 1) * 22 + 8] = cplxSub(cplxAdd(cplxMul({ -1, 0 }, cplxMul(F, Z0)), cplxMul(\
P[j], cplxPow(Z0, { 2, 0 }))), cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, F \
), cplxMul(P[0], Z0))));
/* dII/dCc */
eval[0].vals[(7 + j - 1) * 22 + 9] = { 0, 0 };
/* dII/dDc */
eval[0].vals[(7 + j - 1) * 22 + 10] = { 0, 0 };
/* dII/dFc */
eval[0].vals[(7 + j - 1) * 22 + 11] = cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), U[j])), cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[0], Z0))));
/* dII/dGc */
eval[0].vals[(7 + j - 1) * 22 + 12] = { 0, 0 };
/* dII/dHc */
eval[0].vals[(7 + j - 1) * 22 + 13] = { 0, 0 };
/* dII/dU */
eval[0].vals[(7 + j - 1) * 22 + 13 + j] = cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(Pc[j], Z0))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxPow(U[j], { 2, 0 })));
/* dII/dZ0 */
eval[0].vals[(7 + j) * 22 - 1] = cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxSub(\
cplxAdd(cplxAdd(cplxAdd(cplxMul(cplxMul(cplxMul({ 2, 0 }, Bc), P[j]), Z0 \
), cplxMul(cplxMul(Fc, P[0]), Z0)), cplxMul(cplxMul(cplxMul({ \
2, 0 }, B), Pc[j]), Z0)), cplxMul(cplxMul(F, Pc[0]), Z0)), cplxMul(cplxMul({ \
3, 0 }, cplxAdd(cplxMul(P[j], Pc[j]), cplxMul(P[0], Pc[0]))), cplxPow(Z0, { 2, 0 } \
))), cplxDiv(\
cplxMul(cplxMul(Pc[0], cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), U[j])), cplxMul(Fc, cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[0], Z0)))), cplxMul(cplxMul(Pc[j], U[j]), cplxAdd(cplxMul({ -1, 0 }, \
F), cplxMul(P[0], Z0)))), cplxMul(cplxMul(\
P[0], U[j]), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(Pc[j], Z0)))), cplxMul(\
F, cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(Pc[0], Z0)))), cplxDiv(\
cplxMul(cplxMul(P[j], cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, Fc \
), cplxMul(Pc[0], Z0))), U[j])), cplxDiv(\
cplxMul(cplxMul(cplxMul({ 2, 0 }, Z0), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(Pc[0], Z0))), U[j]));
/* eqnsIII derivatives */
/* dIII/dA */
eval[0].vals[(14 + j - 1) * 22 + 0] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(cplxMul(cplxSub(cplxAdd({ 1, 0 } \
, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd({ -1, 0 }, \
Q[j])), cplxAdd(cplxMul({ -1, 0 }, C), H))), \
Q[j]), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxSub(cplxAdd({ 1, 0 }, cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd({ -1, 0 }, \
Q[j])), cplxAdd(cplxMul({ -1, 0 }, C), H))), \
Q[j]), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j]));
/* dIII/dB */
eval[0].vals[(14 + j - 1) * 22 + 1] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxAdd({ -1, 0 }, cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B), F) \
)), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(P[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, B \
), F)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
U[j], cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(P[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, \
B), F)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd({ -1, 0 }, \
cplxDiv(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), F))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(P[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, B \
), F)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j] \
)), cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(P[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, \
B), F)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dC */
eval[0].vals[(14 + j - 1) * 22 + 2] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(Z0, cplxSub(cplxAdd(cplxSub(Q[j], cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Q[j]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
A), C), Q[j])), cplxMul(P[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
U[j], cplxSub(cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 })), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(P[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxSub(cplxAdd(cplxSub(Q[j], cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Q[j]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
A), C), Q[j])), cplxMul(P[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j] \
)), cplxMul(cplxMul(\
Z0, cplxSub(cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 })), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(P[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dD */
eval[0].vals[(14 + j - 1) * 22 + 3] = cplxAdd(cplxSub(cplxSub(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Dc), Gc), Z0)), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), F), U[j]))), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, B), F)));
/* dIII/dF */
eval[0].vals[(14 + j - 1) * 22 + 4] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), Z0), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 })) \
), cplxMul(cplxMul(U[j], cplxAdd(cplxDiv(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, B), F)), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, F \
), cplxMul(P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), \
F), { 2, 0 })))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxMul(cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 }), U[j]) \
)), cplxMul(cplxMul(Z0, cplxAdd(cplxDiv(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, B), F)), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, F \
), cplxMul(P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), \
F), { 2, 0 })))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dG */
eval[0].vals[(14 + j - 1) * 22 + 5] = cplxSub(cplxAdd(cplxAdd(cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Dc \
), Gc), Z0), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, C), H))), cplxDiv(\
cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, C), H))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), H), U[j]))), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, C), H)));
/* dIII/dH */
eval[0].vals[(14 + j - 1) * 22 + 6] = cplxSub(cplxSub(cplxAdd(cplxDiv(cplxMul(cplxMul(cplxMul(cplxAdd(\
cplxMul({ -1, 0 }, C), G), Z0), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 } \
)), cplxMul(cplxMul(U[j], cplxSub(cplxMul({ -1, 0 }, \
cplxDiv(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, C \
), H))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), \
H), { 2, 0 })))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxMul(cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 }), U[j]) \
)), cplxMul(cplxMul(Z0, cplxSub(cplxMul({ -1, 0 }, \
cplxDiv(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, C \
), H))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), \
H), { 2, 0 })))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dAc */
eval[0].vals[(14 + j - 1) * 22 + 7] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(cplxMul(cplxSub(cplxAdd({ 1, 0 } \
, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd({ -1, 0 }, \
Qc[j])), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), \
Qc[j]), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))))), cplxMul(cplxMul(\
cplxSub(cplxAdd({ 1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc \
), Gc), cplxAdd({ -1, 0 }, Qc[j])), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), \
Qc[j]), U[j]), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))));
/* dIII/dBc */
eval[0].vals[(14 + j - 1) * 22 + 8] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxSub(cplxAdd({ -1, 0 }, cplxDiv(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 } \
))), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))))), cplxMul(cplxMul(\
U[j], cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxSub(cplxAdd({ -1, 0 }, cplxDiv(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 } \
))), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 })) \
), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc)))), U[j])), cplxMul(cplxMul(\
Z0, cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 })) \
), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc)))));
/* dIII/dCc */
eval[0].vals[(14 + j - 1) * 22 + 9] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxSub(cplxAdd(cplxSub(Qc[j], cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Qc[j]), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 } \
))), cplxDiv(cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
U[j], cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxSub(cplxAdd(cplxSub(Qc[j], cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Qc[j]), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 } \
))), cplxDiv(cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxSub(cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 } \
)), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j])), cplxMul(cplxMul(\
Z0, cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxSub(cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 } \
)), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dDc */
eval[0].vals[(14 + j - 1) * 22 + 10] = cplxAdd(cplxSub(cplxSub(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxAdd(cplxMul({ -1, 0 }, D), G), Z0)), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
Bc), Fc))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, Fc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), U[j]))), cplxDiv(\
cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc)));
/* dIII/dFc */
eval[0].vals[(14 + j - 1) * 22 + 11] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 }))), cplxDiv(\
cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), U[j]), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 }))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc)), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc \
), cplxMul(Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Fc), { 2, 0 })))), U[j])), cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc)), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc \
), cplxMul(Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Fc), { 2, 0 })))));
/* dIII/dGc */
eval[0].vals[(14 + j - 1) * 22 + 12] = cplxSub(cplxAdd(cplxAdd(cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, D), \
G), Z0), cplxDiv(cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxMul({ -1, 0 }, \
Ac), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
Cc), Hc))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), U[j]))), cplxDiv(\
cplxMul(cplxMul(U[j], cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)));
/* dIII/dHc */
eval[0].vals[(14 + j - 1) * 22 + 13] = cplxSub(cplxAdd(cplxSub(cplxDiv(cplxMul(cplxMul(cplxMul(cplxAdd(\
cplxMul({ -1, 0 }, Cc), Gc), Z0), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 })), cplxDiv(\
cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), U[j]), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 }))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxSub(cplxMul({ -1, 0 }, \
cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, \
Cc), Hc))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 })))), U[j] \
)), cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxSub(cplxMul({ -1, 0 }, cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc), { 2, 0 })))));
/* dIII/dU */
eval[0].vals[(14 + j - 1) * 22 + 13 + j] = cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxPow(U[j], { 2, 0 })));
/* dIII/dZ0 */
eval[0].vals[(14 + j) * 22 - 1] = cplxSub(cplxAdd(cplxAdd(cplxSub(cplxSub(cplxSub(cplxAdd(cplxSub(\
cplxAdd(cplxAdd(cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, D), \
G), cplxAdd(cplxMul({ -1, 0 }, Dc), Gc)), cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Pc[j]), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Pc[j]), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc))), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))))), cplxDiv(\
cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Pc[0]), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Pc[0]), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc))), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), U[j])), cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Pc[j]), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Pc[j]), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc))), U[j]), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))) \
)), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Pc[0]), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Pc[0]), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc))), Z0), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))) \
)), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), P[0]), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), P[0]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), U[j]), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), P[j]), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), P[j]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(\
cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), P[0]), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), P[0]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), Z0), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), P[j]), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), P[j]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j] \
)), cplxDiv(\
cplxMul(cplxMul(cplxMul({ 2, 0 }, Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j] \
)), cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H))), cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
}
}
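/* JPAR: Jacobian of the 21 equations (eqnsI, eqnsII, eqnsIII for j = 1..7) with
   respect to the 30 parameters stored in q: P[0..7], Pc[0..7], Q[1..7], Qc[1..7].
   Rows follow the same ordering as JVAR above; the row stride is 30, and any
   entry not assigned inside the loop stays zero from the memset below. */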
__device__ void JPAR(Vect<nV + 1> *Z, Vect<nP> *q, Matr<nV, nP> *eval){
Cplx A = Z[0].vals[0];
Cplx B = Z[0].vals[1];
Cplx C = Z[0].vals[2];
Cplx D = Z[0].vals[3];
Cplx F = Z[0].vals[4];
Cplx G = Z[0].vals[5];
Cplx H = Z[0].vals[6];
Cplx Ac = Z[0].vals[7];
Cplx Bc = Z[0].vals[8];
Cplx Cc = Z[0].vals[9];
Cplx Dc = Z[0].vals[10];
Cplx Fc = Z[0].vals[11];
Cplx Gc = Z[0].vals[12];
Cplx Hc = Z[0].vals[13];
Cplx *U = &Z[0].vals[14 - 1]; // subtract 1 for indexing 1 to 7
Cplx Z0 = Z[0].vals[21];
Cplx *P = &q[0].vals[0]; // indexed from 0 to 7
Cplx *Pc = &q[0].vals[8]; // indexed from 0 to 7
Cplx *Q = &q[0].vals[16 - 1]; // subtract 1 for indexing 1 to 7
Cplx *Qc = &q[0].vals[23 - 1]; // subtract 1 for indexing 1 to 7
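/* clear the whole 21 x 30 Jacobian; only the nonzero derivatives are written in the loop */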
memset(eval[0].vals, 0, sizeof(Matr<nV, nP>));
#pragma unroll
for (int j = 1; j <= 7; ++j){
/* eqnsI derivatives */
/* dI/dP0 */
eval[0].vals[(j - 1) * 30] = cplxAdd(cplxSub(cplxMul(Hc, cplxPow(Z0, { 2, 0 })), cplxMul(\
Pc[0], cplxPow(Z0, { 3, 0 }))), cplxMul(cplxMul(\
U[j], Z0), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(Pc[j], Z0))));
/* dI/dP */
eval[0].vals[(j - 1) * 30 + j] = cplxAdd(cplxSub(cplxAdd(cplxMul(Ac, cplxPow(Z0, { 2, 0 } \
)), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), \
Cc), Qc[j]), cplxPow(Z0, { 2, 0 }))), cplxMul(Pc[j], cplxPow(Z0, { 3, 0 } \
))), cplxDiv(cplxMul(cplxPow(Z0, { 3, 0 }), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), U[j]));
/* dI/dPc0 */
eval[0].vals[(j - 1) * 30 + 8] = cplxAdd(cplxSub(cplxMul(H, cplxPow(Z0, { 2, 0 })), cplxMul(\
P[0], cplxPow(Z0, { 3, 0 }))), cplxDiv(\
cplxMul(cplxPow(Z0, { 3, 0 }), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(P[j], Z0))), U[j]));
/* dI/dPc */
eval[0].vals[(j - 1) * 30 + 8 + j] = cplxAdd(cplxSub(cplxAdd(cplxMul(A, cplxPow(Z0, { 2, 0 } \
)), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), \
C), Q[j]), cplxPow(Z0, { 2, 0 }))), cplxMul(P[j], cplxPow(Z0, { 3, 0 } \
))), cplxMul(cplxMul(U[j], Z0), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))));
/* dI/dQ */
eval[0].vals[(j - 1) * 30 + 15 + j] = cplxAdd(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), \
C), Z0), cplxAdd(cplxMul({ -1, 0 }, Ac), cplxMul(Pc[j], Z0))), cplxDiv(\
cplxMul(cplxMul(cplxSub(A, \
C), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), U[j]));
/* dI/dQc */
eval[0].vals[(j - 1) * 30 + 22 + j] = cplxAdd(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), \
Cc), Z0), cplxAdd(cplxMul({ -1, 0 }, A), cplxMul(P[j], Z0)) \
), cplxMul(cplxMul(cplxSub(Ac, Cc), U[j]), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))));
/* eqnsII derivatives */
/* dII/dP0 */
eval[0].vals[(7 + j - 1) * 30] = cplxAdd(cplxSub(cplxMul(Fc, cplxPow(Z0, { 2, 0 })), cplxMul(\
Pc[0], cplxPow(Z0, { 3, 0 }))), cplxMul(cplxMul(\
U[j], Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(Pc[j], Z0))));
/* dII/dP */
eval[0].vals[(7 + j - 1) * 30 + j] = cplxAdd(cplxSub(cplxMul(Bc, cplxPow(Z0, { 2, 0 })), cplxMul(\
Pc[j], cplxPow(Z0, { 3, 0 }))), cplxDiv(\
cplxMul(cplxPow(Z0, { 3, 0 }), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), U[j]));
/* dII/dPc0 */
eval[0].vals[(7 + j - 1) * 30 + 8] = cplxAdd(cplxSub(cplxMul(F, cplxPow(Z0, { 2, 0 })), cplxMul(\
P[0], cplxPow(Z0, { 3, 0 }))), cplxDiv(\
cplxMul(cplxPow(Z0, { 3, 0 }), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), U[j]));
/* dII/dPc */
eval[0].vals[(7 + j - 1) * 30 + 8 + j] = cplxAdd(cplxSub(cplxMul(B, cplxPow(Z0, { 2, 0 })), cplxMul(\
P[j], cplxPow(Z0, { 3, 0 }))), cplxMul(cplxMul(U[j], Z0), cplxAdd(cplxMul({ -1, 0 }, \
F), cplxMul(P[0], Z0))));
/* dII/dQ */
eval[0].vals[(7 + j - 1) * 30 + 15 + j] = { 0, 0 };
/* dII/dQc */
eval[0].vals[(7 + j - 1) * 30 + 22 + j] = { 0, 0 };
/* eqnsIII derivatives */
/* dIII/dP0 */
eval[0].vals[(14 + j - 1) * 30] = cplxSub(cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), Z0), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Z0), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), cplxMul(cplxMul(\
Z0, cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), Z0), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Z0), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dP */
eval[0].vals[(14 + j - 1) * 30 + j] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), Z0), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Z0), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), Z0), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Z0), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j]));
/* dIII/dPc0 */
eval[0].vals[(14 + j - 1) * 30 + 8] = cplxSub(cplxDiv(cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, \
0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Z0), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), U[j]), cplxMul(cplxMul(\
Z0, cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc \
), Dc), Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Z0), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))));
/* dIII/dPc */
eval[0].vals[(14 + j - 1) * 30 + 8 + j] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Z0), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))))), cplxMul(cplxMul(\
U[j], cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc \
), Dc), Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Z0), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))));
/* dIII/dQ */
eval[0].vals[(14 + j - 1) * 30 + 15 + j] = cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, A), C), cplxDiv(\
cplxMul(cplxSub(A, C), cplxAdd(cplxMul({ -1, 0 }, C), \
G)), cplxAdd(cplxMul({ -1, 0 }, C), \
H))), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, A), C), cplxDiv(\
cplxMul(cplxSub(A, C), cplxAdd(cplxMul({ -1, 0 }, C), \
G)), cplxAdd(cplxMul({ -1, 0 }, C), \
H))), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j]));
/* dIII/dQc */
eval[0].vals[(14 + j - 1) * 30 + 22 + j] = cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), cplxDiv(\
cplxMul(cplxSub(Ac, Cc), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc)), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc))), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))))), cplxMul(cplxMul(\
cplxAdd(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), cplxDiv(\
cplxMul(cplxSub(Ac, Cc), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc)), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), U[j]), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))));
}
}
| dd9dd896add7590ca58df211f80244d93c49ac5e.cu | #include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include "support.cuh"
#include <stdlib.h>
#include "htpykinematics.cuh"
__device__ void FSYS(Vect<nV + 1> *Z, Vect<nP> *q, Vect<nV> *eval){
Cplx A = Z[0].vals[0];
Cplx B = Z[0].vals[1];
Cplx C = Z[0].vals[2];
Cplx D = Z[0].vals[3];
Cplx F = Z[0].vals[4];
Cplx G = Z[0].vals[5];
Cplx H = Z[0].vals[6];
Cplx Ac = Z[0].vals[7];
Cplx Bc = Z[0].vals[8];
Cplx Cc = Z[0].vals[9];
Cplx Dc = Z[0].vals[10];
Cplx Fc = Z[0].vals[11];
Cplx Gc = Z[0].vals[12];
Cplx Hc = Z[0].vals[13];
Cplx *U = &Z[0].vals[14-1]; // subtract 1 for indexing 1 to 7
Cplx Z0 = Z[0].vals[21];
Cplx *P = &q[0].vals[0]; // indexed from 0 to 7
Cplx *Pc = &q[0].vals[8]; // indexed from 0 to 7
Cplx *Q = &q[0].vals[16-1]; // subtract 1 for indexing 1 to 7
Cplx *Qc = &q[0].vals[23-1]; // subtract 1 for indexing 1 to 7
#pragma unroll
for (int j = 1; j <= 7; ++j){
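/* eqnsI */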
eval[0].vals[j - 1] = cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(\
cplxMul({ -1, 0 }, cplxMul(cplxAdd(cplxMul(P[j], Pc[j]), cplxMul(\
P[0], Pc[0])), cplxPow(Z0, { 3, 0 }))), cplxMul(cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j]), Z0), cplxAdd(cplxMul({ -1, 0 }, A \
), cplxMul(P[j], Z0)))), cplxMul(cplxMul(\
Ac, Z0), cplxAdd(cplxAdd(cplxMul({ -1, 0 }, A), C), cplxMul(P[j], Z0)) \
)), cplxMul(cplxMul(Hc, Z0), cplxAdd(cplxMul({ -1, 0 }, C), cplxMul(\
P[0], Z0)))), cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), \
C), Q[j]), Z0), cplxAdd(cplxMul({ -1, 0 }, Ac), cplxMul(Pc[j], Z0)) \
)), cplxMul(cplxMul(A, Z0), cplxAdd(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc \
), cplxMul(Pc[j], Z0)))), cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(Pc[j], Z0)) \
)), cplxMul(cplxMul(H, Z0), cplxAdd(cplxMul({ -1, 0 }, Cc), cplxMul(\
Pc[0], Z0)))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(Pc[0], Z0))), U[j]));
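/* eqnsII */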
eval[0].vals[j + 6] = cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxSub(cplxAdd(cplxMul(cplxMul(Bc, P[j])\
, cplxPow(Z0, { 2, 0 })), cplxMul(cplxMul(B, Pc[j]), cplxPow(Z0, { 2, 0 }))), cplxMul(\
cplxAdd(cplxMul(P[j], Pc[j]), cplxMul(P[0], Pc[0])), cplxPow(Z0, { 3, 0 } \
))), cplxMul(cplxMul(Fc, Z0), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[0], Z0)))), cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(Pc[j], Z0)) \
)), cplxMul(cplxMul(F, Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[0], Z0)))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(Pc[0], Z0))), U[j]));
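/* eqnsIII */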
eval[0].vals[j + 13] = cplxSub(cplxAdd(cplxAdd(cplxSub(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, \
0 }, D), G), cplxAdd(cplxMul({ -1, 0 }, Dc), Gc)), Z0 \
), cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxAdd(cplxSub(A, B \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
U[j], cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j] \
)), cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
}
}
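/* JVAR: Jacobian of the 21 equations with respect to the 22 unknowns in Z
   (A, B, C, D, F, G, H, their *c counterparts, U[1..7], Z0).  The row stride is 22;
   columns 0-13 hold the derivatives with respect to A..Hc, column 13+j the
   derivative with respect to U[j], and the last column of each row (j*22 - 1) the
   derivative with respect to Z0.  Unassigned entries stay zero from the memset. */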
__device__ void JVAR(Vect<nV+1> *Z, Vect<nP> *q, Matr<nV,nV+1> *eval){
Cplx A = Z[0].vals[0];
Cplx B = Z[0].vals[1];
Cplx C = Z[0].vals[2];
Cplx D = Z[0].vals[3];
Cplx F = Z[0].vals[4];
Cplx G = Z[0].vals[5];
Cplx H = Z[0].vals[6];
Cplx Ac = Z[0].vals[7];
Cplx Bc = Z[0].vals[8];
Cplx Cc = Z[0].vals[9];
Cplx Dc = Z[0].vals[10];
Cplx Fc = Z[0].vals[11];
Cplx Gc = Z[0].vals[12];
Cplx Hc = Z[0].vals[13];
Cplx *U = &Z[0].vals[14 - 1]; // subtract 1 for indexing 1 to 7
Cplx Z0 = Z[0].vals[21];
Cplx *P = &q[0].vals[0]; // indexed from 0 to 7
Cplx *Pc = &q[0].vals[8]; // indexed from 0 to 7
Cplx *Q = &q[0].vals[16 - 1]; // subtract 1 for indexing 1 to 7
Cplx *Qc = &q[0].vals[23 - 1]; // subtract 1 for indexing 1 to 7
memset(eval[0].vals, 0, sizeof(Matr<nV, nV+1>));
#pragma unroll
for (int j = 1; j <= 7; ++j){
/* eqnsI derivatives */
/* dI/dA */
eval[0].vals[(j - 1) * 22 + 0] = cplxAdd(cplxAdd(cplxSub(cplxSub(cplxMul({ -1, 0 }, cplxMul(Ac, Z0) \
), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j]), Z0 \
)), cplxMul(cplxMul(Q[j], Z0), cplxAdd(cplxMul({ -1, 0 }, Ac), cplxMul(\
Pc[j], Z0)))), cplxMul(Z0, cplxAdd(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc \
), cplxMul(Pc[j], Z0)))), cplxDiv(cplxMul(cplxMul(cplxAdd({ -1, 0 }, \
Q[j]), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), U[j]));
/* dI/dB */
eval[0].vals[(j - 1) * 22 + 1] = { 0, 0 };
/* dI/dC */
eval[0].vals[(j - 1) * 22 + 2] = cplxSub(cplxAdd(cplxSub(cplxMul(Ac, Z0), cplxMul(Hc, Z0 \
)), cplxMul(cplxMul(Q[j], Z0), cplxAdd(cplxMul({ -1, 0 }, Ac), cplxMul(\
Pc[j], Z0)))), cplxDiv(\
cplxMul(cplxMul(Q[j], cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), U[j]));
/* dI/dD */
eval[0].vals[(j - 1) * 22 + 3] = { 0, 0 };
/* dI/dF */
eval[0].vals[(j - 1) * 22 + 4] = { 0, 0 };
/* dI/dG */
eval[0].vals[(j - 1) * 22 + 5] = { 0, 0 };
/* dI/dH */
eval[0].vals[(j - 1) * 22 + 6] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(U[j], cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(Pc[j], Z0))) \
), cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, Cc), cplxMul(Pc[0], Z0))));
/* dI/dAc */
eval[0].vals[(j - 1) * 22 + 7] = cplxAdd(cplxAdd(cplxSub(cplxSub(cplxMul({ -1, 0 }, cplxMul(A, Z0) \
), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j]), Z0 \
)), cplxMul(cplxMul(Qc[j], Z0), cplxAdd(cplxMul({ -1, 0 }, A), cplxMul(\
P[j], Z0)))), cplxMul(Z0, cplxAdd(cplxAdd(cplxMul({ -1, 0 }, A), C \
), cplxMul(P[j], Z0)))), cplxMul(cplxMul(cplxAdd({ -1, 0 }, \
Qc[j]), U[j]), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(P[0], Z0))));
/* dI/dBc */
eval[0].vals[(j - 1) * 22 + 8] = { 0, 0 };
/* dI/dCc */
eval[0].vals[(j - 1) * 22 + 9] = cplxSub(cplxAdd(cplxSub(cplxMul(A, Z0), cplxMul(H, Z0 \
)), cplxMul(cplxMul(Qc[j], Z0), cplxAdd(cplxMul({ -1, 0 }, A), cplxMul(\
P[j], Z0)))), cplxMul(cplxMul(Qc[j], U[j]), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))));
/* dI/dDc */
eval[0].vals[(j - 1) * 22 + 10] = { 0, 0 };
/* dI/dFc */
eval[0].vals[(j - 1) * 22 + 11] = { 0, 0 };
/* dI/dGc */
eval[0].vals[(j - 1) * 22 + 12] = { 0, 0 };
/* dI/dHc */
eval[0].vals[(j - 1) * 22 + 13] = cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(P[j], Z0))), U[j]) \
), cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, C), cplxMul(P[0], Z0))));
/* dI/dU */
eval[0].vals[(j - 1) * 22 + 13 + j] = cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0)), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(Pc[j], Z0))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), cplxPow(U[j], { 2, 0 })));
/* dI/dZ0 */
eval[0].vals[j * 22 - 1] = cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(\
cplxAdd(cplxAdd(cplxAdd(cplxSub(cplxAdd(cplxAdd(cplxAdd(cplxAdd(\
cplxAdd(cplxMul(cplxMul(Ac, P[j]), Z0), cplxMul(cplxMul(Hc, P[0]), Z0 \
)), cplxMul(cplxMul(A, Pc[j]), Z0)), cplxMul(cplxMul(H, Pc[0]), Z0 \
)), cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), \
C), Pc[j]), Q[j]), Z0)), cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac \
), Cc), P[j]), Qc[j]), Z0)), cplxMul(cplxMul({ 3, 0 }, cplxAdd(cplxMul(P[j], Pc[j] \
), cplxMul(P[0], Pc[0]))), cplxPow(Z0, { 2, 0 }))), cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j]), cplxAdd(cplxMul({ -1, 0 }, A \
), cplxMul(P[j], Z0)))), cplxMul(Ac, cplxAdd(cplxAdd(cplxMul({ -1, 0 }, A), \
C), cplxMul(P[j], Z0)))), cplxDiv(\
cplxMul(cplxMul(Pc[0], cplxPow(Z0, { 2, 0 })), cplxAdd(cplxSub(cplxMul({ -1, 0 }, \
A), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), U[j])), cplxMul(Hc, cplxAdd(cplxMul({ -1, 0 }, C), cplxMul(\
P[0], Z0)))), cplxMul(cplxMul(Pc[j], U[j]), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0)))), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), \
C), Q[j]), cplxAdd(cplxMul({ -1, 0 }, Ac), cplxMul(Pc[j], Z0)))), cplxMul(\
A, cplxAdd(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), cplxMul(Pc[j], Z0)) \
)), cplxMul(cplxMul(P[0], U[j]), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(Pc[j], Z0)) \
)), cplxMul(H, cplxAdd(cplxMul({ -1, 0 }, Cc), cplxMul(Pc[0], Z0)) \
)), cplxDiv(\
cplxMul(cplxMul(P[j], cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), U[j])), cplxDiv(\
cplxMul(cplxMul(cplxMul({ 2, 0 }, Z0), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(Pc[0], Z0))), U[j]));
/* eqnsII derivatives */
/* dII/dA */
eval[0].vals[(7 + j - 1) * 22 + 0] = { 0, 0 };
/* dII/dB */
eval[0].vals[(7 + j - 1) * 22 + 1] = cplxSub(cplxAdd(cplxMul({ -1, 0 }, cplxMul(Fc, Z0)), cplxMul(\
Pc[j], cplxPow(Z0, { 2, 0 }))), cplxDiv(\
cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), U[j]));
/* dII/dC */
eval[0].vals[(7 + j - 1) * 22 + 2] = { 0, 0 };
/* dII/dD */
eval[0].vals[(7 + j - 1) * 22 + 3] = { 0, 0 };
/* dII/dF */
eval[0].vals[(7 + j - 1) * 22 + 4] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0)))), cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[0], Z0))));
/* dII/dG */
eval[0].vals[(7 + j - 1) * 22 + 5] = { 0, 0 };
/* dII/dH */
eval[0].vals[(7 + j - 1) * 22 + 6] = { 0, 0 };
/* dII/dAc */
eval[0].vals[(7 + j - 1) * 22 + 7] = { 0, 0 };
/* dII/dBc */
eval[0].vals[(7 + j - 1) * 22 + 8] = cplxSub(cplxAdd(cplxMul({ -1, 0 }, cplxMul(F, Z0)), cplxMul(\
P[j], cplxPow(Z0, { 2, 0 }))), cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, F \
), cplxMul(P[0], Z0))));
/* dII/dCc */
eval[0].vals[(7 + j - 1) * 22 + 9] = { 0, 0 };
/* dII/dDc */
eval[0].vals[(7 + j - 1) * 22 + 10] = { 0, 0 };
/* dII/dFc */
eval[0].vals[(7 + j - 1) * 22 + 11] = cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), U[j])), cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[0], Z0))));
/* dII/dGc */
eval[0].vals[(7 + j - 1) * 22 + 12] = { 0, 0 };
/* dII/dHc */
eval[0].vals[(7 + j - 1) * 22 + 13] = { 0, 0 };
/* dII/dU */
eval[0].vals[(7 + j - 1) * 22 + 13 + j] = cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(Pc[j], Z0))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxPow(U[j], { 2, 0 })));
/* dII/dZ0 */
eval[0].vals[(7 + j) * 22 - 1] = cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxAdd(cplxSub(\
cplxAdd(cplxAdd(cplxAdd(cplxMul(cplxMul(cplxMul({ 2, 0 }, Bc), P[j]), Z0 \
), cplxMul(cplxMul(Fc, P[0]), Z0)), cplxMul(cplxMul(cplxMul({ \
2, 0 }, B), Pc[j]), Z0)), cplxMul(cplxMul(F, Pc[0]), Z0)), cplxMul(cplxMul({ \
3, 0 }, cplxAdd(cplxMul(P[j], Pc[j]), cplxMul(P[0], Pc[0]))), cplxPow(Z0, { 2, 0 } \
))), cplxDiv(\
cplxMul(cplxMul(Pc[0], cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), U[j])), cplxMul(Fc, cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[0], Z0)))), cplxMul(cplxMul(Pc[j], U[j]), cplxAdd(cplxMul({ -1, 0 }, \
F), cplxMul(P[0], Z0)))), cplxMul(cplxMul(\
P[0], U[j]), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(Pc[j], Z0)))), cplxMul(\
F, cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(Pc[0], Z0)))), cplxDiv(\
cplxMul(cplxMul(P[j], cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, Fc \
), cplxMul(Pc[0], Z0))), U[j])), cplxDiv(\
cplxMul(cplxMul(cplxMul({ 2, 0 }, Z0), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(Pc[0], Z0))), U[j]));
/* eqnsIII derivatives */
/* dIII/dA */
eval[0].vals[(14 + j - 1) * 22 + 0] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(cplxMul(cplxSub(cplxAdd({ 1, 0 } \
, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd({ -1, 0 }, \
Q[j])), cplxAdd(cplxMul({ -1, 0 }, C), H))), \
Q[j]), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxSub(cplxAdd({ 1, 0 }, cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd({ -1, 0 }, \
Q[j])), cplxAdd(cplxMul({ -1, 0 }, C), H))), \
Q[j]), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j]));
/* dIII/dB */
eval[0].vals[(14 + j - 1) * 22 + 1] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxAdd({ -1, 0 }, cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B), F) \
)), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(P[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, B \
), F)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
U[j], cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(P[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, \
B), F)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd({ -1, 0 }, \
cplxDiv(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), F))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(P[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, B \
), F)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j] \
)), cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(P[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, \
B), F)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dC */
eval[0].vals[(14 + j - 1) * 22 + 2] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(Z0, cplxSub(cplxAdd(cplxSub(Q[j], cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Q[j]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
A), C), Q[j])), cplxMul(P[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
U[j], cplxSub(cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 })), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(P[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxSub(cplxAdd(cplxSub(Q[j], cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Q[j]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 }))), cplxDiv(\
cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
A), C), Q[j])), cplxMul(P[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j] \
)), cplxMul(cplxMul(\
Z0, cplxSub(cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 })), cplxDiv(\
cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(P[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dD */
eval[0].vals[(14 + j - 1) * 22 + 3] = cplxAdd(cplxSub(cplxSub(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Dc), Gc), Z0)), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), F), U[j]))), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, B), F)));
/* dIII/dF */
eval[0].vals[(14 + j - 1) * 22 + 4] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), Z0), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 })) \
), cplxMul(cplxMul(U[j], cplxAdd(cplxDiv(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, B), F)), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, F \
), cplxMul(P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), \
F), { 2, 0 })))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxMul(cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), F), { 2, 0 }), U[j]) \
)), cplxMul(cplxMul(Z0, cplxAdd(cplxDiv(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, B), F)), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, F \
), cplxMul(P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, B), \
F), { 2, 0 })))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dG */
eval[0].vals[(14 + j - 1) * 22 + 5] = cplxSub(cplxAdd(cplxAdd(cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Dc \
), Gc), Z0), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, C), H))), cplxDiv(\
cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, C), H))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), H), U[j]))), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxMul({ -1, 0 }, C), H)));
/* dIII/dH */
eval[0].vals[(14 + j - 1) * 22 + 6] = cplxSub(cplxSub(cplxAdd(cplxDiv(cplxMul(cplxMul(cplxMul(cplxAdd(\
cplxMul({ -1, 0 }, C), G), Z0), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 } \
)), cplxMul(cplxMul(U[j], cplxSub(cplxMul({ -1, 0 }, \
cplxDiv(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, C \
), H))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), \
H), { 2, 0 })))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxMul(cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), H), { 2, 0 }), U[j]) \
)), cplxMul(cplxMul(Z0, cplxSub(cplxMul({ -1, 0 }, \
cplxDiv(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, C \
), H))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, C), \
H), { 2, 0 })))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dAc */
eval[0].vals[(14 + j - 1) * 22 + 7] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(cplxMul(cplxSub(cplxAdd({ 1, 0 } \
, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd({ -1, 0 }, \
Qc[j])), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), \
Qc[j]), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))))), cplxMul(cplxMul(\
cplxSub(cplxAdd({ 1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc \
), Gc), cplxAdd({ -1, 0 }, Qc[j])), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), \
Qc[j]), U[j]), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))));
/* dIII/dBc */
eval[0].vals[(14 + j - 1) * 22 + 8] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxSub(cplxAdd({ -1, 0 }, cplxDiv(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 } \
))), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))))), cplxMul(cplxMul(\
U[j], cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxSub(cplxAdd({ -1, 0 }, cplxDiv(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 } \
))), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 })) \
), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc)))), U[j])), cplxMul(cplxMul(\
Z0, cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 })) \
), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc)))));
/* dIII/dCc */
eval[0].vals[(14 + j - 1) * 22 + 9] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxSub(cplxAdd(cplxSub(Qc[j], cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Qc[j]), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 } \
))), cplxDiv(cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
U[j], cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxSub(cplxAdd(cplxSub(Qc[j], cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Qc[j]), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 } \
))), cplxDiv(cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0)), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxSub(cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 } \
)), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j])), cplxMul(cplxMul(\
Z0, cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxSub(cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 } \
)), cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0)), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dDc */
eval[0].vals[(14 + j - 1) * 22 + 10] = cplxAdd(cplxSub(cplxSub(cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxAdd(cplxMul({ -1, 0 }, D), G), Z0)), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
Bc), Fc))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, Fc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), U[j]))), cplxDiv(\
cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc)));
/* dIII/dFc */
eval[0].vals[(14 + j - 1) * 22 + 11] = cplxSub(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 }))), cplxDiv(\
cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), U[j]), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), Fc), { 2, 0 }))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc)), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc \
), cplxMul(Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Fc), { 2, 0 })))), U[j])), cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc)), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc \
), cplxMul(Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Fc), { 2, 0 })))));
/* dIII/dGc */
eval[0].vals[(14 + j - 1) * 22 + 12] = cplxSub(cplxAdd(cplxAdd(cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, D), \
G), Z0), cplxDiv(cplxMul(cplxMul(Z0, cplxAdd(cplxSub(cplxMul({ -1, 0 }, \
Ac), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
Cc), Hc))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), U[j]))), cplxDiv(\
cplxMul(cplxMul(U[j], cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), cplxDiv(\
cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)));
/* dIII/dHc */
eval[0].vals[(14 + j - 1) * 22 + 13] = cplxSub(cplxAdd(cplxSub(cplxDiv(cplxMul(cplxMul(cplxMul(cplxAdd(\
cplxMul({ -1, 0 }, Cc), Gc), Z0), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 })), cplxDiv(\
cplxMul(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), U[j]), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 }))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxSub(cplxMul({ -1, 0 }, \
cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, \
Cc), Hc))), cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), Hc), { 2, 0 })))), U[j] \
)), cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H)))), cplxSub(cplxMul({ -1, 0 }, cplxDiv(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxPow(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc), { 2, 0 })))));
/* dIII/dU */
eval[0].vals[(14 + j - 1) * 22 + 13 + j] = cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxPow(U[j], { 2, 0 })));
/* dIII/dZ0 */
eval[0].vals[(14 + j) * 22 - 1] = cplxSub(cplxAdd(cplxAdd(cplxSub(cplxSub(cplxSub(cplxAdd(cplxSub(\
cplxAdd(cplxAdd(cplxSub(cplxMul(cplxAdd(cplxMul({ -1, 0 }, D), \
G), cplxAdd(cplxMul({ -1, 0 }, Dc), Gc)), cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Pc[j]), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Pc[j]), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc))), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))))), cplxDiv(\
cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Pc[0]), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Pc[0]), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc))), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), U[j])), cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Pc[j]), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Pc[j]), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc))), U[j]), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))) \
)), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Pc[0]), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Pc[0]), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc))), Z0), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))) \
)), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), P[0]), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), P[0]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), U[j]), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), P[j]), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), P[j]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(\
cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxMul(cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), P[0]), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), P[0]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), Z0), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), P[j]), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), P[j]), cplxAdd(cplxMul({ -1, 0 }, \
C), H))), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j] \
)), cplxDiv(\
cplxMul(cplxMul(cplxMul({ 2, 0 }, Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(A, \
B), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j] \
)), cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), \
H))), cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, \
Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
}
}
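/* Summary inferred from the index arithmetic above (not stated in the original):
 * each row of the generated Jacobian has 22 columns, matching the 22-entry Z
 * vector read by JPAR below. Columns 0-13 hold the derivatives with respect to
 * A,B,C,D,F,G,H,Ac,Bc,Cc,Dc,Fc,Gc,Hc (the dIII/dH ... dIII/dHc labels land on
 * columns 6-13), column 13 + j holds dIII/dU[j] for j = 1..7, and column 21
 * (written as (14 + j) * 22 - 1) holds dIII/dZ0; rows 14-20 form the eqnsIII
 * block for j = 1..7. */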
__device__ void JPAR(Vect<nV + 1> *Z, Vect<nP> *q, Matr<nV, nP> *eval){
Cplx A = Z[0].vals[0];
Cplx B = Z[0].vals[1];
Cplx C = Z[0].vals[2];
Cplx D = Z[0].vals[3];
Cplx F = Z[0].vals[4];
Cplx G = Z[0].vals[5];
Cplx H = Z[0].vals[6];
Cplx Ac = Z[0].vals[7];
Cplx Bc = Z[0].vals[8];
Cplx Cc = Z[0].vals[9];
Cplx Dc = Z[0].vals[10];
Cplx Fc = Z[0].vals[11];
Cplx Gc = Z[0].vals[12];
Cplx Hc = Z[0].vals[13];
Cplx *U = &Z[0].vals[14 - 1]; // subtract 1 for indexing 1 to 7
Cplx Z0 = Z[0].vals[21];
Cplx *P = &q[0].vals[0]; // indexed from 0 to 7
Cplx *Pc = &q[0].vals[8]; // indexed from 0 to 7
Cplx *Q = &q[0].vals[16 - 1]; // subtract 1 for indexing 1 to 7
Cplx *Qc = &q[0].vals[23 - 1]; // subtract 1 for indexing 1 to 7
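/* Layout implied by the pointer arithmetic above: Z.vals[0..13] = A..Hc,
 * Z.vals[14..20] = U[1..7], Z.vals[21] = Z0 (so nV + 1 = 22), and
 * q.vals[0..7] = P, q.vals[8..15] = Pc, q.vals[16..22] = Q[1..7],
 * q.vals[23..29] = Qc[1..7] (so nP = 30, matching the row stride of 30 below). */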
memset(eval[0].vals, 0, sizeof(Matr<nV, nP>));
#pragma unroll
for (int j = 1; j <= 7; ++j){
/* eqnsI derivatives */
/* dI/dP0 */
eval[0].vals[(j - 1) * 30] = cplxAdd(cplxSub(cplxMul(Hc, cplxPow(Z0, { 2, 0 })), cplxMul(\
Pc[0], cplxPow(Z0, { 3, 0 }))), cplxMul(cplxMul(\
U[j], Z0), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(Pc[j], Z0))));
/* dI/dP */
eval[0].vals[(j - 1) * 30 + j] = cplxAdd(cplxSub(cplxAdd(cplxMul(Ac, cplxPow(Z0, { 2, 0 } \
)), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), \
Cc), Qc[j]), cplxPow(Z0, { 2, 0 }))), cplxMul(Pc[j], cplxPow(Z0, { 3, 0 } \
))), cplxDiv(cplxMul(cplxPow(Z0, { 3, 0 }), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), U[j]));
/* dI/dPc0 */
eval[0].vals[(j - 1) * 30 + 8] = cplxAdd(cplxSub(cplxMul(H, cplxPow(Z0, { 2, 0 })), cplxMul(\
P[0], cplxPow(Z0, { 3, 0 }))), cplxDiv(\
cplxMul(cplxPow(Z0, { 3, 0 }), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A \
), cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(P[j], Z0))), U[j]));
/* dI/dPc */
eval[0].vals[(j - 1) * 30 + 8 + j] = cplxAdd(cplxSub(cplxAdd(cplxMul(A, cplxPow(Z0, { 2, 0 } \
)), cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), \
C), Q[j]), cplxPow(Z0, { 2, 0 }))), cplxMul(P[j], cplxPow(Z0, { 3, 0 } \
))), cplxMul(cplxMul(U[j], Z0), cplxAdd(cplxMul({ -1, 0 }, H), cplxMul(\
P[0], Z0))));
/* dI/dQ */
eval[0].vals[(j - 1) * 30 + 15 + j] = cplxAdd(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, A), \
C), Z0), cplxAdd(cplxMul({ -1, 0 }, Ac), cplxMul(Pc[j], Z0))), cplxDiv(\
cplxMul(cplxMul(cplxSub(A, \
C), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, Hc), cplxMul(\
Pc[0], Z0))), U[j]));
/* dI/dQc */
eval[0].vals[(j - 1) * 30 + 22 + j] = cplxAdd(cplxMul(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Ac), \
Cc), Z0), cplxAdd(cplxMul({ -1, 0 }, A), cplxMul(P[j], Z0)) \
), cplxMul(cplxMul(cplxSub(Ac, Cc), U[j]), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))));
/* eqnsII derivatives */
/* dII/dP0 */
eval[0].vals[(7 + j - 1) * 30] = cplxAdd(cplxSub(cplxMul(Fc, cplxPow(Z0, { 2, 0 })), cplxMul(\
Pc[0], cplxPow(Z0, { 3, 0 }))), cplxMul(cplxMul(\
U[j], Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), cplxMul(Pc[j], Z0))));
/* dII/dP */
eval[0].vals[(7 + j - 1) * 30 + j] = cplxAdd(cplxSub(cplxMul(Bc, cplxPow(Z0, { 2, 0 })), cplxMul(\
Pc[j], cplxPow(Z0, { 3, 0 }))), cplxDiv(\
cplxMul(cplxPow(Z0, { 3, 0 }), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), U[j]));
/* dII/dPc0 */
eval[0].vals[(7 + j - 1) * 30 + 8] = cplxAdd(cplxSub(cplxMul(F, cplxPow(Z0, { 2, 0 })), cplxMul(\
P[0], cplxPow(Z0, { 3, 0 }))), cplxDiv(\
cplxMul(cplxPow(Z0, { 3, 0 }), cplxAdd(cplxMul({ -1, 0 }, B), cplxMul(\
P[j], Z0))), U[j]));
/* dII/dPc */
eval[0].vals[(7 + j - 1) * 30 + 8 + j] = cplxAdd(cplxSub(cplxMul(B, cplxPow(Z0, { 2, 0 })), cplxMul(\
P[j], cplxPow(Z0, { 3, 0 }))), cplxMul(cplxMul(U[j], Z0), cplxAdd(cplxMul({ -1, 0 }, \
F), cplxMul(P[0], Z0))));
/* dII/dQ */
eval[0].vals[(7 + j - 1) * 30 + 15 + j] = { 0, 0 };
/* dII/dQc */
eval[0].vals[(7 + j - 1) * 30 + 22 + j] = { 0, 0 };
/* eqnsIII derivatives */
/* dIII/dP0 */
eval[0].vals[(14 + j - 1) * 30] = cplxSub(cplxMul(cplxMul(U[j], cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), Z0), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Z0), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), cplxMul(cplxMul(\
Z0, cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B \
), D), Z0), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Z0), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))));
/* dIII/dP */
eval[0].vals[(14 + j - 1) * 30 + j] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), Z0), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Z0), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), Z0), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), Z0), cplxAdd(cplxMul({ -1, 0 }, \
C), H)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j]));
/* dIII/dPc0 */
eval[0].vals[(14 + j - 1) * 30 + 8] = cplxSub(cplxDiv(cplxMul(cplxMul(cplxPow(Z0, { 2, 0 }), cplxAdd(cplxMul({ -1, \
0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Z0), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))), U[j]), cplxMul(cplxMul(\
Z0, cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc \
), Dc), Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Z0), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))));
/* dIII/dPc */
eval[0].vals[(14 + j - 1) * 30 + 8 + j] = cplxAdd(cplxMul({ -1, 0 }, cplxMul(cplxMul(Z0, cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Z0), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc)))), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))))), cplxMul(cplxMul(\
U[j], cplxAdd(cplxMul({ -1, 0 }, cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc \
), Dc), Z0), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), Z0), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))));
/* dIII/dQ */
eval[0].vals[(14 + j - 1) * 30 + 15 + j] = cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, A), C), cplxDiv(\
cplxMul(cplxSub(A, C), cplxAdd(cplxMul({ -1, 0 }, C), \
G)), cplxAdd(cplxMul({ -1, 0 }, C), \
H))), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(Ac, Bc), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), Dc), cplxAdd(cplxMul({ -1, 0 }, Bc \
), cplxMul(Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc), cplxAdd(cplxSub(cplxMul({ -1, 0 }, Ac), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), Qc[j])), cplxMul(\
Pc[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))))), cplxDiv(\
cplxMul(cplxMul(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, A), C), cplxDiv(\
cplxMul(cplxSub(A, C), cplxAdd(cplxMul({ -1, 0 }, C), \
G)), cplxAdd(cplxMul({ -1, 0 }, C), \
H))), cplxPow(Z0, { 2, 0 })), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, Bc), \
Dc), cplxAdd(cplxMul({ -1, 0 }, Fc), cplxMul(\
Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Bc), Fc))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, Cc), Gc), cplxAdd(cplxMul({ -1, 0 }, Hc \
), cplxMul(Pc[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc)))), U[j]));
/* dIII/dQc */
eval[0].vals[(14 + j - 1) * 30 + 22 + j] = cplxAdd(cplxMul({ -1, 0 }, \
cplxMul(cplxMul(cplxAdd(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), cplxDiv(\
cplxMul(cplxSub(Ac, Cc), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc)), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Hc))), Z0), cplxAdd(cplxSub(cplxAdd(cplxSub(A, B), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), D), cplxAdd(cplxMul({ -1, 0 }, B \
), cplxMul(P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), \
G), cplxAdd(cplxSub(cplxMul({ -1, 0 }, A), cplxMul(\
cplxAdd(cplxMul({ -1, 0 }, A), C), Q[j])), cplxMul(\
P[j], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H))))), cplxMul(cplxMul(\
cplxAdd(cplxAdd(cplxMul({ -1, 0 }, Ac), Cc), cplxDiv(\
cplxMul(cplxSub(Ac, Cc), cplxAdd(cplxMul({ -1, 0 }, Cc), \
Gc)), cplxAdd(cplxMul({ -1, 0 }, Cc), Hc))), U[j]), cplxAdd(cplxMul({ -1, 0 }, \
cplxDiv(cplxMul(cplxAdd(cplxMul({ -1, 0 }, B), \
D), cplxAdd(cplxMul({ -1, 0 }, F), cplxMul(\
P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, B), F))), cplxDiv(\
cplxMul(cplxAdd(cplxMul({ -1, 0 }, C), G), cplxAdd(cplxMul({ -1, 0 }, H \
), cplxMul(P[0], Z0))), cplxAdd(cplxMul({ -1, 0 }, C), H)))));
}
}
|
aaf06581b51e426aaaa29b85d5292094ecfe7ab0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "umap.h"
#include "runner.h"
#include "umapparams.h"
#include <iostream>
namespace ML {
static const int TPB_X = 32;
UMAP_API::UMAP_API(UMAPParams *params): params(params){
knn = nullptr;
};
UMAP_API::~UMAP_API() {
delete knn;
}
/**
* Fits a UMAP model
* @param X
* pointer to an array in row-major format (note: this will be col-major soon)
* @param n
* n_samples in X
* @param d
* d_features in X
* @param embeddings
* an array to return the output embeddings of size (n_samples, n_components)
*/
void UMAP_API::fit(float *X, int n, int d, float *embeddings) {
this->knn = new kNN(d);
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
UMAPAlgo::_fit<float, TPB_X>(X, n, d, knn, get_params(), embeddings, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void UMAP_API::fit(float *X, float *y, int n, int d, float *embeddings) {
this->knn = new kNN(d);
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
UMAPAlgo::_fit<float, TPB_X>(X, y, n, d, knn, get_params(), embeddings, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
/**
* Project a set of X vectors into the embedding space.
* @param X
* pointer to an array in row-major format (note: this will be col-major soon)
* @param n
* n_samples in X
* @param d
* d_features in X
* @param embedding
* pointer to embedding array of size (embedding_n, n_components) that has been created with fit()
* @param embedding_n
* n_samples in embedding array
* @param out
* pointer to array for storing output embeddings (n, n_components)
*/
void UMAP_API::transform(float *X, int n, int d,
float *embedding, int embedding_n,
float *out) {
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
UMAPAlgo::_transform<float, TPB_X>(X, n, d,
embedding, embedding_n, knn,
get_params(), out, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
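/* A minimal, hypothetical usage sketch for this wrapper (not part of the original
 * source); UMAPParams field names, buffer sizes and the device residency of the
 * pointers are assumptions for illustration only:
 *
 *   UMAPParams params;                        // configure n_neighbors, n_components, ...
 *   ML::UMAP_API umap(&params);
 *   float *X, *emb;                           // n x d input, n x n_components output
 *   CUDA_CHECK(hipMalloc(&X,   n * d * sizeof(float)));
 *   CUDA_CHECK(hipMalloc(&emb, n * params.n_components * sizeof(float)));
 *   umap.fit(X, n, d, emb);                   // unsupervised fit + embed
 *   umap.transform(Xnew, m, d, emb, n, out);  // project m new points into the space
 */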
/**
* Get the UMAPParams instance
*/
UMAPParams* UMAP_API::get_params() { return this->params; }
}
| aaf06581b51e426aaaa29b85d5292094ecfe7ab0.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "umap.h"
#include "runner.h"
#include "umapparams.h"
#include <iostream>
namespace ML {
static const int TPB_X = 32;
UMAP_API::UMAP_API(UMAPParams *params): params(params){
knn = nullptr;
};
UMAP_API::~UMAP_API() {
delete knn;
}
/**
* Fits a UMAP model
* @param X
* pointer to an array in row-major format (note: this will be col-major soon)
* @param n
* n_samples in X
* @param d
* d_features in X
* @param embeddings
* an array to return the output embeddings of size (n_samples, n_components)
*/
void UMAP_API::fit(float *X, int n, int d, float *embeddings) {
this->knn = new kNN(d);
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
UMAPAlgo::_fit<float, TPB_X>(X, n, d, knn, get_params(), embeddings, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void UMAP_API::fit(float *X, float *y, int n, int d, float *embeddings) {
this->knn = new kNN(d);
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
UMAPAlgo::_fit<float, TPB_X>(X, y, n, d, knn, get_params(), embeddings, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
/**
* Project a set of X vectors into the embedding space.
* @param X
* pointer to an array in row-major format (note: this will be col-major soon)
* @param n
* n_samples in X
* @param d
* d_features in X
* @param embedding
* pointer to embedding array of size (embedding_n, n_components) that has been created with fit()
* @param embedding_n
* n_samples in embedding array
* @param out
* pointer to array for storing output embeddings (n, n_components)
*/
void UMAP_API::transform(float *X, int n, int d,
float *embedding, int embedding_n,
float *out) {
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
UMAPAlgo::_transform<float, TPB_X>(X, n, d,
embedding, embedding_n, knn,
get_params(), out, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
/**
* Get the UMAPParams instance
*/
UMAPParams* UMAP_API::get_params() { return this->params; }
}
|
efc1dd2304e600e018426dcd1fed1f53b1d0b33a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "transform.h"
__device__ float op(float d1,float *params) {
return -d1;
}
extern "C"
__global__ void neg_strided_float(int n,int idx,float *dy,int incy,float *params,float *result) {
transform(n,idx,dy,incy,params,result);
}
| efc1dd2304e600e018426dcd1fed1f53b1d0b33a.cu | #include "transform.h"
__device__ float op(float d1,float *params) {
return -d1;
}
extern "C"
__global__ void neg_strided_float(int n,int idx,float *dy,int incy,float *params,float *result) {
transform(n,idx,dy,incy,params,result);
}
|
40903e878e7a31fa844958f4fc94a602b981aef4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "deInterleave_kernel2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
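// The two tables below drive the benchmark sweep in main(): candidate
// (blockDim.x, blockDim.y) launch configurations and square matrix sizes.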
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_X_out = NULL;
hipMalloc(&d_X_out, XSIZE*YSIZE);
float *d_Y_out = NULL;
hipMalloc(&d_Y_out, XSIZE*YSIZE);
char *d_XY_in = NULL;
hipMalloc(&d_XY_in, XSIZE*YSIZE);
int pitch_out = 2;
int pitch_in = 2;
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(deInterleave_kernel2, dim3(gridBlock), dim3(threadBlock), 0, 0, d_X_out, d_Y_out, d_XY_in, pitch_out, pitch_in, width, height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(deInterleave_kernel2, dim3(gridBlock), dim3(threadBlock), 0, 0, d_X_out, d_Y_out, d_XY_in, pitch_out, pitch_in, width, height);
}
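// The 10 untimed launches above serve as a warm-up; the 1000 launches below are
// timed as one block, so the printed value is the total time for 1000 launches in
// microseconds. Note that no device synchronization precedes the end timestamp,
// so the figure largely reflects launch/enqueue cost unless the launch queue fills.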
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(deInterleave_kernel2, dim3(gridBlock), dim3(threadBlock), 0, 0, d_X_out, d_Y_out, d_XY_in, pitch_out, pitch_in, width, height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 40903e878e7a31fa844958f4fc94a602b981aef4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "deInterleave_kernel2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_X_out = NULL;
cudaMalloc(&d_X_out, XSIZE*YSIZE);
float *d_Y_out = NULL;
cudaMalloc(&d_Y_out, XSIZE*YSIZE);
char *d_XY_in = NULL;
cudaMalloc(&d_XY_in, XSIZE*YSIZE);
int pitch_out = 2;
int pitch_in = 2;
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
deInterleave_kernel2<<<gridBlock,threadBlock>>>(d_X_out,d_Y_out,d_XY_in,pitch_out,pitch_in,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
deInterleave_kernel2<<<gridBlock,threadBlock>>>(d_X_out,d_Y_out,d_XY_in,pitch_out,pitch_in,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
deInterleave_kernel2<<<gridBlock,threadBlock>>>(d_X_out,d_Y_out,d_XY_in,pitch_out,pitch_in,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2f56ee8e389502be0d2e7c49088bfd68135d3c90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<hip/hip_runtime.h>
#define NUM_OF_X_THREADS 16
#define NUM_OF_Y_THREADS 16
#define TOTAL_THREADS (NUM_OF_X_THREADS * NUM_OF_Y_THREADS)
#define TILE_WIDTH 16
/* Kernel to take input signal 1
* f(x) = x; where x = 0 to n-1
*/
__global__ void inputKernel(float *x, int n, int N)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x,i;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * NUM_OF_X_THREADS + ix;
if (idx < N)
{
if (idx < n)
{
x[idx*N] = (float)idx;
}
else
{
x[idx*N] = 0;
}
for(i=1;i<N;i++)
{
x[idx*N + i] = 0;
}
}
}
/* Kernel to take input signal 2
* f(x) = x*2 - x^2; where x = 0 to n-1
*/
__global__ void inputKernel2(float *x, int n, int N)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x,i;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * NUM_OF_X_THREADS + ix;
if (idx < N)
{
if (idx < n)
{
x[idx*N] = ((float)idx * 2) - ((float)idx * (float)idx);
}
else
{
x[idx*N] = 0;
}
for(i=1;i<N;i++)
{
x[idx*N + i] = 0;
}
}
}
/* Kernel to generate the twiddle factors
 * Let the twiddle factors be denoted by w.
 * w[k] = e^(i * 2*pi * k / N); where k = 0 to N-1
 * (the k*n products needed for the DFT are formed later by indexing w modulo N).
 * In w, the real and imaginary parts are stored interleaved.
 * Hence, one complex number occupies two index positions, so w has size 2N.
 */
__global__ void factorKernel(float *w, int N)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int idx = ix * 2;
int izx = N + idx;
const float pi = 3.141592653589793238462643383;
float aw = (2.0 * pi) / (float)N;
float arg = aw * (float)ix;
/* Twiddle factors beyond N/2 equal the first half with the sign flipped (a 180-degree phase change), so only the first half is computed directly */
if (idx < N) {
w[idx] = cos(arg);
w[idx + 1] = sin(arg);
w[izx] = (-1) * w[idx];
w[izx+1] = (-1) * w[idx + 1];
}
}
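/* Worked example of the layout produced above, for N = 4 (values follow directly
 * from cos/sin of 2*pi*k/4):
 *   w = [ 1, 0,   0, 1,   -1, 0,   0, -1 ]
 *         k=0      k=1      k=2      k=3
 * i.e. the roots 1, i, -1, -i stored as interleaved (real, imaginary) pairs, with
 * the second half obtained from the first half by a sign flip. */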
/* Kernel to arrange real part of twiddle factors in 2D : Cos(theta)
* Let the real part of w be denoted by wr. For k*n = 1 -> wr = wr1, k*n = 2 -> wr = wr2.
* The real twiddle matrix to take the DFT looks as below:
*
* 1 1 1 1 ... 1
* 1 wr1 wr2 wr3 ... wr(N-1)
* 1 wr2 wr4 wr6 ... wr(N-2)
* 1 wr3 wr6 wr9 ... wr(N-3)
* .
* .
* .
* 1 wr(N-1) wr(N-2) wr(N-3) ... wr1
*/
__global__ void twiddleRealKernel(float *wr, float *w, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i = 0, index;
if (idx < N) {
if (idx == 0) {
for (i = 0; i < N; i++)
wr[idx * N + i] = 1;
} else {
wr[idx * N + 0] = 1;
for (i = 1; i < N; i++) {
index = (idx * i) % N;
wr[idx * N + i] = w[index * 2];
}
}
}
}
/* Kernel to arrange imaginary part of twiddle factors in 2D : -iSin(theta)
* Let the imaginary part of w be denoted by wi. For k*n = 1 -> wi = wi1, k*n = 2 -> wi = wi2.
* The imaginary twiddle matrix to take the DFT looks as below:
*
* 0 0 0 0 ... 0
* 0 wi1 wi2 wi3 ... wi(N-1)
* 0 wi2 wi4 wi6 ... wi(N-2)
* 0 wi3 wi6 wi9 ... wi(N-3)
* .
* .
* .
* 0 wi(N-1) wi(N-2) wi(N-3) ... wi1
*/
__global__ void twiddleImgKernel(float *wi, float *w, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, index;
if (idx < N) {
if (idx == 0) {
for (i = 0; i < N; i++)
wi[idx * N + i] = 0;
} else {
wi[idx * N + 0] = 0;
for (i = 1; i < N; i++) {
index = (idx * i) % N;
wi[idx * N + i] = (-1) * w[index * 2 + 1];
}
}
}
}
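/* Worked example for N = 4: with wr from twiddleRealKernel and wi from
 * twiddleImgKernel, wr + i*wi is the standard DFT matrix
 *   [ 1   1   1   1 ]
 *   [ 1  -i  -1   i ]
 *   [ 1  -1   1  -1 ]
 *   [ 1   i  -1  -i ]
 * (row k, column n holds e^(-i*2*pi*k*n/4)); the IDFT variant below only flips the
 * sign of the imaginary part. */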
/* Kernel to arrange imaginary part of twiddle factors in 2D for taking IDFT : +iSin(theta)
* The imaginary twiddle matrix to take IDFT is negative of imaginary twiddle matrix to take DFT
* Let imaginary twiddle matrix to take IDFT be wi2, then
* wi2 = (-1) * wi
*/
__global__ void twiddleImgKernelIDFT(float *wi, float *w, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, index;
if (idx < N) {
if (idx == 0) {
for (i = 0; i < N; i++)
wi[idx * N + i] = 0;
} else {
wi[idx * N + 0] = 0;
for (i = 1; i < N; i++) {
index = (idx * i) % N;
wi[idx * N + i] = w[index * 2 + 1];
}
}
}
}
/* Kernel to take the element-wise product of the first columns of two matrices
 * (the DFT vectors live in column 0) and store the result in the output matrix:
 * ab[i][0] = a[i][0] * b[i][0]
 */
__global__ void dotProdKernel(float *a, float *b, float *ab, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if( (idx*N) < (N*N) ) {
ab[idx * N] = a[idx *N] * b[idx * N];
}
}
/* Kernel to multiply two input matrices and store the result in the output matrix.
 * The data from the two matrices is accessed in shared-memory tiles of width TILE_WIDTH.
 */
__global__ void multKernel(float *a, float *b, float *ab, int width)
{
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
// allocate tiles in __shared__ memory
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
// calculate the row & col index to identify element to work on
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
float result = 0;
// loop over the tiles of the input in phases
for(int p = 0; p < width/TILE_WIDTH; ++p)
{
// collaboratively load tiles into shared memory: row-wise and column wise respectively
s_a[ty][tx] = a[row*width + (p*TILE_WIDTH + tx)];
s_b[ty][tx] = b[(p*TILE_WIDTH + ty)*width + col];
__syncthreads();
// dot product between row of s_a and col of s_b
for(int k = 0; k < TILE_WIDTH; ++k)
result += s_a[ty][k] * s_b[k][tx];
__syncthreads();
}
ab[row*width+col] = result;
}
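/* A minimal, hypothetical launch sketch for this kernel (the actual call is not
 * visible in this excerpt); it assumes width is a multiple of TILE_WIDTH, which the
 * padding of N in main() guarantees. The buffer names are taken from main(), but the
 * pairing of arguments is illustrative only:
 *
 *   dim3 block(TILE_WIDTH, TILE_WIDTH);
 *   dim3 grid(N / TILE_WIDTH, N / TILE_WIDTH);
 *   hipLaunchKernelGGL(multKernel, grid, block, 0, 0, dw_r, d_x, ddft_r, N);
 */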
/* Simple kernel to add elements of two matrices.
* In this case, we need to just add the first column of the two matrices
* as all other elements will be 0.
*/
__global__ void addMat(float *a, float *b, float *add, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if((idx*N) < (N*N))
add[idx * N] = a[idx *N] + b[idx * N];
}
/* Simple kernel to subtract elements of two matrices.
* In this case, we need to just subtract the first column of the two matrices
* as all other elements will be 0.
*/
__global__ void subMat(float *a, float *b, float *sub, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if((idx*N) < (N*N))
sub[idx * N] = a[idx * N] - b[idx * N];
}
/* Simple kernel to divide the elements of a matrix by N.
 * In this case, we only need to divide the first column,
 * as all other elements are 0.
 */
__global__ void divMat(float *a, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if((idx*N) < (N*N))
a[idx *N] /= N;
}
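/* Taken together, the kernels above appear to implement convolution via the
 * convolution theorem: both zero-padded inputs are transformed with the (wr, wi)
 * twiddle matrices, the two spectra are combined point-wise (dotProdKernel plus
 * addMat/subMat assembling the complex product), and the result is transformed
 * back with the IDFT twiddles (+sin) followed by divMat's division by N. Padding
 * to N >= 2n - 1 keeps the circular convolution equal to the linear one. */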
/* Main function */
int main(int argc, char** argv)
{
int n, i;
n = strtol(argv[1], 0, 10);
/* Final Padding for convolution and multiplication */
int N = 2*n + (TILE_WIDTH - (2*n)%TILE_WIDTH);
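/* Worked example of the padding, assuming n = 5: 2*n = 10, (2*n) % TILE_WIDTH = 10,
 * so N = 10 + (16 - 10) = 16. This makes N a multiple of TILE_WIDTH (as the tiled
 * multKernel expects) and at least 2*n - 1, the length of the linear convolution. */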
/* Variables on Host */
float realRes[N][N], imgRes[N][N]; /* Resultant convolution matrix - real & img */
float x[N][N], sig2[N][N]; /* Two Input signals */
/* Variables on Device */
float *ddft_realRes, *ddft_imgRes, *d_realRes1, *d_realRes2, *d_realRes, *d_imgRes1, *d_imgRes2, *d_imgRes;
float *d_w, *dw_r, *dw_i, *d_realProd, *d_imgProd, *d_realImg2, *d_imgReal2;
float *d_x, *ddft_r, *ddft_i;
float *d_sig2, *ddft2_r, *ddft2_i;
/* Streams */
hipStream_t d_x_Stream, d_sig2_Stream,d_w_Stream, dw_r_Stream, dw_i_Stream, ddft_r_Stream, ddft_i_Stream;
hipStream_t ddft2_i_Stream, ddft2_r_Stream, d_realProd_Stream, d_imgProd_Stream;
hipStream_t d_realImg2_Stream, d_imgReal2_Stream, ddft_realRes_Stream, ddft_imgRes_Stream, d_realRes1_Stream;
hipStream_t d_realRes2_Stream, d_realRes_Stream, d_imgRes1_Stream, d_imgRes2_Stream, d_imgRes_Stream;
hipStream_t dw_i2_Stream;
/* Creating streams */
hipStreamCreate(&d_x_Stream);
hipStreamCreate(&d_sig2_Stream);
hipStreamCreate(&d_w_Stream);
hipStreamCreate(&dw_r_Stream);
hipStreamCreate(&dw_i_Stream);
hipStreamCreate(&ddft_r_Stream);
hipStreamCreate(&ddft_i_Stream);
hipStreamCreate(&ddft2_i_Stream);
hipStreamCreate(&ddft2_r_Stream);
hipStreamCreate(&d_realProd_Stream);
hipStreamCreate(&d_imgProd_Stream);
hipStreamCreate(&d_realImg2_Stream);
hipStreamCreate(&d_imgReal2_Stream);
hipStreamCreate(&ddft_realRes_Stream);
hipStreamCreate(&ddft_imgRes_Stream);
hipStreamCreate(&d_realRes1_Stream);
hipStreamCreate(&d_realRes2_Stream);
hipStreamCreate(&d_realRes_Stream);
hipStreamCreate(&d_imgRes1_Stream);
hipStreamCreate(&d_imgRes2_Stream);
hipStreamCreate(&d_imgRes_Stream);
hipStreamCreate(&dw_i2_Stream);
/* Timer */
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
dim3 numberOfThreads(NUM_OF_X_THREADS, NUM_OF_Y_THREADS);
dim3 numberOfBlocks( (TOTAL_THREADS + NUM_OF_X_THREADS -1)/NUM_OF_X_THREADS,
(TOTAL_THREADS + NUM_OF_Y_THREADS - 1)/NUM_OF_Y_THREADS );
/* Timer starts */
hipEventRecord(start, 0);
/* Allocating memory on device */
hipMalloc((void **)&ddft_realRes, N * N * sizeof(float));
hipMalloc((void **)&ddft_imgRes, N * N * sizeof(float));
hipMalloc((void **)&d_realRes1, N * N * sizeof(float));
hipMalloc((void **)&d_realRes2, N * N * sizeof(float));
hipMalloc((void **)&d_realRes, N * N * sizeof(float));
hipMalloc((void **)&d_imgRes1, N * N * sizeof(float));
hipMalloc((void **)&d_imgRes2, N * N * sizeof(float));
hipMalloc((void **)&d_imgRes, N * N * sizeof(float));
hipMalloc((void **)&d_w, 2 * N * sizeof(float));
hipMalloc((void **)&dw_r, N * N * sizeof(float));
hipMalloc((void **)&dw_i, N * N * sizeof(float));
hipMalloc((void **)&d_realProd, N * N * sizeof(float));
hipMalloc((void **)&d_imgProd, N * N * sizeof(float));
hipMalloc((void **)&d_realImg2, N * N * sizeof(float));
hipMalloc((void **)&d_imgReal2, N * N * sizeof(float));
hipMalloc((void **)&d_x,N * N * sizeof(float));
hipMalloc((void **)&ddft_r, N * N * sizeof(float));
hipMalloc((void **)&ddft_i, N * N * sizeof(float));
hipMalloc((void **)&d_sig2, N * N * sizeof(float));
hipMalloc((void **)&ddft2_r, N * N * sizeof(float));
hipMalloc((void **)&ddft2_i, N * N * sizeof(float));
// Generating the input matrix 1
hipLaunchKernelGGL(( inputKernel), dim3(numberOfBlocks), dim3(numberOfThreads),0, d_x_Stream , d_x, n, N);
hipMemcpy(x, d_x, N * N * sizeof(float), hipMemcpyDeviceToHost);
//Generating the input matrix 2
hipLaunchKernelGGL(( inputKernel2), dim3(numberOfBlocks), dim3(numberOfThreads), 0, d_sig2_Stream , d_sig2, n, N);
hipMemcpy(sig2, d_sig2, N * N * sizeof(float), hipMemcpyDeviceToHost);
printf("********************\n");
printf("Input1\n");
for(i = 0; i < n; i++)
{
printf("%0.3f\n",x[i][0]);
}
printf("*******************\n");
printf("Input2\n");
for(i = 0; i < n; i++)
{
printf("%0.3f\n",sig2[i][0]);
}
// Calculating twiddle factor
hipLaunchKernelGGL(( factorKernel), dim3((N+511)/512), dim3(512),0, d_w_Stream , d_w, (float)N);
hipStreamSynchronize(d_w_Stream);
// Calculating twiddle real matrix
hipLaunchKernelGGL(( twiddleRealKernel), dim3((N+511)/512), dim3(512),0, dw_r_Stream , dw_r, d_w, N);
// Calculating twiddle imaginary matrix
hipLaunchKernelGGL(( twiddleImgKernel), dim3((N+511)/512), dim3(512),0,dw_i_Stream , dw_i, d_w, N);
dim3 numberOfThreads_1(TILE_WIDTH, TILE_WIDTH);
dim3 numberOfBlocks_1( (N + TILE_WIDTH -1)/TILE_WIDTH, (N + TILE_WIDTH -1)/TILE_WIDTH );
hipStreamSynchronize(dw_r_Stream);
hipStreamSynchronize(d_x_Stream);
hipStreamSynchronize(d_sig2_Stream);
// Calculating real part of DFT of input matrix 1
hipLaunchKernelGGL(( multKernel), dim3(numberOfBlocks_1), dim3(numberOfThreads_1),0,ddft_r_Stream , dw_r, d_x, ddft_r, N);
// Calculating real part of DFT of input matrix 2
hipLaunchKernelGGL(( multKernel), dim3(numberOfBlocks_1), dim3(numberOfThreads_1), 0, ddft2_r_Stream , dw_r, d_sig2, ddft2_r, N);
hipStreamSynchronize(dw_i_Stream);
	// Calculating imaginary part of DFT of input matrix 1
hipLaunchKernelGGL(( multKernel), dim3(numberOfBlocks_1), dim3(numberOfThreads_1),0,ddft_i_Stream , dw_i, d_x, ddft_i, N);
	// Calculating imaginary part of DFT of input matrix 2
hipLaunchKernelGGL(( multKernel), dim3(numberOfBlocks_1), dim3(numberOfThreads_1),0,ddft2_i_Stream , dw_i, d_sig2, ddft2_i, N);
hipStreamSynchronize(ddft_r_Stream);
hipStreamSynchronize(ddft2_r_Stream);
//Multiplying the real part of two signals
hipLaunchKernelGGL(( dotProdKernel), dim3((N + 511)/512), dim3(512),0,d_realProd_Stream , ddft_r, ddft2_r, d_realProd, N);
hipStreamSynchronize(ddft_i_Stream);
hipStreamSynchronize(ddft2_i_Stream);
//Multiplying the imaginary part of the two signals
hipLaunchKernelGGL(( dotProdKernel), dim3((N + 511)/512), dim3(512),0,d_imgProd_Stream , ddft_i, ddft2_i, d_imgProd, N);
//Multiplying the real part of 1 and imaginary part of 2
hipLaunchKernelGGL(( dotProdKernel), dim3((N + 511)/512), dim3(512),0,d_realImg2_Stream , ddft_r, ddft2_i, d_realImg2, N);
//Multiplying the img part of 1 and real part of 2
hipLaunchKernelGGL(( dotProdKernel), dim3((N + 511)/512), dim3(512), 0, d_imgReal2_Stream , ddft_i, ddft2_r, d_imgReal2, N);
hipStreamSynchronize(d_realProd_Stream);
hipStreamSynchronize(d_imgProd_Stream);
// Calculating twiddle imaginary matrix for IDFT
hipLaunchKernelGGL(( twiddleImgKernelIDFT), dim3((N+511)/512), dim3(512),0,dw_i2_Stream , dw_i, d_w, N);
//Real Part of DFT of Result
hipLaunchKernelGGL(( subMat), dim3((N*N + 511)/512), dim3(512), 0, ddft_realRes_Stream , d_realProd, d_imgProd, ddft_realRes, N);
hipStreamSynchronize(d_imgReal2_Stream);
hipStreamSynchronize(d_realImg2_Stream);
//Img Part of DFT of Result
hipLaunchKernelGGL(( addMat), dim3((N*N + 511)/512), dim3(512), 0, ddft_imgRes_Stream , d_imgReal2, d_realImg2, ddft_imgRes, N);
hipStreamSynchronize(ddft_realRes_Stream);
hipStreamSynchronize(ddft_imgRes_Stream);
//Real Part of Resultant Signal after taking IDFT = Real Part of Convolution
hipLaunchKernelGGL(( multKernel), dim3(numberOfBlocks_1), dim3(numberOfThreads_1), 0, d_realRes1_Stream , dw_r, ddft_realRes, d_realRes1, N);
hipLaunchKernelGGL(( multKernel), dim3(numberOfBlocks_1), dim3(numberOfThreads_1), 0, d_imgRes2_Stream , dw_r, ddft_imgRes, d_imgRes2, N);
hipStreamSynchronize(dw_i2_Stream);
//Img Part of Resultant Signal after taking IDFT = Img Part of Convolution
hipLaunchKernelGGL(( multKernel), dim3(numberOfBlocks_1), dim3(numberOfThreads_1), 0,d_realRes2_Stream , dw_i, ddft_imgRes, d_realRes2, N);
hipLaunchKernelGGL(( multKernel), dim3(numberOfBlocks_1), dim3(numberOfThreads_1), 0, d_imgRes1_Stream , dw_i, ddft_realRes, d_imgRes1, N);
hipStreamSynchronize(d_realRes1_Stream);
hipStreamSynchronize(d_realRes2_Stream);
hipLaunchKernelGGL(( subMat), dim3((N*N +511)/512), dim3(512), 0,d_realRes_Stream , d_realRes1, d_realRes2, d_realRes, N);
hipStreamSynchronize(d_realRes_Stream);
hipLaunchKernelGGL(( divMat), dim3((N*N + 511)/512), dim3(512), 0, 0, d_realRes, N);
hipMemcpy(realRes, d_realRes, N * N * sizeof(float), hipMemcpyDeviceToHost);
	printf("Final Convolution\n");
for(i = 0; i < (2*n - 1); i++)
{
printf("%0.3f\n", realRes[i][0]);
}
hipStreamSynchronize(d_imgRes1_Stream);
hipStreamSynchronize(d_imgRes2_Stream);
hipLaunchKernelGGL(( addMat), dim3((N*N + 511)/512), dim3(512), 0, d_imgRes_Stream , d_imgRes1, d_imgRes2, d_imgRes, N);
hipStreamSynchronize(d_imgRes_Stream);
hipLaunchKernelGGL(( divMat), dim3((N*N + 511)/512), dim3(512), 0, 0, d_imgRes, N);
hipMemcpy(imgRes, d_imgRes, N * N * sizeof(float), hipMemcpyDeviceToHost);
printf("*********************************************************************************\n");
/* Timer */
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Time taken : %3.1f ms\n", elapsedTime);
/* De-allocating memory on device */
hipFree(ddft_realRes);
hipFree(ddft_imgRes);
hipFree(d_realRes1);
hipFree(d_realRes2);
hipFree(d_realRes);
hipFree(d_imgRes1);
hipFree(d_imgRes2);
hipFree(d_imgRes);
hipFree(d_w);
hipFree(dw_r);
hipFree(dw_i);
hipFree(d_realProd);
hipFree(d_imgProd);
hipFree(d_realImg2);
hipFree(d_imgReal2);
hipFree(d_x);
hipFree(ddft_r);
hipFree(ddft_i);
hipFree(d_sig2);
hipFree(ddft2_r);
hipFree(ddft2_i);
return 0;
}
| 2f56ee8e389502be0d2e7c49088bfd68135d3c90.cu | #include<stdio.h>
#include<cuda.h>
#define NUM_OF_X_THREADS 16
#define NUM_OF_Y_THREADS 16
#define TOTAL_THREADS (NUM_OF_X_THREADS * NUM_OF_Y_THREADS)
#define TILE_WIDTH 16
/* Kernel to take input signal 1
* f(x) = x; where x = 0 to n-1
*/
__global__ void inputKernel(float *x, int n, int N)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x,i;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * NUM_OF_X_THREADS + ix;
if (idx < N)
{
if (idx < n)
{
x[idx*N] = (float)idx;
}
else
{
			x[idx*N] = 0; /* zero the first column of the padded rows */
}
for(i=1;i<N;i++)
{
x[idx*N + i] = 0;
}
}
}
/* Kernel to take input signal 2
* f(x) = x*2 - x^2; where x = 0 to n-1
*/
__global__ void inputKernel2(float *x, int n, int N)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x,i;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * NUM_OF_X_THREADS + ix;
if (idx < N)
{
if (idx < n)
{
x[idx*N] = ((float)idx * 2) - ((float)idx * (float)idx);
}
else
{
			x[idx*N] = 0; /* zero the first column of the padded rows */
}
for(i=1;i<N;i++)
{
x[idx*N + i] = 0;
}
}
}
/* Kernel to generate the twiddle factors
* Let twiddle factors be denoted by w.
 * w = e^(j*2*pi*k*n/N); where n = 0 to N-1 and k = 0 to N-1
* In w, the real and imaginary part are stored together.
* Hence, one number actually takes two index positions. Thus, w has size 2N
*/
__global__ void factorKernel(float *w, int N)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int idx = ix * 2;
int izx = N + idx;
const float pi = 3.141592653589793238462643383;
float aw = (2.0 * pi) / (float)N;
float arg = aw * (float)ix;
	/* Twiddle factors are symmetric about N/2, with a change in sign due to the 180 degree phase shift */
if (idx < N) {
w[idx] = cos(arg);
w[idx + 1] = sin(arg);
w[izx] = (-1) * w[idx];
w[izx+1] = (-1) * w[idx + 1];
}
}
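/* Worked example (N = 4): arg = (2*pi/4)*ix, so threads ix = 0,1 store the
 * (cos, sin) pairs (1, 0) and (0, 1), and the mirrored second half becomes
 * (-1, 0) and (0, -1); i.e. w holds e^(j*2*pi*k/4) = 1, j, -1, -j for k = 0..3.
 */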
/* Kernel to arrange real part of twiddle factors in 2D : Cos(theta)
* Let the real part of w be denoted by wr. For k*n = 1 -> wr = wr1, k*n = 2 -> wr = wr2.
* The real twiddle matrix to take the DFT looks as below:
*
* 1 1 1 1 ... 1
* 1 wr1 wr2 wr3 ... wr(N-1)
* 1 wr2 wr4 wr6 ... wr(N-2)
* 1 wr3 wr6 wr9 ... wr(N-3)
* .
* .
* .
* 1 wr(N-1) wr(N-2) wr(N-3) ... wr1
*/
__global__ void twiddleRealKernel(float *wr, float *w, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i = 0, index;
if (idx < N) {
if (idx == 0) {
for (i = 0; i < N; i++)
wr[idx * N + i] = 1;
} else {
wr[idx * N + 0] = 1;
for (i = 1; i < N; i++) {
index = (idx * i) % N;
wr[idx * N + i] = w[index * 2];
}
}
}
}
/* Kernel to arrange imaginary part of twiddle factors in 2D : -iSin(theta)
 * Let the imaginary part of w be denoted by wi. For k*n = 1 -> wi = wi1, k*n = 2 -> wi = wi2.
* The imaginary twiddle matrix to take the DFT looks as below:
*
* 0 0 0 0 ... 0
* 0 wi1 wi2 wi3 ... wi(N-1)
* 0 wi2 wi4 wi6 ... wi(N-2)
* 0 wi3 wi6 wi9 ... wi(N-3)
* .
* .
* .
* 0 wi(N-1) wi(N-2) wi(N-3) ... wi1
*/
__global__ void twiddleImgKernel(float *wi, float *w, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, index;
if (idx < N) {
if (idx == 0) {
for (i = 0; i < N; i++)
wi[idx * N + i] = 0;
} else {
wi[idx * N + 0] = 0;
for (i = 1; i < N; i++) {
index = (idx * i) % N;
wi[idx * N + i] = (-1) * w[index * 2 + 1];
}
}
}
}
/* Kernel to arrange imaginary part of twiddle factors in 2D for taking IDFT : +iSin(theta)
* The imaginary twiddle matrix to take IDFT is negative of imaginary twiddle matrix to take DFT
* Let imaginary twiddle matrix to take IDFT be wi2, then
* wi2 = (-1) * wi
*/
__global__ void twiddleImgKernelIDFT(float *wi, float *w, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, index;
if (idx < N) {
if (idx == 0) {
for (i = 0; i < N; i++)
wi[idx * N + i] = 0;
} else {
wi[idx * N + 0] = 0;
for (i = 1; i < N; i++) {
index = (idx * i) % N;
wi[idx * N + i] = w[index * 2 + 1];
}
}
}
}
/* Kernel to take the element-wise (dot) product of two matrices and store the
 * result in the resultant matrix: ab[i] = a[i] . b[i]
 * Only the first column is computed here, since all other elements are 0.
*/
__global__ void dotProdKernel(float *a, float *b, float *ab, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if( (idx*N) < (N*N) ) {
ab[idx * N] = a[idx *N] * b[idx * N];
}
}
/* Kernel to multiply two input matrices and store the result in the resultant matrix.
* The data from the two matrices is accessed in tiles of width TILE_WIDTH.
*/
__global__ void multKernel(float *a, float *b, float *ab, int width)
{
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
// allocate tiles in __shared__ memory
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
// calculate the row & col index to identify element to work on
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
float result = 0;
// loop over the tiles of the input in phases
for(int p = 0; p < width/TILE_WIDTH; ++p)
{
// collaboratively load tiles into shared memory: row-wise and column wise respectively
s_a[ty][tx] = a[row*width + (p*TILE_WIDTH + tx)];
s_b[ty][tx] = b[(p*TILE_WIDTH + ty)*width + col];
__syncthreads();
// dot product between row of s_a and col of s_b
for(int k = 0; k < TILE_WIDTH; ++k)
result += s_a[ty][k] * s_b[k][tx];
__syncthreads();
}
ab[row*width+col] = result;
}
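/* Note: both __syncthreads() calls above are required -- the first ensures a tile
 * is fully loaded before any thread reads it, the second ensures no thread starts
 * overwriting the tile for the next phase while others are still reading it. The
 * loop also assumes `width` is a multiple of TILE_WIDTH, which main() guarantees
 * by padding.
 */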
/* Simple kernel to add elements of two matrices.
* In this case, we need to just add the first column of the two matrices
* as all other elements will be 0.
*/
__global__ void addMat(float *a, float *b, float *add, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if((idx*N) < (N*N))
add[idx * N] = a[idx *N] + b[idx * N];
}
/* Simple kernel to subtract elements of two matrices.
* In this case, we need to just subtract the first column of the two matrices
* as all other elements will be 0.
*/
__global__ void subMat(float *a, float *b, float *sub, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if((idx*N) < (N*N))
sub[idx * N] = a[idx * N] - b[idx * N];
}
/* Simple kernel to divide elements of matrix by N.
 * In this case, we need to just divide the first column of the matrix
* as all other elements will be 0.
*/
__global__ void divMat(float *a, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if((idx*N) < (N*N))
a[idx *N] /= N;
}
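/* Usage note: the signal length n is read from argv[1] (no argument check is
 * performed), e.g. `./conv 5` for a hypothetical binary name. The padded size N
 * rounds 2*n up to a multiple of TILE_WIDTH, moving to the next multiple even when
 * 2*n already is one; with TILE_WIDTH = 16 and n = 5: 2*n = 10 and
 * N = 10 + (16 - 10) = 16.
 */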
/* Main function */
int main(int argc, char** argv)
{
int n, i;
n = strtol(argv[1], 0, 10);
/* Final Padding for convolution and multiplication */
int N = 2*n + (TILE_WIDTH - (2*n)%TILE_WIDTH);
/* Variables on Host */
float realRes[N][N], imgRes[N][N]; /* Resultant convolution matrix - real & img */
float x[N][N], sig2[N][N]; /* Two Input signals */
/* Variables on Device */
float *ddft_realRes, *ddft_imgRes, *d_realRes1, *d_realRes2, *d_realRes, *d_imgRes1, *d_imgRes2, *d_imgRes;
float *d_w, *dw_r, *dw_i, *d_realProd, *d_imgProd, *d_realImg2, *d_imgReal2;
float *d_x, *ddft_r, *ddft_i;
float *d_sig2, *ddft2_r, *ddft2_i;
/* Streams */
cudaStream_t d_x_Stream, d_sig2_Stream,d_w_Stream, dw_r_Stream, dw_i_Stream, ddft_r_Stream, ddft_i_Stream;
cudaStream_t ddft2_i_Stream, ddft2_r_Stream, d_realProd_Stream, d_imgProd_Stream;
cudaStream_t d_realImg2_Stream, d_imgReal2_Stream, ddft_realRes_Stream, ddft_imgRes_Stream, d_realRes1_Stream;
cudaStream_t d_realRes2_Stream, d_realRes_Stream, d_imgRes1_Stream, d_imgRes2_Stream, d_imgRes_Stream;
cudaStream_t dw_i2_Stream;
/* Creating streams */
cudaStreamCreate(&d_x_Stream);
cudaStreamCreate(&d_sig2_Stream);
cudaStreamCreate(&d_w_Stream);
cudaStreamCreate(&dw_r_Stream);
cudaStreamCreate(&dw_i_Stream);
cudaStreamCreate(&ddft_r_Stream);
cudaStreamCreate(&ddft_i_Stream);
cudaStreamCreate(&ddft2_i_Stream);
cudaStreamCreate(&ddft2_r_Stream);
cudaStreamCreate(&d_realProd_Stream);
cudaStreamCreate(&d_imgProd_Stream);
cudaStreamCreate(&d_realImg2_Stream);
cudaStreamCreate(&d_imgReal2_Stream);
cudaStreamCreate(&ddft_realRes_Stream);
cudaStreamCreate(&ddft_imgRes_Stream);
cudaStreamCreate(&d_realRes1_Stream);
cudaStreamCreate(&d_realRes2_Stream);
cudaStreamCreate(&d_realRes_Stream);
cudaStreamCreate(&d_imgRes1_Stream);
cudaStreamCreate(&d_imgRes2_Stream);
cudaStreamCreate(&d_imgRes_Stream);
cudaStreamCreate(&dw_i2_Stream);
/* Timer */
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 numberOfThreads(NUM_OF_X_THREADS, NUM_OF_Y_THREADS);
dim3 numberOfBlocks( (TOTAL_THREADS + NUM_OF_X_THREADS -1)/NUM_OF_X_THREADS,
(TOTAL_THREADS + NUM_OF_Y_THREADS - 1)/NUM_OF_Y_THREADS );
/* Timer starts */
cudaEventRecord(start, 0);
/* Allocating memory on device */
cudaMalloc((void **)&ddft_realRes, N * N * sizeof(float));
cudaMalloc((void **)&ddft_imgRes, N * N * sizeof(float));
cudaMalloc((void **)&d_realRes1, N * N * sizeof(float));
cudaMalloc((void **)&d_realRes2, N * N * sizeof(float));
cudaMalloc((void **)&d_realRes, N * N * sizeof(float));
cudaMalloc((void **)&d_imgRes1, N * N * sizeof(float));
cudaMalloc((void **)&d_imgRes2, N * N * sizeof(float));
cudaMalloc((void **)&d_imgRes, N * N * sizeof(float));
cudaMalloc((void **)&d_w, 2 * N * sizeof(float));
cudaMalloc((void **)&dw_r, N * N * sizeof(float));
cudaMalloc((void **)&dw_i, N * N * sizeof(float));
cudaMalloc((void **)&d_realProd, N * N * sizeof(float));
cudaMalloc((void **)&d_imgProd, N * N * sizeof(float));
cudaMalloc((void **)&d_realImg2, N * N * sizeof(float));
cudaMalloc((void **)&d_imgReal2, N * N * sizeof(float));
cudaMalloc((void **)&d_x,N * N * sizeof(float));
cudaMalloc((void **)&ddft_r, N * N * sizeof(float));
cudaMalloc((void **)&ddft_i, N * N * sizeof(float));
cudaMalloc((void **)&d_sig2, N * N * sizeof(float));
cudaMalloc((void **)&ddft2_r, N * N * sizeof(float));
cudaMalloc((void **)&ddft2_i, N * N * sizeof(float));
// Generating the input matrix 1
inputKernel<<<numberOfBlocks, numberOfThreads,0, d_x_Stream >>>(d_x, n, N);
cudaMemcpy(x, d_x, N * N * sizeof(float), cudaMemcpyDeviceToHost);
//Generating the input matrix 2
inputKernel2<<<numberOfBlocks, numberOfThreads, 0, d_sig2_Stream >>>(d_sig2, n, N);
cudaMemcpy(sig2, d_sig2, N * N * sizeof(float), cudaMemcpyDeviceToHost);
printf("********************\n");
printf("Input1\n");
for(i = 0; i < n; i++)
{
printf("%0.3f\n",x[i][0]);
}
printf("*******************\n");
printf("Input2\n");
for(i = 0; i < n; i++)
{
printf("%0.3f\n",sig2[i][0]);
}
// Calculating twiddle factor
factorKernel<<<(N+511)/512, 512,0, d_w_Stream >>>(d_w, (float)N);
cudaStreamSynchronize(d_w_Stream);
// Calculating twiddle real matrix
twiddleRealKernel<<<(N+511)/512, 512,0, dw_r_Stream >>>(dw_r, d_w, N);
// Calculating twiddle imaginary matrix
twiddleImgKernel<<<(N+511)/512, 512,0,dw_i_Stream >>>(dw_i, d_w, N);
dim3 numberOfThreads_1(TILE_WIDTH, TILE_WIDTH);
dim3 numberOfBlocks_1( (N + TILE_WIDTH -1)/TILE_WIDTH, (N + TILE_WIDTH -1)/TILE_WIDTH );
cudaStreamSynchronize(dw_r_Stream);
cudaStreamSynchronize(d_x_Stream);
cudaStreamSynchronize(d_sig2_Stream);
// Calculating real part of DFT of input matrix 1
multKernel<<<numberOfBlocks_1, numberOfThreads_1,0,ddft_r_Stream >>>(dw_r, d_x, ddft_r, N);
// Calculating real part of DFT of input matrix 2
multKernel<<<numberOfBlocks_1, numberOfThreads_1, 0, ddft2_r_Stream >>>(dw_r, d_sig2, ddft2_r, N);
cudaStreamSynchronize(dw_i_Stream);
	// Calculating imaginary part of DFT of input matrix 1
multKernel<<<numberOfBlocks_1, numberOfThreads_1,0,ddft_i_Stream >>>(dw_i, d_x, ddft_i, N);
	// Calculating imaginary part of DFT of input matrix 2
multKernel<<<numberOfBlocks_1, numberOfThreads_1,0,ddft2_i_Stream >>>(dw_i, d_sig2, ddft2_i, N);
cudaStreamSynchronize(ddft_r_Stream);
cudaStreamSynchronize(ddft2_r_Stream);
//Multiplying the real part of two signals
dotProdKernel<<<(N + 511)/512, 512,0,d_realProd_Stream >>>(ddft_r, ddft2_r, d_realProd, N);
cudaStreamSynchronize(ddft_i_Stream);
cudaStreamSynchronize(ddft2_i_Stream);
//Multiplying the imaginary part of the two signals
dotProdKernel<<<(N + 511)/512, 512,0,d_imgProd_Stream >>>(ddft_i, ddft2_i, d_imgProd, N);
//Multiplying the real part of 1 and imaginary part of 2
dotProdKernel<<<(N + 511)/512, 512,0,d_realImg2_Stream >>>(ddft_r, ddft2_i, d_realImg2, N);
//Multiplying the img part of 1 and real part of 2
dotProdKernel<<<(N + 511)/512, 512, 0, d_imgReal2_Stream >>>(ddft_i, ddft2_r, d_imgReal2, N);
cudaStreamSynchronize(d_realProd_Stream);
cudaStreamSynchronize(d_imgProd_Stream);
// Calculating twiddle imaginary matrix for IDFT
twiddleImgKernelIDFT<<<(N+511)/512, 512,0,dw_i2_Stream >>>(dw_i, d_w, N);
//Real Part of DFT of Result
subMat<<<(N*N + 511)/512, 512, 0, ddft_realRes_Stream >>>(d_realProd, d_imgProd, ddft_realRes, N);
cudaStreamSynchronize(d_imgReal2_Stream);
cudaStreamSynchronize(d_realImg2_Stream);
//Img Part of DFT of Result
addMat<<<(N*N + 511)/512, 512, 0, ddft_imgRes_Stream >>>(d_imgReal2, d_realImg2, ddft_imgRes, N);
cudaStreamSynchronize(ddft_realRes_Stream);
cudaStreamSynchronize(ddft_imgRes_Stream);
//Real Part of Resultant Signal after taking IDFT = Real Part of Convolution
multKernel<<<numberOfBlocks_1, numberOfThreads_1, 0, d_realRes1_Stream >>>(dw_r, ddft_realRes, d_realRes1, N);
multKernel<<<numberOfBlocks_1, numberOfThreads_1, 0, d_imgRes2_Stream >>>(dw_r, ddft_imgRes, d_imgRes2, N);
cudaStreamSynchronize(dw_i2_Stream);
//Img Part of Resultant Signal after taking IDFT = Img Part of Convolution
multKernel<<<numberOfBlocks_1, numberOfThreads_1, 0,d_realRes2_Stream >>>(dw_i, ddft_imgRes, d_realRes2, N);
multKernel<<<numberOfBlocks_1, numberOfThreads_1, 0, d_imgRes1_Stream >>>(dw_i, ddft_realRes, d_imgRes1, N);
cudaStreamSynchronize(d_realRes1_Stream);
cudaStreamSynchronize(d_realRes2_Stream);
subMat<<<(N*N +511)/512, 512, 0,d_realRes_Stream >>>(d_realRes1, d_realRes2, d_realRes, N);
cudaStreamSynchronize(d_realRes_Stream);
divMat<<<(N*N + 511)/512, 512>>>(d_realRes, N);
cudaMemcpy(realRes, d_realRes, N * N * sizeof(float), cudaMemcpyDeviceToHost);
	printf("Final Convolution\n");
for(i = 0; i < (2*n - 1); i++)
{
printf("%0.3f\n", realRes[i][0]);
}
cudaStreamSynchronize(d_imgRes1_Stream);
cudaStreamSynchronize(d_imgRes2_Stream);
addMat<<<(N*N + 511)/512, 512, 0, d_imgRes_Stream >>>(d_imgRes1, d_imgRes2, d_imgRes, N);
cudaStreamSynchronize(d_imgRes_Stream);
divMat<<<(N*N + 511)/512, 512>>>(d_imgRes, N);
cudaMemcpy(imgRes, d_imgRes, N * N * sizeof(float), cudaMemcpyDeviceToHost);
printf("*********************************************************************************\n");
/* Timer */
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Time taken : %3.1f ms\n", elapsedTime);
/* De-allocating memory on device */
cudaFree(ddft_realRes);
cudaFree(ddft_imgRes);
cudaFree(d_realRes1);
cudaFree(d_realRes2);
cudaFree(d_realRes);
cudaFree(d_imgRes1);
cudaFree(d_imgRes2);
cudaFree(d_imgRes);
cudaFree(d_w);
cudaFree(dw_r);
cudaFree(dw_i);
cudaFree(d_realProd);
cudaFree(d_imgProd);
cudaFree(d_realImg2);
cudaFree(d_imgReal2);
cudaFree(d_x);
cudaFree(ddft_r);
cudaFree(ddft_i);
cudaFree(d_sig2);
cudaFree(ddft2_r);
cudaFree(ddft2_i);
return 0;
}
|
b6605ca2660baefc3a37ee77982f79b9135cc764.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, Carnegie Mellon University
* See LICENSE for details
*/
/***************************************************************************
* SPL Matrix *
* *
* Computes matrix that corresponds to SPL generated routine *
***************************************************************************/
#include <limits.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <helper_cuda.h>
#ifndef MIN
#define MIN(a,b) (((a)<(b))?(a):(b))
#endif
#ifndef ROWS
#error ROWS must be defined
#endif
#ifndef COLUMNS
#error COLUMNS must be defined
#endif
hipfftDoubleReal *Input, *Output;
hipfftDoubleReal *dev_in, *dev_out;
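/* ROWS, COLUMNS, FUNC, INITFUNC and testvector are not defined in this file;
 * they are presumably supplied by the SPL code generator or as compile-time
 * definitions (ROWS and COLUMNS are explicitly required by the #error checks above).
 */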
void initialize(int argc, char **argv) {
    // In many cases ROWS & COLUMNS are equal; however, when they are not it is
// important to use the correct one when allocating memory for the in/out
// buffers. The *input* buffer should be dimensioned by COLUMNS, while the
// *output* buffer should be dimensioned by ROWS
hipHostMalloc ( &Input, sizeof(hipfftDoubleReal) * COLUMNS );
checkCudaErrors(hipGetLastError());
hipHostMalloc ( &Output, sizeof(hipfftDoubleReal) * ROWS );
checkCudaErrors(hipGetLastError());
hipMalloc ( &dev_in, sizeof(hipfftDoubleReal) * COLUMNS );
checkCudaErrors(hipGetLastError());
hipMalloc ( &dev_out, sizeof(hipfftDoubleReal) * ROWS );
checkCudaErrors(hipGetLastError());
INITFUNC();
}
void finalize() {
hipHostFree (Output);
hipHostFree (Input);
hipFree (dev_out);
hipFree (dev_in);
}
void compute_vector()
{
int indx;
printf("[ ");
hipMemcpy ( dev_in, Input, sizeof(hipfftDoubleReal) * COLUMNS, hipMemcpyHostToDevice);
checkCudaErrors(hipGetLastError());
FUNC(dev_out, dev_in);
checkCudaErrors(hipGetLastError());
hipDeviceSynchronize();
hipMemcpy ( Output, dev_out, sizeof(hipfftDoubleReal) * ROWS, hipMemcpyDeviceToHost);
checkCudaErrors(hipGetLastError());
for (indx = 0; indx < ROWS; indx++) {
if (indx != 0) {
printf(", ");
}
printf("FloatString(\"%.18g\")", Output[indx]);
}
printf("];\n");
}
int main(int argc, char** argv) {
initialize(argc, argv);
int tlen = sizeof(testvector) / sizeof(testvector[0]);
for (int i = 0; i < MIN(tlen, COLUMNS); i++) {
Input[i] = (hipfftDoubleReal)testvector[i];
}
compute_vector();
finalize();
return EXIT_SUCCESS;
}
| b6605ca2660baefc3a37ee77982f79b9135cc764.cu | /*
* Copyright (c) 2018-2020, Carnegie Mellon University
* See LICENSE for details
*/
/***************************************************************************
* SPL Matrix *
* *
* Computes matrix that corresponds to SPL generated routine *
***************************************************************************/
#include <limits.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cufft.h>
#include <cufftXt.h>
#include <helper_cuda.h>
#ifndef MIN
#define MIN(a,b) (((a)<(b))?(a):(b))
#endif
#ifndef ROWS
#error ROWS must be defined
#endif
#ifndef COLUMNS
#error COLUMNS must be defined
#endif
cufftDoubleReal *Input, *Output;
cufftDoubleReal *dev_in, *dev_out;
void initialize(int argc, char **argv) {
    // In many cases ROWS & COLUMNS are equal; however, when they are not it is
// important to use the correct one when allocating memory for the in/out
// buffers. The *input* buffer should be dimensioned by COLUMNS, while the
// *output* buffer should be dimensioned by ROWS
cudaMallocHost ( &Input, sizeof(cufftDoubleReal) * COLUMNS );
checkCudaErrors(cudaGetLastError());
cudaMallocHost ( &Output, sizeof(cufftDoubleReal) * ROWS );
checkCudaErrors(cudaGetLastError());
cudaMalloc ( &dev_in, sizeof(cufftDoubleReal) * COLUMNS );
checkCudaErrors(cudaGetLastError());
cudaMalloc ( &dev_out, sizeof(cufftDoubleReal) * ROWS );
checkCudaErrors(cudaGetLastError());
INITFUNC();
}
void finalize() {
cudaFreeHost (Output);
cudaFreeHost (Input);
cudaFree (dev_out);
cudaFree (dev_in);
}
void compute_vector()
{
int indx;
printf("[ ");
cudaMemcpy ( dev_in, Input, sizeof(cufftDoubleReal) * COLUMNS, cudaMemcpyHostToDevice);
checkCudaErrors(cudaGetLastError());
FUNC(dev_out, dev_in);
checkCudaErrors(cudaGetLastError());
cudaDeviceSynchronize();
cudaMemcpy ( Output, dev_out, sizeof(cufftDoubleReal) * ROWS, cudaMemcpyDeviceToHost);
checkCudaErrors(cudaGetLastError());
for (indx = 0; indx < ROWS; indx++) {
if (indx != 0) {
printf(", ");
}
printf("FloatString(\"%.18g\")", Output[indx]);
}
printf("];\n");
}
int main(int argc, char** argv) {
initialize(argc, argv);
int tlen = sizeof(testvector) / sizeof(testvector[0]);
for (int i = 0; i < MIN(tlen, COLUMNS); i++) {
Input[i] = (cufftDoubleReal)testvector[i];
}
compute_vector();
finalize();
return EXIT_SUCCESS;
}
|
7bf125cc396902c230f576c039ec460c244d6d02.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef HARMONIC_SUMMING_KERNEL_H_
#define HARMONIC_SUMMING_KERNEL_H_
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "headers/params.h"
__global__ void PHS_GPU_kernel(float const* __restrict__ d_input, float *d_output_SNR, ushort *d_output_harmonics, float *d_MSD, int nTimesamples, int nSpectra, int nHarmonics){
float signal_mean=d_MSD[0];
float signal_sd=d_MSD[1];
float HS_value, temp_SNR, SNR;
ushort max_SNR_harmonic;
int pos;
// reading 0th harmonic, i.e. fundamental frequency
pos = blockIdx.x*nSpectra + blockIdx.y*blockDim.x + threadIdx.x;
if( (blockIdx.y*blockDim.x + threadIdx.x)<nSpectra ){
HS_value = __ldg(&d_input[pos]);
SNR = (HS_value - signal_mean)/(signal_sd);
max_SNR_harmonic = 0;
if(blockIdx.x>0) {
for(int f=1; f<nHarmonics; f++){
if( (blockIdx.x + f*blockIdx.x)<nTimesamples ){
pos = (blockIdx.x + f*blockIdx.x)*nSpectra + blockIdx.y*blockDim.x + threadIdx.x;
HS_value = HS_value + __ldg(&d_input[pos]);
temp_SNR = __frsqrt_rn(f+1)*(HS_value - signal_mean*(f+1))/(signal_sd); //assuming white noise
if(temp_SNR > SNR){
SNR = temp_SNR;
max_SNR_harmonic = f;
}
}
}
}
pos = blockIdx.x*nSpectra + blockIdx.y*blockDim.x + threadIdx.x;
d_output_SNR[pos] = SNR;
d_output_harmonics[pos] = max_SNR_harmonic;
}
}
#endif | 7bf125cc396902c230f576c039ec460c244d6d02.cu |
#ifndef HARMONIC_SUMMING_KERNEL_H_
#define HARMONIC_SUMMING_KERNEL_H_
#include <cuda.h>
#include <cuda_runtime.h>
#include "headers/params.h"
__global__ void PHS_GPU_kernel(float const* __restrict__ d_input, float *d_output_SNR, ushort *d_output_harmonics, float *d_MSD, int nTimesamples, int nSpectra, int nHarmonics){
float signal_mean=d_MSD[0];
float signal_sd=d_MSD[1];
float HS_value, temp_SNR, SNR;
ushort max_SNR_harmonic;
int pos;
// reading 0th harmonic, i.e. fundamental frequency
pos = blockIdx.x*nSpectra + blockIdx.y*blockDim.x + threadIdx.x;
if( (blockIdx.y*blockDim.x + threadIdx.x)<nSpectra ){
HS_value = __ldg(&d_input[pos]);
SNR = (HS_value - signal_mean)/(signal_sd);
max_SNR_harmonic = 0;
if(blockIdx.x>0) {
for(int f=1; f<nHarmonics; f++){
if( (blockIdx.x + f*blockIdx.x)<nTimesamples ){
pos = (blockIdx.x + f*blockIdx.x)*nSpectra + blockIdx.y*blockDim.x + threadIdx.x;
HS_value = HS_value + __ldg(&d_input[pos]);
temp_SNR = __frsqrt_rn(f+1)*(HS_value - signal_mean*(f+1))/(signal_sd); //assuming white noise
if(temp_SNR > SNR){
SNR = temp_SNR;
max_SNR_harmonic = f;
}
}
}
}
pos = blockIdx.x*nSpectra + blockIdx.y*blockDim.x + threadIdx.x;
d_output_SNR[pos] = SNR;
d_output_harmonics[pos] = max_SNR_harmonic;
}
}
#endif |
41dfa97726ce3dd44205ea0266bd24ebf4a56c9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "CellListGPU.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#pragma GCC diagnostic pop
/*! \file CellListGPU.cu
\brief Defines GPU kernel code for cell list generation on the GPU
*/
//! Kernel that computes the cell list on the GPU
/*! \param d_cell_size Number of particles in each cell
\param d_xyzf Cell XYZF data array
\param d_tdb Cell TDB data array
\param d_cell_orientation Particle orientation in cell list
\param d_cell_idx Particle index in cell list
\param d_conditions Conditions flags for detecting overflow and other error conditions
\param d_pos Particle position array
\param d_orientation Particle orientation array
\param d_charge Particle charge array
\param d_diameter Particle diameter array
\param d_body Particle body array
\param N Number of particles
\param n_ghost Number of ghost particles
\param Nmax Maximum number of particles that can be placed in a single cell
\param flag_charge Set to true to store charge in the flag position in \a d_xyzf
\param flag_type Set to true to store type in the flag position in \a d_xyzf
\param box Box dimensions
\param ci Indexer to compute cell id from cell grid coords
\param cli Indexer to index into \a d_xyzf and \a d_tdb
\param ghost_width Width of ghost layer
\note Optimized for Fermi
*/
__global__ void gpu_compute_cell_list_kernel(unsigned int* d_cell_size,
Scalar4* d_xyzf,
Scalar4* d_tdb,
Scalar4* d_cell_orientation,
unsigned int* d_cell_idx,
uint3* d_conditions,
const Scalar4* d_pos,
const Scalar4* d_orientation,
const Scalar* d_charge,
const Scalar* d_diameter,
const unsigned int* d_body,
const unsigned int N,
const unsigned int n_ghost,
const unsigned int Nmax,
const bool flag_charge,
const bool flag_type,
const BoxDim box,
const Index3D ci,
const Index2D cli,
const Scalar3 ghost_width,
const unsigned int nwork,
const unsigned int offset)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= nwork)
return;
idx += offset;
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
Scalar flag = 0;
Scalar diameter = 0;
Scalar body = 0;
Scalar type = postype.w;
Scalar4 orientation = make_scalar4(0, 0, 0, 0);
if (d_tdb != NULL)
{
diameter = d_diameter[idx];
body = __int_as_scalar(d_body[idx]);
}
if (d_cell_orientation != NULL)
{
orientation = d_orientation[idx];
}
if (flag_charge)
flag = d_charge[idx];
else if (flag_type)
flag = type;
else
flag = __int_as_scalar(idx);
// check for nan pos
if (isnan(pos.x) || isnan(pos.y) || isnan(pos.z))
{
(*d_conditions).y = idx + 1;
return;
}
uchar3 periodic = box.getPeriodic();
Scalar3 f = box.makeFraction(pos, ghost_width);
// check if the particle is inside the unit cell + ghost layer in all dimensions
if ((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001))
|| (f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001))
|| (f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001)))
{
// if a ghost particle is out of bounds, silently ignore it
if (idx < N)
(*d_conditions).z = idx + 1;
return;
}
// find the bin each particle belongs in
int ib = (int)(f.x * ci.getW());
int jb = (int)(f.y * ci.getH());
int kb = (int)(f.z * ci.getD());
// need to handle the case where the particle is exactly at the box hi
if (ib == ci.getW() && periodic.x)
ib = 0;
if (jb == ci.getH() && periodic.y)
jb = 0;
if (kb == ci.getD() && periodic.z)
kb = 0;
unsigned int bin = ci(ib, jb, kb);
    // all particles should be in a valid cell
if (ib < 0 || ib >= (int)ci.getW() || jb < 0 || jb >= (int)ci.getH() || kb < 0
|| kb >= (int)ci.getD())
{
// but ghost particles that are out of range should not produce an error
if (idx < N)
{
#if (__CUDA_ARCH__ >= 600)
atomicMax_system(&(*d_conditions).z, idx + 1);
#else
atomicMax(&(*d_conditions).z, idx + 1);
#endif
}
return;
}
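    // atomicInc returns the previous value, so `size` is this particle's slot within
    // its cell before the increment; the 0xffffffff limit effectively disables wrap-around.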
unsigned int size = atomicInc(&d_cell_size[bin], 0xffffffff);
if (size < Nmax)
{
unsigned int write_pos = cli(size, bin);
if (d_xyzf != NULL)
d_xyzf[write_pos] = make_scalar4(pos.x, pos.y, pos.z, flag);
if (d_tdb != NULL)
d_tdb[write_pos] = make_scalar4(type, diameter, body, 0);
if (d_cell_orientation != NULL)
d_cell_orientation[write_pos] = orientation;
if (d_cell_idx != NULL)
d_cell_idx[write_pos] = idx;
}
else
{
// handle overflow
#if (__CUDA_ARCH__ >= 600)
atomicMax_system(&(*d_conditions).x, size + 1);
#else
atomicMax(&(*d_conditions).x, size + 1);
#endif
}
}
void gpu_compute_cell_list(unsigned int* d_cell_size,
Scalar4* d_xyzf,
Scalar4* d_tdb,
Scalar4* d_cell_orientation,
unsigned int* d_cell_idx,
uint3* d_conditions,
const Scalar4* d_pos,
const Scalar4* d_orientation,
const Scalar* d_charge,
const Scalar* d_diameter,
const unsigned int* d_body,
const unsigned int N,
const unsigned int n_ghost,
const unsigned int Nmax,
const bool flag_charge,
const bool flag_type,
const BoxDim& box,
const Index3D& ci,
const Index2D& cli,
const Scalar3& ghost_width,
const unsigned int block_size,
const GPUPartition& gpu_partition)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(&gpu_compute_cell_list_kernel));
max_block_size = attr.maxThreadsPerBlock;
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
// process ghosts in final range
if (idev == (int)gpu_partition.getNumActiveGPUs() - 1)
nwork += n_ghost;
unsigned int run_block_size = min(block_size, max_block_size);
int n_blocks = nwork / run_block_size + 1;
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_compute_cell_list_kernel),
dim3(n_blocks),
dim3(run_block_size),
0,
0,
d_cell_size + idev * ci.getNumElements(),
d_xyzf ? d_xyzf + idev * cli.getNumElements() : 0,
d_tdb ? d_tdb + idev * cli.getNumElements() : 0,
d_cell_orientation ? d_cell_orientation + idev * cli.getNumElements()
: 0,
d_cell_idx ? d_cell_idx + idev * cli.getNumElements() : 0,
d_conditions,
d_pos,
d_orientation,
d_charge,
d_diameter,
d_body,
N,
n_ghost,
Nmax,
flag_charge,
flag_type,
box,
ci,
cli,
ghost_width,
nwork,
range.first);
}
}
__global__ void gpu_fill_indices_kernel(unsigned int cl_size,
uint2* d_idx,
unsigned int* d_sort_permutation,
unsigned int* d_cell_idx,
unsigned int* d_cell_size,
Index3D ci,
Index2D cli)
{
unsigned int cell_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (cell_idx >= cl_size)
return;
unsigned int icell = cell_idx / cli.getW();
unsigned int pidx = UINT_MAX;
if (icell < ci.getNumElements())
{
unsigned int my_cell_size = d_cell_size[icell];
unsigned int ilocal = cell_idx % cli.getW();
if (ilocal < my_cell_size)
{
pidx = d_cell_idx[cell_idx];
}
}
// pack cell idx and particle idx into uint2
uint2 result;
result.x = icell;
result.y = pidx;
// write out result
d_idx[cell_idx] = result;
// write identity permutation
d_sort_permutation[cell_idx] = cell_idx;
}
//! Lexicographic comparison operator on uint2
struct comp_less_uint2
{
__device__ bool operator()(const uint2& a, const uint2& b)
{
return a.x < b.x || (a.x == b.x && a.y < b.y);
}
};
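// comp_less_uint2 is used below by thrust::sort_by_key in gpu_sort_cell_list() to order
// (cell index, particle index) pairs first by cell index and then by particle index.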
//! Kernel to combine ngpu cell lists into one, in parallel
__global__ void gpu_combine_cell_lists_kernel(const unsigned int* d_cell_size_scratch,
unsigned int* d_cell_size,
const unsigned int* d_idx_scratch,
unsigned int* d_idx,
const Scalar4* d_xyzf_scratch,
Scalar4* d_xyzf,
const Scalar4* d_tdb_scratch,
Scalar4* d_tdb,
const Scalar4* d_cell_orientation_scratch,
Scalar4* d_cell_orientation,
const Index2D cli,
unsigned int igpu,
unsigned int ngpu,
const unsigned int Nmax,
uint3* d_conditions)
{
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= cli.getNumElements())
return;
uint2 p = cli.getPair(idx);
unsigned int local_idx = p.x;
unsigned int bin = p.y;
// reduce cell sizes for 0..igpu
unsigned int local_size;
unsigned int offset = 0;
unsigned int total_size = 0;
for (unsigned int i = 0; i < ngpu; ++i)
{
unsigned int sz = d_cell_size_scratch[bin + i * cli.getH()];
if (i == igpu)
local_size = sz;
if (i < igpu)
offset += sz;
total_size += sz;
}
// write out cell size total on GPU 0
if (igpu == 0 && local_idx == 0)
d_cell_size[bin] = total_size;
// is local_idx within bounds?
if (local_idx >= local_size)
return;
unsigned int out_idx = offset + local_idx;
if (out_idx >= Nmax)
{
// handle overflow
#if (__CUDA_ARCH__ >= 600)
atomicMax_system(&(*d_conditions).x, out_idx + 1);
#else
atomicMax(&(*d_conditions).x, out_idx + 1);
#endif
return;
}
unsigned int write_pos = cli(out_idx, bin);
// copy over elements
if (d_idx)
d_idx[write_pos] = d_idx_scratch[idx + igpu * cli.getNumElements()];
if (d_xyzf)
d_xyzf[write_pos] = d_xyzf_scratch[idx + igpu * cli.getNumElements()];
if (d_tdb)
d_tdb[write_pos] = d_tdb_scratch[idx + igpu * cli.getNumElements()];
if (d_cell_orientation)
d_cell_orientation[write_pos]
= d_cell_orientation_scratch[idx + igpu * cli.getNumElements()];
}
/*! Driver function to sort the cell lists from different GPUs into one
This applies lexicographical order to cell idx, particle idx pairs
\param d_cell_size_scratch List of cell sizes (per GPU)
\param d_cell_size List of cell sizes
\param d_cell_idx_scratch List particle index (per GPU)
\param d_cell_idx List particle index
\param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
\param d_sort_permutation Temporary array for storing the permuted cell list indices
\param ci Cell indexer
\param cli Cell list indexer
\param block_size GPU block size
\param gpu_partition multi-GPU partition
*/
hipError_t gpu_combine_cell_lists(const unsigned int* d_cell_size_scratch,
unsigned int* d_cell_size,
const unsigned int* d_idx_scratch,
unsigned int* d_idx,
const Scalar4* d_xyzf_scratch,
Scalar4* d_xyzf,
const Scalar4* d_tdb_scratch,
Scalar4* d_tdb,
const Scalar4* d_cell_orientation_scratch,
Scalar4* d_cell_orientation,
const Index2D cli,
unsigned int ngpu,
const unsigned int block_size,
const unsigned int Nmax,
uint3* d_conditions,
const GPUPartition& gpu_partition)
{
dim3 threads(block_size);
dim3 grid(cli.getNumElements() / block_size + 1);
// copy together cell lists in parallel
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
gpu_partition.getRangeAndSetGPU(idev);
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_combine_cell_lists_kernel),
grid,
threads,
0,
0,
d_cell_size_scratch,
d_cell_size,
d_idx_scratch,
d_idx,
d_xyzf_scratch,
d_xyzf,
d_tdb_scratch,
d_tdb,
d_cell_orientation_scratch,
d_cell_orientation,
cli,
idev,
ngpu,
Nmax,
d_conditions);
}
return hipSuccess;
}
__global__ void gpu_apply_sorted_cell_list_order(unsigned int cl_size,
unsigned int* d_cell_idx,
unsigned int* d_cell_idx_new,
Scalar4* d_xyzf,
Scalar4* d_xyzf_new,
Scalar4* d_tdb,
Scalar4* d_tdb_new,
Scalar4* d_cell_orientation,
Scalar4* d_cell_orientation_new,
unsigned int* d_sort_permutation,
Index2D cli)
{
unsigned int cell_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (cell_idx >= cl_size)
return;
unsigned int perm_idx = d_sort_permutation[cell_idx];
if (d_xyzf)
d_xyzf_new[cell_idx] = d_xyzf[perm_idx];
if (d_cell_idx)
d_cell_idx_new[cell_idx] = d_cell_idx[perm_idx];
if (d_tdb)
d_tdb_new[cell_idx] = d_tdb[perm_idx];
if (d_cell_orientation)
d_cell_orientation_new[cell_idx] = d_cell_orientation[perm_idx];
}
/*! Driver function to sort the cell list on the GPU
This applies lexicographical order to cell idx, particle idx pairs
\param d_cell_size List of cell sizes
\param d_xyzf List of coordinates and flag
\param d_tdb List type diameter and body index
\param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
\param d_sort_permutation Temporary array for storing the permuted cell list indices
\param ci Cell indexer
\param cli Cell list indexer
*/
hipError_t gpu_sort_cell_list(unsigned int* d_cell_size,
Scalar4* d_xyzf,
Scalar4* d_xyzf_new,
Scalar4* d_tdb,
Scalar4* d_tdb_new,
Scalar4* d_cell_orientation,
Scalar4* d_cell_orientation_new,
unsigned int* d_cell_idx,
unsigned int* d_cell_idx_new,
uint2* d_sort_idx,
unsigned int* d_sort_permutation,
const Index3D ci,
const Index2D cli)
{
unsigned int block_size = 256;
// fill indices table with cell idx/particle idx pairs
dim3 threads(block_size);
dim3 grid(cli.getNumElements() / block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_fill_indices_kernel),
grid,
threads,
0,
0,
cli.getNumElements(),
d_sort_idx,
d_sort_permutation,
d_cell_idx,
d_cell_size,
ci,
cli);
// locality sort on those pairs
thrust::device_ptr<uint2> d_sort_idx_thrust(d_sort_idx);
thrust::device_ptr<unsigned int> d_sort_permutation_thrust(d_sort_permutation);
thrust::sort_by_key(d_sort_idx_thrust,
d_sort_idx_thrust + cli.getNumElements(),
d_sort_permutation_thrust,
comp_less_uint2());
// apply sorted order
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_apply_sorted_cell_list_order),
grid,
threads,
0,
0,
cli.getNumElements(),
d_cell_idx,
d_cell_idx_new,
d_xyzf,
d_xyzf_new,
d_tdb,
d_tdb_new,
d_cell_orientation,
d_cell_orientation_new,
d_sort_permutation,
cli);
// copy back permuted arrays to original ones
if (d_xyzf)
hipMemcpy(d_xyzf,
d_xyzf_new,
sizeof(Scalar4) * cli.getNumElements(),
hipMemcpyDeviceToDevice);
hipMemcpy(d_cell_idx,
d_cell_idx_new,
sizeof(unsigned int) * cli.getNumElements(),
hipMemcpyDeviceToDevice);
if (d_tdb)
{
hipMemcpy(d_tdb,
d_tdb_new,
sizeof(Scalar4) * cli.getNumElements(),
hipMemcpyDeviceToDevice);
}
if (d_cell_orientation)
{
hipMemcpy(d_cell_orientation,
d_cell_orientation_new,
sizeof(Scalar4) * cli.getNumElements(),
hipMemcpyDeviceToDevice);
}
return hipSuccess;
}
| 41dfa97726ce3dd44205ea0266bd24ebf4a56c9d.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "CellListGPU.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#pragma GCC diagnostic pop
/*! \file CellListGPU.cu
\brief Defines GPU kernel code for cell list generation on the GPU
*/
//! Kernel that computes the cell list on the GPU
/*! \param d_cell_size Number of particles in each cell
\param d_xyzf Cell XYZF data array
\param d_tdb Cell TDB data array
\param d_cell_orientation Particle orientation in cell list
\param d_cell_idx Particle index in cell list
\param d_conditions Conditions flags for detecting overflow and other error conditions
\param d_pos Particle position array
\param d_orientation Particle orientation array
\param d_charge Particle charge array
\param d_diameter Particle diameter array
\param d_body Particle body array
\param N Number of particles
\param n_ghost Number of ghost particles
\param Nmax Maximum number of particles that can be placed in a single cell
\param flag_charge Set to true to store charge in the flag position in \a d_xyzf
\param flag_type Set to true to store type in the flag position in \a d_xyzf
\param box Box dimensions
\param ci Indexer to compute cell id from cell grid coords
\param cli Indexer to index into \a d_xyzf and \a d_tdb
\param ghost_width Width of ghost layer
\note Optimized for Fermi
*/
__global__ void gpu_compute_cell_list_kernel(unsigned int* d_cell_size,
Scalar4* d_xyzf,
Scalar4* d_tdb,
Scalar4* d_cell_orientation,
unsigned int* d_cell_idx,
uint3* d_conditions,
const Scalar4* d_pos,
const Scalar4* d_orientation,
const Scalar* d_charge,
const Scalar* d_diameter,
const unsigned int* d_body,
const unsigned int N,
const unsigned int n_ghost,
const unsigned int Nmax,
const bool flag_charge,
const bool flag_type,
const BoxDim box,
const Index3D ci,
const Index2D cli,
const Scalar3 ghost_width,
const unsigned int nwork,
const unsigned int offset)
{
// read in the particle that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= nwork)
return;
idx += offset;
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
Scalar flag = 0;
Scalar diameter = 0;
Scalar body = 0;
Scalar type = postype.w;
Scalar4 orientation = make_scalar4(0, 0, 0, 0);
if (d_tdb != NULL)
{
diameter = d_diameter[idx];
body = __int_as_scalar(d_body[idx]);
}
if (d_cell_orientation != NULL)
{
orientation = d_orientation[idx];
}
if (flag_charge)
flag = d_charge[idx];
else if (flag_type)
flag = type;
else
flag = __int_as_scalar(idx);
// check for nan pos
if (isnan(pos.x) || isnan(pos.y) || isnan(pos.z))
{
(*d_conditions).y = idx + 1;
return;
}
uchar3 periodic = box.getPeriodic();
Scalar3 f = box.makeFraction(pos, ghost_width);
// check if the particle is inside the unit cell + ghost layer in all dimensions
if ((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001))
|| (f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001))
|| (f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001)))
{
// if a ghost particle is out of bounds, silently ignore it
if (idx < N)
(*d_conditions).z = idx + 1;
return;
}
// find the bin each particle belongs in
int ib = (int)(f.x * ci.getW());
int jb = (int)(f.y * ci.getH());
int kb = (int)(f.z * ci.getD());
// need to handle the case where the particle is exactly at the box hi
if (ib == ci.getW() && periodic.x)
ib = 0;
if (jb == ci.getH() && periodic.y)
jb = 0;
if (kb == ci.getD() && periodic.z)
kb = 0;
unsigned int bin = ci(ib, jb, kb);
    // all particles should be in a valid cell
if (ib < 0 || ib >= (int)ci.getW() || jb < 0 || jb >= (int)ci.getH() || kb < 0
|| kb >= (int)ci.getD())
{
// but ghost particles that are out of range should not produce an error
if (idx < N)
{
#if (__CUDA_ARCH__ >= 600)
atomicMax_system(&(*d_conditions).z, idx + 1);
#else
atomicMax(&(*d_conditions).z, idx + 1);
#endif
}
return;
}
unsigned int size = atomicInc(&d_cell_size[bin], 0xffffffff);
if (size < Nmax)
{
unsigned int write_pos = cli(size, bin);
if (d_xyzf != NULL)
d_xyzf[write_pos] = make_scalar4(pos.x, pos.y, pos.z, flag);
if (d_tdb != NULL)
d_tdb[write_pos] = make_scalar4(type, diameter, body, 0);
if (d_cell_orientation != NULL)
d_cell_orientation[write_pos] = orientation;
if (d_cell_idx != NULL)
d_cell_idx[write_pos] = idx;
}
else
{
// handle overflow
#if (__CUDA_ARCH__ >= 600)
atomicMax_system(&(*d_conditions).x, size + 1);
#else
atomicMax(&(*d_conditions).x, size + 1);
#endif
}
}
void gpu_compute_cell_list(unsigned int* d_cell_size,
Scalar4* d_xyzf,
Scalar4* d_tdb,
Scalar4* d_cell_orientation,
unsigned int* d_cell_idx,
uint3* d_conditions,
const Scalar4* d_pos,
const Scalar4* d_orientation,
const Scalar* d_charge,
const Scalar* d_diameter,
const unsigned int* d_body,
const unsigned int N,
const unsigned int n_ghost,
const unsigned int Nmax,
const bool flag_charge,
const bool flag_type,
const BoxDim& box,
const Index3D& ci,
const Index2D& cli,
const Scalar3& ghost_width,
const unsigned int block_size,
const GPUPartition& gpu_partition)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(&gpu_compute_cell_list_kernel));
max_block_size = attr.maxThreadsPerBlock;
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
// process ghosts in final range
if (idev == (int)gpu_partition.getNumActiveGPUs() - 1)
nwork += n_ghost;
unsigned int run_block_size = min(block_size, max_block_size);
int n_blocks = nwork / run_block_size + 1;
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_compute_cell_list_kernel),
dim3(n_blocks),
dim3(run_block_size),
0,
0,
d_cell_size + idev * ci.getNumElements(),
d_xyzf ? d_xyzf + idev * cli.getNumElements() : 0,
d_tdb ? d_tdb + idev * cli.getNumElements() : 0,
d_cell_orientation ? d_cell_orientation + idev * cli.getNumElements()
: 0,
d_cell_idx ? d_cell_idx + idev * cli.getNumElements() : 0,
d_conditions,
d_pos,
d_orientation,
d_charge,
d_diameter,
d_body,
N,
n_ghost,
Nmax,
flag_charge,
flag_type,
box,
ci,
cli,
ghost_width,
nwork,
range.first);
}
}
__global__ void gpu_fill_indices_kernel(unsigned int cl_size,
uint2* d_idx,
unsigned int* d_sort_permutation,
unsigned int* d_cell_idx,
unsigned int* d_cell_size,
Index3D ci,
Index2D cli)
{
unsigned int cell_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (cell_idx >= cl_size)
return;
unsigned int icell = cell_idx / cli.getW();
unsigned int pidx = UINT_MAX;
if (icell < ci.getNumElements())
{
unsigned int my_cell_size = d_cell_size[icell];
unsigned int ilocal = cell_idx % cli.getW();
if (ilocal < my_cell_size)
{
pidx = d_cell_idx[cell_idx];
}
}
// pack cell idx and particle idx into uint2
uint2 result;
result.x = icell;
result.y = pidx;
// write out result
d_idx[cell_idx] = result;
// write identity permutation
d_sort_permutation[cell_idx] = cell_idx;
}
//! Lexicographic comparison operator on uint2
struct comp_less_uint2
{
__device__ bool operator()(const uint2& a, const uint2& b)
{
return a.x < b.x || (a.x == b.x && a.y < b.y);
}
};
//! Kernel to combine ngpu cell lists into one, in parallel
__global__ void gpu_combine_cell_lists_kernel(const unsigned int* d_cell_size_scratch,
unsigned int* d_cell_size,
const unsigned int* d_idx_scratch,
unsigned int* d_idx,
const Scalar4* d_xyzf_scratch,
Scalar4* d_xyzf,
const Scalar4* d_tdb_scratch,
Scalar4* d_tdb,
const Scalar4* d_cell_orientation_scratch,
Scalar4* d_cell_orientation,
const Index2D cli,
unsigned int igpu,
unsigned int ngpu,
const unsigned int Nmax,
uint3* d_conditions)
{
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= cli.getNumElements())
return;
uint2 p = cli.getPair(idx);
unsigned int local_idx = p.x;
unsigned int bin = p.y;
// reduce cell sizes for 0..igpu
unsigned int local_size;
unsigned int offset = 0;
unsigned int total_size = 0;
for (unsigned int i = 0; i < ngpu; ++i)
{
unsigned int sz = d_cell_size_scratch[bin + i * cli.getH()];
if (i == igpu)
local_size = sz;
if (i < igpu)
offset += sz;
total_size += sz;
}
// write out cell size total on GPU 0
if (igpu == 0 && local_idx == 0)
d_cell_size[bin] = total_size;
// is local_idx within bounds?
if (local_idx >= local_size)
return;
unsigned int out_idx = offset + local_idx;
if (out_idx >= Nmax)
{
// handle overflow
#if (__CUDA_ARCH__ >= 600)
atomicMax_system(&(*d_conditions).x, out_idx + 1);
#else
atomicMax(&(*d_conditions).x, out_idx + 1);
#endif
return;
}
unsigned int write_pos = cli(out_idx, bin);
// copy over elements
if (d_idx)
d_idx[write_pos] = d_idx_scratch[idx + igpu * cli.getNumElements()];
if (d_xyzf)
d_xyzf[write_pos] = d_xyzf_scratch[idx + igpu * cli.getNumElements()];
if (d_tdb)
d_tdb[write_pos] = d_tdb_scratch[idx + igpu * cli.getNumElements()];
if (d_cell_orientation)
d_cell_orientation[write_pos]
= d_cell_orientation_scratch[idx + igpu * cli.getNumElements()];
}
/*! Driver function to sort the cell lists from different GPUs into one
This applies lexicographical order to cell idx, particle idx pairs
\param d_cell_size_scratch List of cell sizes (per GPU)
\param d_cell_size List of cell sizes
\param d_cell_idx_scratch List particle index (per GPU)
\param d_cell_idx List particle index
\param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
\param d_sort_permutation Temporary array for storing the permuted cell list indices
\param ci Cell indexer
\param cli Cell list indexer
\param block_size GPU block size
\param gpu_partition multi-GPU partition
*/
hipError_t gpu_combine_cell_lists(const unsigned int* d_cell_size_scratch,
unsigned int* d_cell_size,
const unsigned int* d_idx_scratch,
unsigned int* d_idx,
const Scalar4* d_xyzf_scratch,
Scalar4* d_xyzf,
const Scalar4* d_tdb_scratch,
Scalar4* d_tdb,
const Scalar4* d_cell_orientation_scratch,
Scalar4* d_cell_orientation,
const Index2D cli,
unsigned int ngpu,
const unsigned int block_size,
const unsigned int Nmax,
uint3* d_conditions,
const GPUPartition& gpu_partition)
{
dim3 threads(block_size);
dim3 grid(cli.getNumElements() / block_size + 1);
// copy together cell lists in parallel
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
gpu_partition.getRangeAndSetGPU(idev);
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_combine_cell_lists_kernel),
grid,
threads,
0,
0,
d_cell_size_scratch,
d_cell_size,
d_idx_scratch,
d_idx,
d_xyzf_scratch,
d_xyzf,
d_tdb_scratch,
d_tdb,
d_cell_orientation_scratch,
d_cell_orientation,
cli,
idev,
ngpu,
Nmax,
d_conditions);
}
return hipSuccess;
}
__global__ void gpu_apply_sorted_cell_list_order(unsigned int cl_size,
unsigned int* d_cell_idx,
unsigned int* d_cell_idx_new,
Scalar4* d_xyzf,
Scalar4* d_xyzf_new,
Scalar4* d_tdb,
Scalar4* d_tdb_new,
Scalar4* d_cell_orientation,
Scalar4* d_cell_orientation_new,
unsigned int* d_sort_permutation,
Index2D cli)
{
unsigned int cell_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (cell_idx >= cl_size)
return;
unsigned int perm_idx = d_sort_permutation[cell_idx];
if (d_xyzf)
d_xyzf_new[cell_idx] = d_xyzf[perm_idx];
if (d_cell_idx)
d_cell_idx_new[cell_idx] = d_cell_idx[perm_idx];
if (d_tdb)
d_tdb_new[cell_idx] = d_tdb[perm_idx];
if (d_cell_orientation)
d_cell_orientation_new[cell_idx] = d_cell_orientation[perm_idx];
}
/*! Driver function to sort the cell list on the GPU
   This applies lexicographical order to cell idx, particle idx pairs
   \param d_cell_size List of cell sizes
   \param d_xyzf List of coordinates and flag
   \param d_xyzf_new Double buffer for the permuted coordinates and flag
   \param d_tdb List of type, diameter and body index
   \param d_tdb_new Double buffer for the permuted type, diameter and body index
   \param d_cell_orientation List of particle orientations
   \param d_cell_orientation_new Double buffer for the permuted orientations
   \param d_cell_idx List of particle indices
   \param d_cell_idx_new Double buffer for the permuted particle indices
   \param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
   \param d_sort_permutation Temporary array for storing the permuted cell list indices
   \param ci Cell indexer
   \param cli Cell list indexer
*/
hipError_t gpu_sort_cell_list(unsigned int* d_cell_size,
Scalar4* d_xyzf,
Scalar4* d_xyzf_new,
Scalar4* d_tdb,
Scalar4* d_tdb_new,
Scalar4* d_cell_orientation,
Scalar4* d_cell_orientation_new,
unsigned int* d_cell_idx,
unsigned int* d_cell_idx_new,
uint2* d_sort_idx,
unsigned int* d_sort_permutation,
const Index3D ci,
const Index2D cli)
{
unsigned int block_size = 256;
// fill indices table with cell idx/particle idx pairs
dim3 threads(block_size);
dim3 grid(cli.getNumElements() / block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_fill_indices_kernel),
grid,
threads,
0,
0,
cli.getNumElements(),
d_sort_idx,
d_sort_permutation,
d_cell_idx,
d_cell_size,
ci,
cli);
// locality sort on those pairs
thrust::device_ptr<uint2> d_sort_idx_thrust(d_sort_idx);
thrust::device_ptr<unsigned int> d_sort_permutation_thrust(d_sort_permutation);
thrust::sort_by_key(d_sort_idx_thrust,
d_sort_idx_thrust + cli.getNumElements(),
d_sort_permutation_thrust,
comp_less_uint2());
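// Illustrative example (assuming comp_less_uint2, defined earlier in this file, compares the
// cell index in .x first and the particle index in .y second): keys {(1,7), (0,4), (1,2)}
// sort to {(0,4), (1,2), (1,7)}, and d_sort_permutation records the original slot of each key
// so the kernel below can gather the cell list members into that order.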
// apply sorted order
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_apply_sorted_cell_list_order),
grid,
threads,
0,
0,
cli.getNumElements(),
d_cell_idx,
d_cell_idx_new,
d_xyzf,
d_xyzf_new,
d_tdb,
d_tdb_new,
d_cell_orientation,
d_cell_orientation_new,
d_sort_permutation,
cli);
// copy back permuted arrays to original ones
if (d_xyzf)
hipMemcpy(d_xyzf,
d_xyzf_new,
sizeof(Scalar4) * cli.getNumElements(),
hipMemcpyDeviceToDevice);
hipMemcpy(d_cell_idx,
d_cell_idx_new,
sizeof(unsigned int) * cli.getNumElements(),
hipMemcpyDeviceToDevice);
if (d_tdb)
{
hipMemcpy(d_tdb,
d_tdb_new,
sizeof(Scalar4) * cli.getNumElements(),
hipMemcpyDeviceToDevice);
}
if (d_cell_orientation)
{
hipMemcpy(d_cell_orientation,
d_cell_orientation_new,
sizeof(Scalar4) * cli.getNumElements(),
hipMemcpyDeviceToDevice);
}
return hipSuccess;
}
|
593bfce35fe5432178a17a8d8850f6233849dc62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/Resize.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/cub.h>
#include <c10/util/irange.h>
#include <c10/core/QScheme.h>
#include <limits>
#include <c10/macros/Macros.h>
namespace {
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
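// Illustrative walk-through of the table above: with sorted_indices = [1, 1, 5, 8] the warp
// owning idx 0 loops over both entries for destination row 1, reading the grad_output rows
// given by indices[0] and indices[1]; the warp at idx 1 sees a duplicate predecessor and
// exits. With accumulate == false the loop skips all but the last duplicate, so only the
// final write for row 1 survives.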
for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
// if not accumulate, we only keep the last duplicate index so skip those before it
if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) {
idx++;
continue;
}
const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
if (accumulate) {
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
if (accumulate) {
weight[ii] += gradient[ii] * scale;
} else {
weight[ii] = gradient[ii] * scale;
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
//we don't need to check range in backward - if there were out of bounds indices forward should already have errored out
if (index.numel() != 0 && check_range) {
auto max_idx = index.max().item<int64_t>();
auto min_idx = index.min().item<int64_t>();
if (max_idx >= dim_size) {
TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
if (min_idx < -dim_size) {
TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
}
return index.remainder(dim_size);
}
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
// computes the stride as if tensor were contiguous
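// e.g. sizes [2, 3, 4] produce strides [12, 4, 1]: the last stride is 1 and each earlier
// stride is the running product of the sizes to its right.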
auto sizes = tensor.sizes();
std::vector<int64_t> stride(tensor.dim());
stride[tensor.dim() - 1] = 1;
std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>());
return stride;
}
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& device = src.options().device();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being indexed.
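// Illustrative example (assuming a contiguous src of shape [5, 6, 7] indexed only at dim 1):
// linearIndex = wrapped_index * strides[1] = wrapped_index * 7, strideBefore = src.stride(0),
// nElemBefore = 5 (leading unindexed elements) and nElemAfter = 7 (trailing unindexed elements).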
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (const auto i: c10::irange(src.dim())) {
if (indices[i].defined()) {
// Cast index to the longType matching src's device
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device);
if (linearIndex.defined()) {
linearIndex += index;
} else {
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
emptyAfter++;
nElemAfter *= src.size(i);
} else {
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices);
namespace {
int64_t largestIndex(const Tensor &self) {
int64_t result = 0;
for (const auto i: c10::irange(self.dim())) {
result += (self.sizes()[i] - 1) * self.strides()[i];
}
return result;
}
void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
if (!self.is_contiguous()) {
self = self.contiguous();
}
Tensor linearIndex, src, expandedValue = value;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
if (expandedValue.numel() < num_indices * nElemBefore * sliceSize) {
auto expanded_size = at::DimVector(expandedValue.sizes());
auto size1 = expandedValue.sizes();
auto size2 = linearIndex.sizes();
if (are_expandable(size1, size2)) {
expanded_size = infer_size_dimvector(size1, size2);
}
if (nElemBefore > 1) {
expanded_size.insert(expanded_size.begin(), nElemBefore);
}
expandedValue = expandedValue.expand(expanded_size);
}
expandedValue = expandedValue.contiguous();
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.reshape(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
linearIndex.divide_(sliceSize, "trunc");
// cub on CUDA <= 11.2 have a bug that for small sizes
// cub's sort can be much slower than thrust's merge sort
// this bug is fixed in CUDA 11.3
#if (defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11030) || defined(USE_ROCM)
if (num_indices < 50000) {
index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices);
} else
#endif
{
// Sort the inputs into sorted with the corresponding indices
auto range = at::arange(num_indices, linearIndex.options());
// linearIndex can not be negative, and we take advantage of this
// fact to sort on less bits for better performance.
int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize);
cuda::cub::radix_sort_pairs(
linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(),
range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(),
num_indices, false, 0, nbits);
}
TORCH_INTERNAL_ASSERT(
linearIndex.numel()*sliceSize*nElemBefore == expandedValue.numel(),
"number of flattened indices did not match number of elements in the value tensor: ",
linearIndex.numel()*sliceSize*nElemBefore, " vs ", expandedValue.numel());
const int UNROLL = 4;
const int indices_per_block = 4;
const int warp_size = at::cuda::warp_size();
dim3 grid(ceil_div(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(sliceSize, (int64_t) (warp_size*UNROLL))),
::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(warp_size, indices_per_block);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
expandedValue.scalar_type(), "indexing_backward", [&] {
hipLaunchKernelGGL(( indexing_backward_kernel<scalar_t, UNROLL>), dim3(grid), dim3(block), 0, stream,
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
expandedValue.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore,
accumulate);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
if (permuted) {
self.copy_(src_.permute(inversePerm));
}
}
}
REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
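// For example, with dst of shape [4, 5, 6], dim = 1 and a 1-D index whose length equals
// src.size(1), the returned slice size is 4 * 6 = 24 (the product of every non-indexed dim).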
static ptrdiff_t getSliceSize(const Tensor & dst,
int dim,
const Tensor & index,
const Tensor & src)
{
const auto dstDims = dst.dim();
const auto srcDims = src.dim();
TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");
ptrdiff_t dstSliceSize = 1;
TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
for (const auto d: c10::irange(dstDims)) {
if (d != dim) {
dstSliceSize *= dst.size(d);
}
}
TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
TORCH_CHECK(index.numel() == src.size(dim),
"length of src.size[dim] is not equal to length of indices");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (const auto d: c10::irange(srcDims)) {
if (d != dim) {
srcSliceSize *= src.size(d);
if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
}
}
TORCH_CHECK(dstSliceSize == srcSliceSize,
"Source/destination tensor have different slice sizes (%ld vs %ld)",
dstSliceSize, srcSliceSize);
if (mismatch) {
TORCH_WARN_ONCE(
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
return dstSliceSize;
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
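// Illustrative numbers (not prescriptive): the dispatch below switches kernels at
// numIndex <= 16. Adding 4 rows into a [1000, 256] tensor uses this kernel, which loads each
// of the 4 indices once and strides the thread grid over the 256-element slice; adding 10000
// rows uses indexAddLargeIndex so every (index, element) pair maps to its own grid-stride slot.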
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
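// Concrete stride example: for a contiguous 2-D tensor of shape [R, C] with R, C > 1
// (strides [C, 1]), sliceDim == 0 gives sliceStride == C and dimension 1 has stride 1 < C,
// so the function returns true (index-major); sliceDim == 1 gives sliceStride == 1, no
// dimension has a smaller stride, and the function returns false (elementInSlice-major).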
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (const auto i: c10::irange(info.dims)) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
void index_add_cuda_impl(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
if (!result.is_same(self)) result.copy_(self);
// Scalars are treated as 1-d tensor
Tensor self_ = (result.dim() == 0) ? result.view(1) : result;
Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
TORCH_CHECK(result.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
TORCH_CHECK(source.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims" );
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
if (globalContext().deterministicAlgorithms()){
torch::List<c10::optional<Tensor>> indices;
indices.reserve(dim + 1);
for (const auto i: c10::irange(dim)) {
indices.emplace_back();
}
indices.emplace_back(index.to(at::kLong));
result.index_put_(indices, source * alpha, true);
return;
}
// The `source` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `index`.
ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
ptrdiff_t sourceTotalSize = source.numel();
int64_t selfAddDimSize = self_.size(dim);
ptrdiff_t numIndex = index.numel();
if (sliceSize == 0) {
return;
}
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
bool indContig = index.is_contiguous();
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sourceTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndex, \
selfAddDimSize, alpha_value); \
C10_HIP_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(ceil_div(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(sourceTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(result) &&
cuda::detail::canUse32BitIndexMath(source) &&
cuda::detail::canUse32BitIndexMath(index)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
auto sourceInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
auto indexInfo =
cuda::detail::getTensorInfo<index_t, unsigned int>(index);
indexInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// index to choose
if (numIndex <= 16) {
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
cuda::detail::getTensorInfo<index_t, uint64_t>(index);
indexInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
TORCH_IMPL_FUNC(index_add_cuda_out)
(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
index_add_cuda_impl(self, dim, index, source, alpha, result);
}
namespace {
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
namespace {
// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
if (ti.dims == 0) {
ti.dims = 1;
ti.sizes[0] = 1;
ti.strides[0] = 1;
}
return ti;
}
}
template <typename scalar_t>
void index_select_out_cuda_impl(
Tensor& out,
const Tensor& self,
long dim,
const Tensor& index) {
ptrdiff_t numIndices = index.numel();
int selfDims = self.dim() == 0 ? 1 : self.dim();
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
TORCH_CHECK(
index.dim() <= 1, "Index is supposed to be an empty tensor or a vector");
TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");
std::vector<int64_t> newSize = self.sizes().vec();
if (self.dim() > 0) {
newSize[dim] = numIndices;
}
if (self.is_quantized()){
out = at::empty_quantized(newSize, out);
} else {
at::native::resize_output(out, newSize);
}
ptrdiff_t outTotalSize = out.numel();
if (outTotalSize == 0) {
return;
}
bool indContig = index.is_contiguous();
// The `self` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
ptrdiff_t sliceSize = outTotalSize / numIndices;
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \
selfSelectDimSize); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \
static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \
selfSelectDimSize); \
C10_HIP_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(ceil_div(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(outTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(out) &&
cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(index)) {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
} else {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
indicesInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
Tensor& index_select_out_cuda(
const Tensor& self,
int64_t dim,
const Tensor& index,
Tensor& out) {
static constexpr string_view DIM_WARNING =
"Tensor too large or too many (> 25) dimensions";
TORCH_CHECK(
at::cuda::check_device({out, self, index}),
"Input, output and indices must be on the current device");
at::assert_no_internal_overlap(out);
at::assert_no_overlap(out, self);
at::assert_no_overlap(out, index);
dim = at::maybe_wrap_dim(dim, self);
TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] {
index_select_out_cuda_impl<scalar_t>(out, self, dim, index);
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half,
at::ScalarType::Bool,
at::ScalarType::BFloat16,
out.scalar_type(),
"index_select_cuda",
[&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
}
return out;
}
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
Tensor out = at::empty({0}, self.options());
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
Tensor index_select_quantized_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
Tensor out = at::empty_quantized({0}, self);
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
namespace {
template <typename mask_t>
void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() {
const auto value_ = value.to<scalar_t>();
gpu_kernel(
iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t {
if (mask) {
return value_;
}
return self;
});
});
}
} // anonymous namespace
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) {
TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ",
mask.device(), " and self on ", self.device());
TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool,
"expected mask dtype to be Bool but got ", mask.scalar_type());
auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");
if (at::has_internal_overlap(self) == MemOverlap::YES) {
TORCH_WARN(
"Use of masked_fill_ on expanded tensors is deprecated. "
"Please clone() the tensor before performing this operation. "
"This also applies to advanced indexing e.g. tensor[mask] = scalar");
}
at::assert_no_partial_overlap(self, mask);
c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(*b_mask)
.build();
if (b_mask->dtype() == at::ScalarType::Byte) {
TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
masked_fill_kernel<uint8_t>(iter, value);
} else {
masked_fill_kernel<bool>(iter, value);
}
namedinference::propagate_names_if_nonempty(self, maybe_outnames);
return self;
}
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) {
TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "
"with ", value.dim(), " dimension(s).");
return masked_fill__cuda(self, mask, value.item());
}
} // native
} // at
| 593bfce35fe5432178a17a8d8850f6233849dc62.cu | #include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/Resize.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/cub.h>
#include <c10/util/irange.h>
#include <c10/core/QScheme.h>
#include <limits>
#include <c10/macros/Macros.h>
namespace {
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
// if not accumulate, we only keep the last duplicate index so skip those before it
if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) {
idx++;
continue;
}
const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
if (accumulate) {
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
if (accumulate) {
weight[ii] += gradient[ii] * scale;
} else {
weight[ii] = gradient[ii] * scale;
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
//we don't need to check range in backward - if there were out of bounds indices forward should already have errored out
if (index.numel() != 0 && check_range) {
auto max_idx = index.max().item<int64_t>();
auto min_idx = index.min().item<int64_t>();
if (max_idx >= dim_size) {
TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
if (min_idx < -dim_size) {
TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
}
return index.remainder(dim_size);
}
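// Illustrative behaviour for dim_size == 5: index 7 or -6 trips TORCH_CHECK_INDEX above,
// while -1 passes the range check and remainder(5) maps it to 4; with check_range == false
// (the backward path) only the remainder wrap is applied.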
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
// computes the stride as if tensor were contiguous
auto sizes = tensor.sizes();
std::vector<int64_t> stride(tensor.dim());
stride[tensor.dim() - 1] = 1;
std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>());
return stride;
}
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& device = src.options().device();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being indexed.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (const auto i: c10::irange(src.dim())) {
if (indices[i].defined()) {
// Cast index to the longType matching src's device
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device);
if (linearIndex.defined()) {
linearIndex += index;
} else {
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
emptyAfter++;
nElemAfter *= src.size(i);
} else {
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices);
namespace {
int64_t largestIndex(const Tensor &self) {
int64_t result = 0;
for (const auto i: c10::irange(self.dim())) {
result += (self.sizes()[i] - 1) * self.strides()[i];
}
return result;
}
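// e.g. a contiguous tensor of shape [4, 6] has strides [6, 1], so largestIndex returns
// (4 - 1) * 6 + (6 - 1) * 1 = 23, the largest flat offset into self; index_put_with_sort_kernel
// below divides this by sliceSize to bound the number of bits handed to the radix sort.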
void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
if (!self.is_contiguous()) {
self = self.contiguous();
}
Tensor linearIndex, src, expandedValue = value;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
if (expandedValue.numel() < num_indices * nElemBefore * sliceSize) {
auto expanded_size = at::DimVector(expandedValue.sizes());
auto size1 = expandedValue.sizes();
auto size2 = linearIndex.sizes();
if (are_expandable(size1, size2)) {
expanded_size = infer_size_dimvector(size1, size2);
}
if (nElemBefore > 1) {
expanded_size.insert(expanded_size.begin(), nElemBefore);
}
expandedValue = expandedValue.expand(expanded_size);
}
expandedValue = expandedValue.contiguous();
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.reshape(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
linearIndex.divide_(sliceSize, "trunc");
// cub on CUDA <= 11.2 have a bug that for small sizes
// cub's sort can be much slower than thrust's merge sort
// this bug is fixed in CUDA 11.3
#if (defined(CUDA_VERSION) && CUDA_VERSION < 11030) || defined(USE_ROCM)
if (num_indices < 50000) {
index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices);
} else
#endif
{
// Sort the inputs into sorted with the corresponding indices
auto range = at::arange(num_indices, linearIndex.options());
// linearIndex can not be negative, and we take advantage of this
// fact to sort on less bits for better performance.
int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize);
cuda::cub::radix_sort_pairs(
linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(),
range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(),
num_indices, false, 0, nbits);
}
TORCH_INTERNAL_ASSERT(
linearIndex.numel()*sliceSize*nElemBefore == expandedValue.numel(),
"number of flattened indices did not match number of elements in the value tensor: ",
linearIndex.numel()*sliceSize*nElemBefore, " vs ", expandedValue.numel());
const int UNROLL = 4;
const int indices_per_block = 4;
const int warp_size = at::cuda::warp_size();
dim3 grid(ceil_div(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(sliceSize, (int64_t) (warp_size*UNROLL))),
std::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(warp_size, indices_per_block);
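// Illustrative launch shape (assuming warp_size == 32): block = (32, 4) = 128 threads; with
// num_indices == 1000, sliceSize == 512 and nElemBefore == 1 the grid becomes
// (ceil_div(1000, 4), ceil_div(512, 32 * 4), 1) = (250, 4, 1), subject to the maxGridSize
// clamps applied above.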
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
expandedValue.scalar_type(), "indexing_backward", [&] {
indexing_backward_kernel<scalar_t, UNROLL><<<grid, block, 0, stream>>>(
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
expandedValue.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore,
accumulate);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
if (permuted) {
self.copy_(src_.permute(inversePerm));
}
}
}
REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
static ptrdiff_t getSliceSize(const Tensor & dst,
int dim,
const Tensor & index,
const Tensor & src)
{
const auto dstDims = dst.dim();
const auto srcDims = src.dim();
TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");
ptrdiff_t dstSliceSize = 1;
TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
for (const auto d: c10::irange(dstDims)) {
if (d != dim) {
dstSliceSize *= dst.size(d);
}
}
TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
TORCH_CHECK(index.numel() == src.size(dim),
"length of src.size[dim] is not equal to length of indices");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (const auto d: c10::irange(srcDims)) {
if (d != dim) {
srcSliceSize *= src.size(d);
if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
}
}
TORCH_CHECK(dstSliceSize == srcSliceSize,
"Source/destination tensor have different slice sizes (%ld vs %ld)",
dstSliceSize, srcSliceSize);
if (mismatch) {
TORCH_WARN_ONCE(
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
return dstSliceSize;
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (const auto i: c10::irange(info.dims)) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
void index_add_cuda_impl(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
if (!result.is_same(self)) result.copy_(self);
// Scalars are treated as 1-d tensor
Tensor self_ = (result.dim() == 0) ? result.view(1) : result;
Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
TORCH_CHECK(result.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
TORCH_CHECK(source.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims" );
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
if (globalContext().deterministicAlgorithms()){
torch::List<c10::optional<Tensor>> indices;
indices.reserve(dim + 1);
for (const auto i: c10::irange(dim)) {
indices.emplace_back();
}
indices.emplace_back(index.to(at::kLong));
result.index_put_(indices, source * alpha, true);
return;
}
// The `source` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `index`.
ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
ptrdiff_t sourceTotalSize = source.numel();
int64_t selfAddDimSize = self_.size(dim);
ptrdiff_t numIndex = index.numel();
if (sliceSize == 0) {
return;
}
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
bool indContig = index.is_contiguous();
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sourceTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndex, \
selfAddDimSize, alpha_value); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(std::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(ceil_div(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(sourceTotalSize, (ptrdiff_t)128));
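// Rough launch-size arithmetic for the configurations above (example numbers):
// with sliceSize = 1000 and mpc = 80, smallIndexGrid = min(ceil(1000/128), 640) = 8
// blocks of min(1000, 128) = 128 threads each; the large-index configuration is sized
// the same way over sourceTotalSize, with the kernel's grid-stride loop covering any
// remainder.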
if (cuda::detail::canUse32BitIndexMath(result) &&
cuda::detail::canUse32BitIndexMath(source) &&
cuda::detail::canUse32BitIndexMath(index)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
auto sourceInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
auto indexInfo =
cuda::detail::getTensorInfo<index_t, unsigned int>(index);
indexInfo.collapseDims();
// Heuristic: with at most 16 indices it is cheaper to have each thread
// iterate over the indices itself (the small-index kernel below)
if (numIndex <= 16) {
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
cuda::detail::getTensorInfo<index_t, uint64_t>(index);
indexInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
TORCH_IMPL_FUNC(index_add_cuda_out)
(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& source, const Scalar& alpha, const Tensor& result) {
index_add_cuda_impl(self, dim, index, source, alpha, result);
}
namespace {
// We prefer this kernel when the number of indices is small, since it avoids
// reloading index points.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
namespace {
// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
if (ti.dims == 0) {
ti.dims = 1;
ti.sizes[0] = 1;
ti.strides[0] = 1;
}
return ti;
}
}
template <typename scalar_t>
void index_select_out_cuda_impl(
Tensor& out,
const Tensor& self,
long dim,
const Tensor& index) {
ptrdiff_t numIndices = index.numel();
int selfDims = self.dim() == 0 ? 1 : self.dim();
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
TORCH_CHECK(
index.dim() <= 1, "Index is supposed to be an empty tensor or a vector");
TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");
std::vector<int64_t> newSize = self.sizes().vec();
if (self.dim() > 0) {
newSize[dim] = numIndices;
}
if (self.is_quantized()){
out = at::empty_quantized(newSize, out);
} else {
at::native::resize_output(out, newSize);
}
ptrdiff_t outTotalSize = out.numel();
if (outTotalSize == 0) {
return;
}
bool indContig = index.is_contiguous();
// The `self` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
ptrdiff_t sliceSize = outTotalSize / numIndices;
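// Example with illustrative sizes: self of shape {10, 4}, dim = 0 and 6 indices give
// out of shape {6, 4}, so outTotalSize = 24 and sliceSize = 24 / 6 = 4 (one selected
// row per index).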
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \
selfSelectDimSize); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \
static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \
selfSelectDimSize); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(std::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(ceil_div(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(outTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(out) &&
cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(index)) {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
indicesInfo.collapseDims();
// Heuristic: with at most 16 indices it is cheaper to have each thread
// iterate over the indices itself (the small-index kernel below)
if (numIndices <= 16) {
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
} else {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
indicesInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
Tensor& index_select_out_cuda(
const Tensor& self,
int64_t dim,
const Tensor& index,
Tensor& out) {
static constexpr string_view DIM_WARNING =
"Tensor too large or too many (> 25) dimensions";
TORCH_CHECK(
at::cuda::check_device({out, self, index}),
"Input, output and indices must be on the current device");
at::assert_no_internal_overlap(out);
at::assert_no_overlap(out, self);
at::assert_no_overlap(out, index);
dim = at::maybe_wrap_dim(dim, self);
TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] {
index_select_out_cuda_impl<scalar_t>(out, self, dim, index);
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half,
at::ScalarType::Bool,
at::ScalarType::BFloat16,
out.scalar_type(),
"index_select_cuda",
[&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
}
return out;
}
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
Tensor out = at::empty({0}, self.options());
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
Tensor index_select_quantized_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
Tensor out = at::empty_quantized({0}, self);
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
namespace {
template <typename mask_t>
void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() {
const auto value_ = value.to<scalar_t>();
gpu_kernel(
iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t {
if (mask) {
return value_;
}
return self;
});
});
}
} // anonymous namespace
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) {
TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ",
mask.device(), " and self on ", self.device());
TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool,
"expected mask dtype to be Bool but got ", mask.scalar_type());
auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");
if (at::has_internal_overlap(self) == MemOverlap::YES) {
TORCH_WARN(
"Use of masked_fill_ on expanded tensors is deprecated. "
"Please clone() the tensor before performing this operation. "
"This also applies to advanced indexing e.g. tensor[mask] = scalar");
}
at::assert_no_partial_overlap(self, mask);
c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(*b_mask)
.build();
if (b_mask->dtype() == at::ScalarType::Byte) {
TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
masked_fill_kernel<uint8_t>(iter, value);
} else {
masked_fill_kernel<bool>(iter, value);
}
namedinference::propagate_names_if_nonempty(self, maybe_outnames);
return self;
}
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) {
TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "
"with ", value.dim(), " dimension(s).");
return masked_fill__cuda(self, mask, value.item());
}
} // native
} // at
|
0c39bafe7d841e6c21c87dd9a058096987fd4d4b.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/HIPSolver.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPEvent.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#ifdef USE_CUSOLVER
namespace at {
namespace native {
inline static Tensor column_major_identity_matrix_like(const Tensor& self) {
auto size = self.sizes();
auto size_slice = IntArrayRef(size.data(), size.size()-1);
return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1);
}
template <typename scalar_t>
inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_getrf_ptr, int* info_getrs_ptr, int n, int lda) {
// self_inv_ptr should already be an identity matrix
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, info_getrf_ptr);
at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, self_inv_ptr, lda, info_getrs_ptr);
}
template <typename scalar_t>
static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
const int batch_size = cuda_int_cast(batchCount(self), "batchCount");
const int n = cuda_int_cast(self.size(-2), "self.size(-2)");
const int lda = std::max<int>(1, n);
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_getrf_data = infos_getrf.data_ptr<int>();
auto infos_getrs_data = infos_getrs.data_ptr<int>();
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
// Heuristic: For small batch size or large matrix size, we use for-loop to iterate over the batches instead of
// calling the batched cublas routine.
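// For instance, batch_size = 4 (any n) or batch_size = 64 with n = 1024 take the
// per-matrix getrf/getrs loop below, while batch_size = 64 with n = 128 falls
// through to the batched cuBLAS path.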
if (batch_size <= 8 || /* batch_size > 8 && */ n >= 512) {
for (int64_t i = 0; i < batch_size; i++) {
auto dataPtr = allocator.allocate(sizeof(int) * lda);
int* pivot = reinterpret_cast<int*>(dataPtr.get());
int* infos_getrf_working_ptr = &infos_getrf_data[i];
int* infos_getrs_working_ptr = &infos_getrs_data[i];
_apply_single_inverse_helper<scalar_t>(
&self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, infos_getrf_working_ptr, infos_getrs_working_ptr, n, lda);
}
} else {
// cublas batched kernels require input be "device array of device pointers"
Tensor self_array = at::arange(
reinterpret_cast<int64_t>(self_data),
reinterpret_cast<int64_t>(&self_data[(batch_size-1) * self_mat_stride]) + 1,
static_cast<int64_t>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong));
Tensor self_inv_array = at::arange(
reinterpret_cast<int64_t>(self_inv_data),
reinterpret_cast<int64_t>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1,
static_cast<int64_t>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong));
auto dataPtr = allocator.allocate(sizeof(int)*batch_size*lda);
int* ipiv_array = reinterpret_cast<int*>(dataPtr.get());
at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
ipiv_array, infos_getrf_data, batch_size);
at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
ipiv_array, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr()), lda, infos_getrs_data, batch_size);
}
}
template <typename scalar_t>
static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
int n = cuda_int_cast(self.size(-2), "self.size(-2)");
int lda = std::max<int>(1, n);
Tensor ipiv = at::empty({lda}, self.options().dtype(at::kInt));
_apply_single_inverse_helper<scalar_t>(
self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), infos_getrf.data_ptr<int>(), infos_getrs.data_ptr<int>(), n, lda);
}
// This is a type dispatching helper function for 'apply_batched_inverse_lib' and 'apply_single_inverse_lib'
Tensor& _linalg_inv_out_helper_cuda_lib(Tensor& result, Tensor& infos_getrf, Tensor& infos_getrs) {
// assuming result is in column major order and contains the matrices to invert
Tensor input_working_copy = cloneBatchedColumnMajor(result);
// for getrf + getrs (cusolver path)
// result should be filled with identity matrices
result.zero_();
result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
const int batch_size = cuda_int_cast(batchCount(result), "batchCount");
if (result.dim() > 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse_lib<scalar_t>(
input_working_copy, result, infos_getrf, infos_getrs);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse_lib<scalar_t>(input_working_copy, result, infos_getrf, infos_getrs);
});
}
return result;
}
// entrance of calculations of `inverse` using cusolver getrf + getrs, cublas getrfBatched + getriBatched
Tensor _inverse_helper_cuda_lib(const Tensor& self) {
Tensor self_working_copy = cloneBatchedColumnMajor(self);
Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy);
const int batch_size = cuda_int_cast(batchCount(self), "batchCount");
if (self.dim() > 2 && batch_size > 1) {
Tensor infos_getrf = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
Tensor infos_getrs = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse_lib<scalar_t>(
self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
});
batchCheckErrors(infos_getrf, "inverse_cuda");
batchCheckErrors(infos_getrs, "inverse_cuda");
} else {
Tensor infos_getrf = at::zeros({1}, self.options().dtype(kInt));
Tensor infos_getrs = at::zeros({1}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
});
batchCheckErrors(infos_getrf, "inverse_cuda");
batchCheckErrors(infos_getrs, "inverse_cuda");
}
return self_inv_working_copy;
}
// call cusolver gesvdj function to calculate svd
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
int batchsize = cuda_int_cast(batchCount(self), "batch size");
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int ldvt = std::max<int>(1, n);
for(int i = 0; i < batchsize; i++){
// gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
hipsolverGesvdjInfo_t gesvdj_params;
TORCH_CUSOLVER_CHECK(hipsolverDnCreateGesvdjInfo(&gesvdj_params));
// TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
// TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
auto jobz = compute_uv ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
at::cuda::solver::gesvdj<scalar_t>(
handle, jobz, /*econ=*/ some ? 1 : 0, m, n,
self_data + i * self_stride,
lda,
S_data + i * S_stride,
U_data + i * U_stride,
lda,
VT_data + i * VT_stride,
ldvt,
infos.data_ptr<int>() + i,
gesvdj_params
);
TORCH_CUSOLVER_CHECK(hipsolverDnDestroyGesvdjInfo(gesvdj_params));
}
}
// wrapper around _apply_svd_lib_gesvdj that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
Tensor self_working_copy = cloneBatchedColumnMajor(self);
VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdj", [&] {
_apply_svd_lib_gesvdj<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv, some);
});
}
// call cusolver gesvdj batched function to calculate svd
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
int batchsize = cuda_int_cast(batchCount(self), "batch size");
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int ldvt = std::max<int>(1, n);
TORCH_INTERNAL_ASSERT(m <= 32 && n <= 32, "gesvdjBatched requires both matrix dimensions not greater than 32, but got "
"m = ", m, " n = ", n);
// gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
hipsolverGesvdjInfo_t gesvdj_params;
TORCH_CUSOLVER_CHECK(hipsolverDnCreateGesvdjInfo(&gesvdj_params));
// TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
// TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));
TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetSortEig(gesvdj_params, 1));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
auto jobz = compute_uv ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR;
at::cuda::solver::gesvdjBatched<scalar_t>(
handle, jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt,
infos.data_ptr<int>(), gesvdj_params, batchsize
);
TORCH_CUSOLVER_CHECK(hipsolverDnDestroyGesvdjInfo(gesvdj_params));
}
// wrapper around _apply_svd_lib_gesvdjBatched that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
Tensor self_working_copy = cloneBatchedColumnMajor(self);
VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdjBatched", [&] {
_apply_svd_lib_gesvdjBatched<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv);
});
}
// entrance of calculations of `svd` using cusolver gesvdj and gesvdjBatched
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_lib(const Tensor& self, bool some, bool compute_uv) {
const int64_t batch_size = batchCount(self);
at::Tensor infos = at::zeros({batch_size}, self.options().dtype(at::kInt));
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
const int64_t k = ::min(m, n);
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = \
_create_U_S_VT(self, some, compute_uv, /* svd_use_cusolver = */ true);
// U, S, V working copies are already column majored now
// heuristic for using `gesvdjBatched` over `gesvdj`
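// E.g. a batch of 16 matrices of shape 16x16 (with some == true) satisfies the check
// below and takes gesvdjBatched, whereas a single 64x64 matrix (batch_size == 1, and
// m, n > 32) falls back to the per-matrix gesvdj loop.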
if (m <= 32 && n <= 32 && batch_size > 1 && (!some || m == n)) {
apply_svd_lib_gesvdjBatched(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv);
} else {
apply_svd_lib_gesvdj(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv, some);
}
// A device-host sync will be performed.
batchCheckErrors(infos, "svd_cuda");
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
/*
The orgqr function allows reconstruction of an orthogonal (or unitary) matrix Q,
from a sequence of elementary reflectors, such as produced by the geqrf function.
Args:
* `self` - Tensor with the directions of the elementary reflectors below the diagonal,
it will be overwritten with the result
* `tau` - Tensor containing the magnitudes of the elementary reflectors
* `infos` - Tensor to store cuSOLVER's error codes
* `n_columns` - The number of columns of Q to be computed
For further details, please see the cuSOLVER documentation for ORGQR and UNGQR.
*/
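// Illustrative shapes (not taken from any call site): after geqrf on a 5 x 3 matrix,
// `self` is 5 x 3, `tau` holds k = 3 reflector magnitudes, and n_columns = 3 requests
// the thin Q, which satisfies the m >= n >= k asserts below.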
template <typename scalar_t>
inline void apply_orgqr_cusolver(Tensor& self, const Tensor& tau, Tensor& infos, int64_t n_columns) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto tau_data = tau.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<int>();
auto self_matrix_stride = matrixStride(self);
auto batchsize = cuda_int_cast(batchCount(self), "batch size");
auto m = cuda_int_cast(self.size(-2), "m");
auto n = cuda_int_cast(n_columns, "n");
auto k = cuda_int_cast(tau.size(-1), "k");
auto tau_stride = std::max<int>(1, k);
auto lda = std::max<int>(1, m);
// LAPACK's requirement
TORCH_INTERNAL_ASSERT(m >= n);
TORCH_INTERNAL_ASSERT(n >= k);
// get the optimal work size and allocate workspace tensor
int lwork;
at::cuda::solver::orgqr_buffersize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), m, n, k, self_data, lda, tau_data, &lwork);
for (auto i = decltype(batchsize){0}; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
int* info_working_ptr = &infos_data[i];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
// allocate workspace storage
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t)*lwork);
at::cuda::solver::orgqr<scalar_t>(
handle, m, n, k,
self_working_ptr,
lda,
tau_working_ptr,
static_cast<scalar_t*>(work_data.get()),
lwork,
info_working_ptr
);
}
}
// This is a type dispatching helper function for 'apply_orgqr_cusolver'
Tensor& orgqr_helper_cuda_lib(Tensor& result, const Tensor& tau, Tensor& infos, int64_t n_columns) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "orgqr_cuda", [&]{
apply_orgqr_cusolver<scalar_t>(result, tau, infos, n_columns);
});
return result;
}
}} // namespace at::native
#endif // USE_CUSOLVER
| 0c39bafe7d841e6c21c87dd9a058096987fd4d4b.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/CUDASolver.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAEvent.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#ifdef USE_CUSOLVER
namespace at {
namespace native {
inline static Tensor column_major_identity_matrix_like(const Tensor& self) {
auto size = self.sizes();
auto size_slice = IntArrayRef(size.data(), size.size()-1);
return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1);
}
template <typename scalar_t>
inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_getrf_ptr, int* info_getrs_ptr, int n, int lda) {
// self_inv_ptr should already be an identity matrix
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, info_getrf_ptr);
at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, self_inv_ptr, lda, info_getrs_ptr);
}
template <typename scalar_t>
static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
const int batch_size = cuda_int_cast(batchCount(self), "batchCount");
const int n = cuda_int_cast(self.size(-2), "self.size(-2)");
const int lda = std::max<int>(1, n);
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_getrf_data = infos_getrf.data_ptr<int>();
auto infos_getrs_data = infos_getrs.data_ptr<int>();
auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
// Heuristic: For small batch size or large matrix size, we use for-loop to iterate over the batches instead of
// calling the batched cublas routine.
if (batch_size <= 8 || /* batch_size > 8 && */ n >= 512) {
for (int64_t i = 0; i < batch_size; i++) {
auto dataPtr = allocator.allocate(sizeof(int) * lda);
int* pivot = reinterpret_cast<int*>(dataPtr.get());
int* infos_getrf_working_ptr = &infos_getrf_data[i];
int* infos_getrs_working_ptr = &infos_getrs_data[i];
_apply_single_inverse_helper<scalar_t>(
&self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, infos_getrf_working_ptr, infos_getrs_working_ptr, n, lda);
}
} else {
// cublas batched kernels require input be "device array of device pointers"
Tensor self_array = at::arange(
reinterpret_cast<int64_t>(self_data),
reinterpret_cast<int64_t>(&self_data[(batch_size-1) * self_mat_stride]) + 1,
static_cast<int64_t>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong));
Tensor self_inv_array = at::arange(
reinterpret_cast<int64_t>(self_inv_data),
reinterpret_cast<int64_t>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1,
static_cast<int64_t>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong));
auto dataPtr = allocator.allocate(sizeof(int)*batch_size*lda);
int* ipiv_array = reinterpret_cast<int*>(dataPtr.get());
at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
ipiv_array, infos_getrf_data, batch_size);
at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda,
ipiv_array, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr()), lda, infos_getrs_data, batch_size);
}
}
template <typename scalar_t>
static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) {
int n = cuda_int_cast(self.size(-2), "self.size(-2)");
int lda = std::max<int>(1, n);
Tensor ipiv = at::empty({lda}, self.options().dtype(at::kInt));
_apply_single_inverse_helper<scalar_t>(
self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), infos_getrf.data_ptr<int>(), infos_getrs.data_ptr<int>(), n, lda);
}
// This is a type dispatching helper function for 'apply_batched_inverse_lib' and 'apply_single_inverse_lib'
Tensor& _linalg_inv_out_helper_cuda_lib(Tensor& result, Tensor& infos_getrf, Tensor& infos_getrs) {
// assuming result is in column major order and contains the matrices to invert
Tensor input_working_copy = cloneBatchedColumnMajor(result);
// for getrf + getrs (cusolver path)
// result should be filled with identity matrices
result.zero_();
result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
const int batch_size = cuda_int_cast(batchCount(result), "batchCount");
if (result.dim() > 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse_lib<scalar_t>(
input_working_copy, result, infos_getrf, infos_getrs);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse_lib<scalar_t>(input_working_copy, result, infos_getrf, infos_getrs);
});
}
return result;
}
// entrance of calculations of `inverse` using cusolver getrf + getrs, cublas getrfBatched + getriBatched
Tensor _inverse_helper_cuda_lib(const Tensor& self) {
Tensor self_working_copy = cloneBatchedColumnMajor(self);
Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy);
const int batch_size = cuda_int_cast(batchCount(self), "batchCount");
if (self.dim() > 2 && batch_size > 1) {
Tensor infos_getrf = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
Tensor infos_getrs = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse_lib<scalar_t>(
self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
});
batchCheckErrors(infos_getrf, "inverse_cuda");
batchCheckErrors(infos_getrs, "inverse_cuda");
} else {
Tensor infos_getrf = at::zeros({1}, self.options().dtype(kInt));
Tensor infos_getrs = at::zeros({1}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs);
});
batchCheckErrors(infos_getrf, "inverse_cuda");
batchCheckErrors(infos_getrs, "inverse_cuda");
}
return self_inv_working_copy;
}
// call cusolver gesvdj function to calculate svd
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
int batchsize = cuda_int_cast(batchCount(self), "batch size");
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int ldvt = std::max<int>(1, n);
for(int i = 0; i < batchsize; i++){
// gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
gesvdjInfo_t gesvdj_params;
TORCH_CUSOLVER_CHECK(cusolverDnCreateGesvdjInfo(&gesvdj_params));
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
auto jobz = compute_uv ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
at::cuda::solver::gesvdj<scalar_t>(
handle, jobz, /*econ=*/ some ? 1 : 0, m, n,
self_data + i * self_stride,
lda,
S_data + i * S_stride,
U_data + i * U_stride,
lda,
VT_data + i * VT_stride,
ldvt,
infos.data_ptr<int>() + i,
gesvdj_params
);
TORCH_CUSOLVER_CHECK(cusolverDnDestroyGesvdjInfo(gesvdj_params));
}
}
// wrapper around _apply_svd_lib_gesvdj that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) {
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
Tensor self_working_copy = cloneBatchedColumnMajor(self);
VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdj", [&] {
_apply_svd_lib_gesvdj<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv, some);
});
}
// call cusolver gesvdj batched function to calculate svd
template<typename scalar_t>
inline static void _apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
int batchsize = cuda_int_cast(batchCount(self), "batch size");
int m = cuda_int_cast(self.size(-2), "m");
int n = cuda_int_cast(self.size(-1), "n");
int lda = std::max<int>(1, m);
int ldvt = std::max<int>(1, n);
TORCH_INTERNAL_ASSERT(m <= 32 && n <= 32, "gesvdjBatched requires both matrix dimensions not greater than 32, but got "
"m = ", m, " n = ", n);
// gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU
gesvdjInfo_t gesvdj_params;
TORCH_CUSOLVER_CHECK(cusolverDnCreateGesvdjInfo(&gesvdj_params));
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7));
// TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15));
TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetSortEig(gesvdj_params, 1));
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
auto jobz = compute_uv ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
at::cuda::solver::gesvdjBatched<scalar_t>(
handle, jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt,
infos.data_ptr<int>(), gesvdj_params, batchsize
);
TORCH_CUSOLVER_CHECK(cusolverDnDestroyGesvdjInfo(gesvdj_params));
}
// wrapper around _apply_svd_lib_gesvdjBatched that handles dtype dispatch,
// creates a working copy of the input, and creates V^H from the V returned by gesvdj
inline static void apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) {
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
Tensor self_working_copy = cloneBatchedColumnMajor(self);
VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdjBatched", [&] {
_apply_svd_lib_gesvdjBatched<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv);
});
}
// entrance of calculations of `svd` using cusolver gesvdj and gesvdjBatched
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_lib(const Tensor& self, bool some, bool compute_uv) {
const int64_t batch_size = batchCount(self);
at::Tensor infos = at::zeros({batch_size}, self.options().dtype(at::kInt));
const int64_t m = self.size(-2);
const int64_t n = self.size(-1);
const int64_t k = std::min(m, n);
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = \
_create_U_S_VT(self, some, compute_uv, /* svd_use_cusolver = */ true);
// U, S, V working copies are already column majored now
// heuristic for using `gesvdjBatched` over `gesvdj`
if (m <= 32 && n <= 32 && batch_size > 1 && (!some || m == n)) {
apply_svd_lib_gesvdjBatched(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv);
} else {
apply_svd_lib_gesvdj(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv, some);
}
// A device-host sync will be performed.
batchCheckErrors(infos, "svd_cuda");
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
/*
The orgqr function allows reconstruction of an orthogonal (or unitary) matrix Q,
from a sequence of elementary reflectors, such as produced by the geqrf function.
Args:
* `self` - Tensor with the directions of the elementary reflectors below the diagonal,
it will be overwritten with the result
* `tau` - Tensor containing the magnitudes of the elementary reflectors
* `infos` - Tensor to store cuSOLVER's error codes
* `n_columns` - The number of columns of Q to be computed
For further details, please see the cuSOLVER documentation for ORGQR and UNGQR.
*/
template <typename scalar_t>
inline void apply_orgqr_cusolver(Tensor& self, const Tensor& tau, Tensor& infos, int64_t n_columns) {
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto tau_data = tau.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<int>();
auto self_matrix_stride = matrixStride(self);
auto batchsize = cuda_int_cast(batchCount(self), "batch size");
auto m = cuda_int_cast(self.size(-2), "m");
auto n = cuda_int_cast(n_columns, "n");
auto k = cuda_int_cast(tau.size(-1), "k");
auto tau_stride = std::max<int>(1, k);
auto lda = std::max<int>(1, m);
// LAPACK's requirement
TORCH_INTERNAL_ASSERT(m >= n);
TORCH_INTERNAL_ASSERT(n >= k);
// get the optimal work size and allocate workspace tensor
int lwork;
at::cuda::solver::orgqr_buffersize<scalar_t>(
at::cuda::getCurrentCUDASolverDnHandle(), m, n, k, self_data, lda, tau_data, &lwork);
for (auto i = decltype(batchsize){0}; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
int* info_working_ptr = &infos_data[i];
auto handle = at::cuda::getCurrentCUDASolverDnHandle();
// allocate workspace storage
auto& allocator = *at::cuda::getCUDADeviceAllocator();
auto work_data = allocator.allocate(sizeof(scalar_t)*lwork);
at::cuda::solver::orgqr<scalar_t>(
handle, m, n, k,
self_working_ptr,
lda,
tau_working_ptr,
static_cast<scalar_t*>(work_data.get()),
lwork,
info_working_ptr
);
}
}
// This is a type dispatching helper function for 'apply_orgqr_cusolver'
Tensor& orgqr_helper_cuda_lib(Tensor& result, const Tensor& tau, Tensor& infos, int64_t n_columns) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "orgqr_cuda", [&]{
apply_orgqr_cusolver<scalar_t>(result, tau, infos, n_columns);
});
return result;
}
}} // namespace at::native
#endif // USE_CUSOLVER
|
bf271cefaf3aca23cc06d2339e783932aedcf74b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Config.h>
#include <ATen/Dispatch.h>
#include <ATen/Utils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/detail/CUDAHooksInterface.h>
#include <ATen/native/Resize.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/SpectralOpsUtils.h>
#include <ATen/native/hip/CuFFTUtils.h>
#include <ATen/native/hip/CuFFTPlanCache.h>
#include <c10/util/accumulate.h>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <cmath>
#include <vector>
namespace at { namespace native {
using namespace at::native::detail;
// Offset calculator for indexing in Hermitian mirrored order.
// In mirrored dims, maps linear index i to (n - i) % n
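// Worked example of the mapping above: with n = 8, index 0 stays at 0 and index 3
// maps to (8 - 3) % 8 = 5, i.e. the conjugate-symmetric bin of a length-8 FFT.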
template <typename index_t>
struct HermitianSymmetryOffsetCalculator {
using offset_type = at::detail::Array<index_t, 1>;
using dim_type = std::remove_cv_t<decltype(MAX_DIMS)>;
dim_type dims;
IntDivider<index_t> sizes_[MAX_DIMS];
index_t strides_[MAX_DIMS];
uint32_t mirror_dim_; // bit mask
static_assert(MAX_DIMS < 32, "Need a bigger mask type");
HermitianSymmetryOffsetCalculator(
IntArrayRef sizes, IntArrayRef strides, IntArrayRef dim,
const int64_t element_size){
TORCH_INTERNAL_ASSERT(sizes.size() == strides.size());
TORCH_INTERNAL_ASSERT(sizes.size() <= MAX_DIMS);
dims = sizes.size();
for (dim_type i = 0; i < MAX_DIMS; ++i) {
if (i < dims) {
sizes_[i] = IntDivider<index_t>(sizes[i]);
strides_[i] = strides[i] / element_size;
} else {
sizes_[i] = IntDivider<index_t>(1);
strides_[i] = 0;
}
}
mirror_dim_ = 0;
for (int64_t i = 0; i < dim.size(); ++i) {
mirror_dim_ |= (uint32_t{1} << dim[i]);
}
}
C10_HOST_DEVICE offset_type get(index_t linear_idx) const {
index_t offset = 0;
for (dim_type dim = 0; dim < dims; ++dim) {
auto divmod = sizes_[dim].divmod(linear_idx);
linear_idx = divmod.div;
if ((mirror_dim_ & (uint32_t{1} << dim)) == 0) {
offset += divmod.mod * strides_[dim];
} else if (divmod.mod != 0) {
offset += (sizes_[dim].divisor - divmod.mod) * strides_[dim];
}
}
offset_type offsets;
offsets[0] = offset;
return offsets;
}
};
// out[:] = conj(in[:]) where in and out ordering is generalized by offset calculators
template <typename scalar_t, typename inp_calc_t, typename out_calc_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void _fft_conjugate_copy_kernel(
int64_t numel, scalar_t * out_data, const scalar_t * in_data,
inp_calc_t ic, out_calc_t oc) {
CUDA_KERNEL_LOOP_TYPE(index, numel, int64_t) {
auto in_offset = ic.get(index)[0];
auto out_offset = oc.get(index)[0];
out_data[out_offset] = std::conj(in_data[in_offset]);
}
}
// In real-to-complex transform, cuFFT only fills half of the values due to
// conjugate symmetry. See native/SpectralUtils.h for more details.
// The following function fills in the other half with symmetry in
// case of real-to-complex transform with onesided=False flag.
// See NOTE [ Fourier Transform Conjugate Symmetry ] in native/SpectralOpsUtils.h.
// input should be a tensor of same size as full (twosided)
// signals, but only contains half (onesided) of the values.
// This function modifies inplace.
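// Example: an rfft of a length-6 signal stores bins 0..3; this fills bins 4 and 5 of
// the two-sided output with conj(bin 2) and conj(bin 1) respectively, while the
// onesided half is copied elsewhere.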
void _fft_fill_with_conjugate_symmetry_cuda_(
ScalarType dtype, IntArrayRef mirror_dims, IntArrayRef signal_half_sizes,
IntArrayRef in_strides, const void * in_data,
IntArrayRef out_strides, void * out_data) {
// Do the actual conjugate mirroring.
// TODO: consider adding a 32bit indexed kernel for improved performance
auto* in_strides_ptr = in_strides.data();
const int ndim = in_strides.size();
const int64_t element_size = scalarTypeToTypeMeta(dtype).itemsize();
OffsetCalculator<1, int64_t> input_offset_calculator(
ndim, signal_half_sizes.data(), &in_strides_ptr, &element_size);
HermitianSymmetryOffsetCalculator<int64_t> output_offset_calculator(
signal_half_sizes, out_strides, mirror_dims, element_size);
const auto numel = c10::multiply_integers(signal_half_sizes);
AT_DISPATCH_COMPLEX_TYPES(dtype, "_fft_fill_with_conjugate_symmetry", [&] {
using namespace cuda::detail;
hipLaunchKernelGGL(( _fft_conjugate_copy_kernel),
dim3(GET_BLOCKS(numel)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
numel,
static_cast<scalar_t*>(out_data),
static_cast<const scalar_t*>(in_data),
input_offset_calculator,
output_offset_calculator);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
REGISTER_DISPATCH(fft_fill_with_conjugate_symmetry_stub, &_fft_fill_with_conjugate_symmetry_cuda_);
}} // at::native
| bf271cefaf3aca23cc06d2339e783932aedcf74b.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Config.h>
#include <ATen/Dispatch.h>
#include <ATen/Utils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/detail/CUDAHooksInterface.h>
#include <ATen/native/Resize.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/SpectralOpsUtils.h>
#include <ATen/native/cuda/CuFFTUtils.h>
#include <ATen/native/cuda/CuFFTPlanCache.h>
#include <c10/util/accumulate.h>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <cmath>
#include <vector>
namespace at { namespace native {
using namespace at::native::detail;
// Offset calculator for indexing in Hermitian mirrored order.
// In mirrored dims, maps linear index i to (n - i) % n
template <typename index_t>
struct HermitianSymmetryOffsetCalculator {
using offset_type = at::detail::Array<index_t, 1>;
using dim_type = std::remove_cv_t<decltype(MAX_DIMS)>;
dim_type dims;
IntDivider<index_t> sizes_[MAX_DIMS];
index_t strides_[MAX_DIMS];
uint32_t mirror_dim_; // bit mask
static_assert(MAX_DIMS < 32, "Need a bigger mask type");
HermitianSymmetryOffsetCalculator(
IntArrayRef sizes, IntArrayRef strides, IntArrayRef dim,
const int64_t element_size){
TORCH_INTERNAL_ASSERT(sizes.size() == strides.size());
TORCH_INTERNAL_ASSERT(sizes.size() <= MAX_DIMS);
dims = sizes.size();
for (dim_type i = 0; i < MAX_DIMS; ++i) {
if (i < dims) {
sizes_[i] = IntDivider<index_t>(sizes[i]);
strides_[i] = strides[i] / element_size;
} else {
sizes_[i] = IntDivider<index_t>(1);
strides_[i] = 0;
}
}
mirror_dim_ = 0;
for (int64_t i = 0; i < dim.size(); ++i) {
mirror_dim_ |= (uint32_t{1} << dim[i]);
}
}
C10_HOST_DEVICE offset_type get(index_t linear_idx) const {
index_t offset = 0;
for (dim_type dim = 0; dim < dims; ++dim) {
auto divmod = sizes_[dim].divmod(linear_idx);
linear_idx = divmod.div;
if ((mirror_dim_ & (uint32_t{1} << dim)) == 0) {
offset += divmod.mod * strides_[dim];
} else if (divmod.mod != 0) {
offset += (sizes_[dim].divisor - divmod.mod) * strides_[dim];
}
}
offset_type offsets;
offsets[0] = offset;
return offsets;
}
};
// out[:] = conj(in[:]) where in and out ordering is generalized by offset calculators
template <typename scalar_t, typename inp_calc_t, typename out_calc_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void _fft_conjugate_copy_kernel(
int64_t numel, scalar_t * out_data, const scalar_t * in_data,
inp_calc_t ic, out_calc_t oc) {
CUDA_KERNEL_LOOP_TYPE(index, numel, int64_t) {
auto in_offset = ic.get(index)[0];
auto out_offset = oc.get(index)[0];
out_data[out_offset] = std::conj(in_data[in_offset]);
}
}
// In real-to-complex transform, cuFFT only fills half of the values due to
// conjugate symmetry. See native/SpectralUtils.h for more details.
// The following function fills in the other half with symmetry in
// case of real-to-complex transform with onesided=False flag.
// See NOTE [ Fourier Transform Conjugate Symmetry ] in native/SpectralOpsUtils.h.
// input should be a tensor of same size as full (twosided)
// signals, but only contains half (onesided) of the values.
// This function modifies inplace.
void _fft_fill_with_conjugate_symmetry_cuda_(
ScalarType dtype, IntArrayRef mirror_dims, IntArrayRef signal_half_sizes,
IntArrayRef in_strides, const void * in_data,
IntArrayRef out_strides, void * out_data) {
// Do the actual conjugate mirroring.
// TODO: consider adding a 32bit indexed kernel for improved performance
auto* in_strides_ptr = in_strides.data();
const int ndim = in_strides.size();
const int64_t element_size = scalarTypeToTypeMeta(dtype).itemsize();
OffsetCalculator<1, int64_t> input_offset_calculator(
ndim, signal_half_sizes.data(), &in_strides_ptr, &element_size);
HermitianSymmetryOffsetCalculator<int64_t> output_offset_calculator(
signal_half_sizes, out_strides, mirror_dims, element_size);
const auto numel = c10::multiply_integers(signal_half_sizes);
AT_DISPATCH_COMPLEX_TYPES(dtype, "_fft_fill_with_conjugate_symmetry", [&] {
using namespace cuda::detail;
_fft_conjugate_copy_kernel<<<
GET_BLOCKS(numel), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
numel,
static_cast<scalar_t*>(out_data),
static_cast<const scalar_t*>(in_data),
input_offset_calculator,
output_offset_calculator);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
REGISTER_DISPATCH(fft_fill_with_conjugate_symmetry_stub, &_fft_fill_with_conjugate_symmetry_cuda_);
}} // at::native
|
b60fb489b8d7441a324e1bfa1b2f5e884a203232.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <hip/hip_runtime.h>
// #include <helper_cuda.h>
// the __global__ keyword changes the function to a CUDA Kernel
__global__
void add(int n, float *x, float *y, float *z)
{
  // global index of the current thread across the whole grid
int index = blockIdx.x * blockDim.x + threadIdx.x;
  // total number of threads in the grid (the stride of the loop below)
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] + y[i];
}
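// Illustration of the grid-stride loop above: with, say, 4096 threads in total
// and n = 1<<20, thread 0 handles elements 0, 4096, 8192, ..., so the kernel
// remains correct for any launch configuration, not just one thread per element.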
int main(void)
{
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess)
{
printf("hipGetDeviceCount returned %d\n->%s\n",
static_cast<int>(error_id), hipGetErrorString(error_id));
exit(EXIT_FAILURE);
}
int dev = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
int maxThreadsPerMultiProcessor = deviceProp.maxThreadsPerMultiProcessor;
int maxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
std::cout << "Max Threads Per Multi Processor: " << maxThreadsPerMultiProcessor << std::endl;
std::cout << "Max Threads Per Block: " << maxThreadsPerBlock << std::endl;
int N = 1<<20;
// create and allocate the memory
// this is called Unified Memory - accessible from CPU or GPU
float *x, *y, *z;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
hipMallocManaged(&z, N*sizeof(float));
for (int i = 0; i < N; i++)
{
x[i] = 2.0f;
y[i] = 5.0f;
}
  // Run the kernel over 1M elements on the GPU
  // 1. Number of thread blocks in the grid
  // 2. Number of threads in a thread block
int blocksize = maxThreadsPerBlock;
int numblocks = (N + blocksize - 1) / blocksize;
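  // The rounding-up division guarantees a thread for every element even when N
  // is not a multiple of blocksize; e.g. with N = 1<<20 and a typical
  // 1024-thread block this gives (1048576 + 1023) / 1024 = 1024 blocks.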
hipLaunchKernelGGL(( add), dim3(numblocks), dim3(blocksize), 0, 0, N, x, y, z);
// wait for gpu to finish before accessing on host
hipDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < N; i++)
{
// std::cout << "Amount: " << z[i] << std::endl;
maxError = fmax(maxError, fabs(z[i] - 7.0f));
}
std::cout << "Max Error: " << maxError << std::endl;
// free memory
hipFree(x);
  hipFree(y);
  hipFree(z);
return 0;
} | b60fb489b8d7441a324e1bfa1b2f5e884a203232.cu | #include <iostream>
#include <math.h>
#include <cuda_runtime.h>
// #include <helper_cuda.h>
// the __global__ keyword changes the function to a CUDA Kernel
__global__
void add(int n, float *x, float *y, float *z)
{
// index of the current thread within it's block
int index = blockIdx.x * blockDim.x + threadIdx.x;
// number of threads in the block
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] + y[i];
}
int main(void)
{
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess)
{
printf("cudaGetDeviceCount returned %d\n->%s\n",
static_cast<int>(error_id), cudaGetErrorString(error_id));
exit(EXIT_FAILURE);
}
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
int maxThreadsPerMultiProcessor = deviceProp.maxThreadsPerMultiProcessor;
int maxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
std::cout << "Max Threads Per Multi Processor: " << maxThreadsPerMultiProcessor << std::endl;
std::cout << "Max Threads Per Block: " << maxThreadsPerBlock << std::endl;
int N = 1<<20;
// create and allocate the memory
// this is called Unified Memory - accessible from CPU or GPU
float *x, *y, *z;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
cudaMallocManaged(&z, N*sizeof(float));
for (int i = 0; i < N; i++)
{
x[i] = 2.0f;
y[i] = 5.0f;
}
  // Run the kernel over 1M elements on the GPU
  // 1. Number of thread blocks in the grid
  // 2. Number of threads in a thread block
int blocksize = maxThreadsPerBlock;
int numblocks = (N + blocksize - 1) / blocksize;
add<<<numblocks, blocksize>>>(N, x, y, z);
// wait for gpu to finish before accessing on host
cudaDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < N; i++)
{
// std::cout << "Amount: " << z[i] << std::endl;
maxError = fmax(maxError, fabs(z[i] - 7.0f));
}
std::cout << "Max Error: " << maxError << std::endl;
// free memory
cudaFree(x);
  cudaFree(y);
  cudaFree(z);
return 0;
} |
5bb5689f4628b06f6406a75a2383f1f7d116ac28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void gcrs_m_1_w_4_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 4;
int i,j;
long result = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result;
}
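// Note on the XOR line above: each bm entry supplies packed bits of the
// generator bit matrix, and (((matrixInt & (bitInt << off)) >> off) * fullOneBit)
// stretches one selected bit into an all-zeros or all-ones 64-bit mask, so the
// corresponding shared_data word is either XORed into the result or skipped
// without branching. The kernels below repeat the same pattern for the other
// m = 1..4 parity-word counts and w = 4..8 bit widths.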
__global__ void gcrs_m_1_w_5_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 5;
int i,j;
long result = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result;
}
__global__ void gcrs_m_1_w_6_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 6;
int i,j;
long result = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result;
}
__global__ void gcrs_m_1_w_7_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 7;
int i,j;
long result = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result;
}
__global__ void gcrs_m_1_w_8_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 8;
int i,j;
long result = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result;
}
__global__ void gcrs_m_2_w_4_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 4;
int i,j;
long result[2];
result[0] = 0;
result[1] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
}
__global__ void gcrs_m_2_w_5_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 5;
int i,j;
long result[2];
result[0] = 0;
result[1] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
}
__global__ void gcrs_m_2_w_6_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 6;
int i,j;
long result[2];
result[0] = 0;
result[1] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
}
__global__ void gcrs_m_2_w_7_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 7;
int i,j;
long result[2];
result[0] = 0;
result[1] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
}
__global__ void gcrs_m_2_w_8_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 8;
int i,j;
long result[2];
result[0] = 0;
result[1] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
}
__global__ void gcrs_m_3_w_4_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 4;
int i,j;
long result[3];
result[0] = 0;
result[1] = 0;
result[2] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
}
__global__ void gcrs_m_3_w_5_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 5;
int i,j;
long result[3];
result[0] = 0;
result[1] = 0;
result[2] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
}
__global__ void gcrs_m_3_w_6_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 6;
int i,j;
long result[3];
result[0] = 0;
result[1] = 0;
result[2] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
}
__global__ void gcrs_m_3_w_7_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 7;
int i,j;
long result[3];
result[0] = 0;
result[1] = 0;
result[2] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
}
__global__ void gcrs_m_3_w_8_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 8;
int i,j;
long result[3];
result[0] = 0;
result[1] = 0;
result[2] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
}
__global__ void gcrs_m_4_w_4_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 4;
int i,j;
long result[4];
result[0] = 0;
result[1] = 0;
result[2] = 0;
result[3] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
result[3] = result[3] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 3*w))) >> (group_inner_offset + 3*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
out[idx + 3 * size] = result[3];
}
__global__ void gcrs_m_4_w_5_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 5;
int i,j;
long result[4];
result[0] = 0;
result[1] = 0;
result[2] = 0;
result[3] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
result[3] = result[3] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 3*w))) >> (group_inner_offset + 3*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
out[idx + 3 * size] = result[3];
}
__global__ void gcrs_m_4_w_6_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 6;
int i,j;
long result[4];
result[0] = 0;
result[1] = 0;
result[2] = 0;
result[3] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
result[3] = result[3] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 3*w))) >> (group_inner_offset + 3*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
out[idx + 3 * size] = result[3];
}
__global__ void gcrs_m_4_w_7_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 7;
int i,j;
long result[4];
result[0] = 0;
result[1] = 0;
result[2] = 0;
result[3] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
result[3] = result[3] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 3*w))) >> (group_inner_offset + 3*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
out[idx + 3 * size] = result[3];
}
__global__ void gcrs_m_4_w_8_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 8;
int i,j;
long result[4];
result[0] = 0;
result[1] = 0;
result[2] = 0;
result[3] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
result[3] = result[3] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 3*w))) >> (group_inner_offset + 3*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
out[idx + 3 * size] = result[3];
}
void m_1_w_4_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_1_w_4_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
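// The wrappers below all follow the same shape: threadDimX * sizeof(long)
// bytes of dynamic shared memory are requested, i.e. one long per thread,
// matching the shared_data[threadIdx.x] staging inside the kernels; only the
// kernel symbol changes from one wrapper to the next.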
void m_1_w_5_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_1_w_5_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_1_w_6_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_1_w_6_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_1_w_7_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_1_w_7_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_1_w_8_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_1_w_8_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_2_w_4_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_2_w_4_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_2_w_5_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_2_w_5_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_2_w_6_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_2_w_6_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_2_w_7_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_2_w_7_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_2_w_8_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_2_w_8_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_3_w_4_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_3_w_4_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_3_w_5_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_3_w_5_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_3_w_6_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_3_w_6_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_3_w_7_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_3_w_7_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_3_w_8_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_3_w_8_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_4_w_4_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_4_w_4_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_4_w_5_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_4_w_5_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_4_w_6_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_4_w_6_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_4_w_7_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_4_w_7_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_4_w_8_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_4_w_8_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void (*coding_func_array[])(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong) = {
m_1_w_4_coding,m_1_w_5_coding,m_1_w_6_coding,m_1_w_7_coding,m_1_w_8_coding,
m_2_w_4_coding,m_2_w_5_coding,m_2_w_6_coding,m_2_w_7_coding,m_2_w_8_coding,
m_3_w_4_coding,m_3_w_5_coding,m_3_w_6_coding,m_3_w_7_coding,m_3_w_8_coding,
m_4_w_4_coding,m_4_w_5_coding,m_4_w_6_coding,m_4_w_7_coding,m_4_w_8_coding
};
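// Sketch of how a caller might select an entry (assuming it follows the
// row-major layout above, five w values per m value):
//   coding_func_array[(m - 1) * 5 + (w - 4)](k, index, dataPtr, codeDevPtr,
//                                            bitMatrixPtr, threadDimX,
//                                            blockDimX, workSizePerGridInLong);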
| 5bb5689f4628b06f6406a75a2383f1f7d116ac28.cu | __global__ void gcrs_m_1_w_4_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 4;
int i,j;
long result = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result;
}
__global__ void gcrs_m_1_w_5_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 5;
int i,j;
long result = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result;
}
__global__ void gcrs_m_1_w_6_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 6;
int i,j;
long result = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result;
}
__global__ void gcrs_m_1_w_7_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 7;
int i,j;
long result = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result;
}
__global__ void gcrs_m_1_w_8_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 8;
int i,j;
long result = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result = result ^ ( (((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result;
}
__global__ void gcrs_m_2_w_4_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 4;
int i,j;
long result[2];
result[0] = 0;
result[1] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
}
__global__ void gcrs_m_2_w_5_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 5;
int i,j;
long result[2];
result[0] = 0;
result[1] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
}
__global__ void gcrs_m_2_w_6_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 6;
int i,j;
long result[2];
result[0] = 0;
result[1] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
}
__global__ void gcrs_m_2_w_7_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 7;
int i,j;
long result[2];
result[0] = 0;
result[1] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
}
__global__ void gcrs_m_2_w_8_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 8;
int i,j;
long result[2];
result[0] = 0;
result[1] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
}
__global__ void gcrs_m_3_w_4_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 4;
int i,j;
long result[3];
result[0] = 0;
result[1] = 0;
result[2] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
}
__global__ void gcrs_m_3_w_5_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 5;
int i,j;
long result[3];
result[0] = 0;
result[1] = 0;
result[2] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
}
__global__ void gcrs_m_3_w_6_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 6;
int i,j;
long result[3];
result[0] = 0;
result[1] = 0;
result[2] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
}
__global__ void gcrs_m_3_w_7_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
  extern __shared__ long shared_data[];
int w = 7;
int i,j;
long result[3];
result[0] = 0;
result[1] = 0;
result[2] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
}
__global__ void gcrs_m_3_w_8_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 8;
int i,j;
long result[3];
result[0] = 0;
result[1] = 0;
result[2] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
}
__global__ void gcrs_m_4_w_4_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 4;
int i,j;
long result[4];
result[0] = 0;
result[1] = 0;
result[2] = 0;
result[3] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
result[3] = result[3] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 3*w))) >> (group_inner_offset + 3*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
out[idx + 3 * size] = result[3];
}
__global__ void gcrs_m_4_w_5_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 5;
int i,j;
long result[4];
result[0] = 0;
result[1] = 0;
result[2] = 0;
result[3] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
result[3] = result[3] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 3*w))) >> (group_inner_offset + 3*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
out[idx + 3 * size] = result[3];
}
__global__ void gcrs_m_4_w_6_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 6;
int i,j;
long result[4];
result[0] = 0;
result[1] = 0;
result[2] = 0;
result[3] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
result[3] = result[3] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 3*w))) >> (group_inner_offset + 3*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
out[idx + 3 * size] = result[3];
}
__global__ void gcrs_m_4_w_7_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 7;
int i,j;
long result[4];
result[0] = 0;
result[1] = 0;
result[2] = 0;
result[3] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
result[3] = result[3] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 3*w))) >> (group_inner_offset + 3*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
out[idx + 3 * size] = result[3];
}
__global__ void gcrs_m_4_w_8_coding_dotprod(
int k, int index,
const long *__restrict in,
long *__restrict out,
const unsigned int *__restrict bm,
int size)
{
HIP_DYNAMIC_SHARED(long, shared_data);
int w = 8;
int i,j;
long result[4];
result[0] = 0;
result[1] = 0;
result[2] = 0;
result[3] = 0;
const unsigned long fullOneBit = 0xFFFFFFFFFFFFFFFF;
int worksize_perblock = blockDim.x / w * w;
const unsigned int idx = worksize_perblock * blockIdx.x + threadIdx.x;
if (threadIdx.x >= worksize_perblock) {
return;
}
if (idx >= size) {
return;
}
int group_offset = (threadIdx.x / w) * w;
int group_inner_offset = threadIdx.x % w;
// row for each thread in the bitmatrix * row size which is k * w
unsigned int bitInt = 0x01;
unsigned int matrixInt;
for ( i = 0; i < k; i++ ) {
shared_data[threadIdx.x] = *(in + i*size + idx);
__syncthreads();
#pragma unroll
for ( j = 0; j < w; j++ ) {
matrixInt = bm[index];
result[0] = result[0] ^ ((((matrixInt & (bitInt<< group_inner_offset)) >> group_inner_offset) * fullOneBit) & shared_data[group_offset + j]);
result[1] = result[1] ^ ((((matrixInt & (bitInt<< (group_inner_offset+w))) >> (group_inner_offset+w)) * fullOneBit) & shared_data[group_offset + j]);
result[2] = result[2] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 2*w))) >> (group_inner_offset + 2*w)) * fullOneBit) & shared_data[group_offset + j]);
result[3] = result[3] ^ ((((matrixInt & (bitInt<< (group_inner_offset + 3*w))) >> (group_inner_offset + 3*w)) * fullOneBit) & shared_data[group_offset + j]);
++index;
}
__syncthreads();
}
out[idx] = result[0];
out[idx + size] = result[1];
out[idx + 2 * size] = result[2];
out[idx + 3 * size] = result[3];
}
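/*
 * Host-side launch wrappers. Each m_<m>_w_<w>_coding() configures a 1-D grid of
 * blockDimX blocks with threadDimX threads and launches the matching
 * gcrs_m_<m>_w_<w>_coding_dotprod kernel, passing threadDimX * sizeof(long)
 * bytes of dynamic shared memory (one long staging slot per thread, used as
 * shared_data inside the kernels).
 */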
void m_1_w_4_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_1_w_4_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_1_w_5_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_1_w_5_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_1_w_6_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_1_w_6_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_1_w_7_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_1_w_7_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_1_w_8_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_1_w_8_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_2_w_4_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_2_w_4_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_2_w_5_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_2_w_5_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_2_w_6_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_2_w_6_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_2_w_7_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_2_w_7_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_2_w_8_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_2_w_8_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_3_w_4_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_3_w_4_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_3_w_5_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_3_w_5_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_3_w_6_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_3_w_6_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_3_w_7_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_3_w_7_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_3_w_8_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_3_w_8_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_4_w_4_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_4_w_4_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_4_w_5_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_4_w_5_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_4_w_6_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_4_w_6_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_4_w_7_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_4_w_7_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
void m_4_w_8_coding(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong)
{
dim3 gridDim(blockDimX, 1, 1);
dim3 blockDim(threadDimX, 1, 1);
hipLaunchKernelGGL(gcrs_m_4_w_8_coding_dotprod, gridDim, blockDim, threadDimX*sizeof(long), 0,
k, index, (long *)dataPtr, (long *)codeDevPtr, bitMatrixPtr, workSizePerGridInLong);
}
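/*
 * Dispatch table over the wrappers above, ordered m = 1..4 (outer) and
 * w = 4..8 (inner), i.e. entry (m - 1) * 5 + (w - 4) is m_<m>_w_<w>_coding.
 */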
void (*coding_func_array[])(int k, int index,
char *dataPtr, char *codeDevPtr,
const unsigned int *bitMatrixPtr,
int threadDimX,int blockDimX,
int workSizePerGridInLong) = {
m_1_w_4_coding,m_1_w_5_coding,m_1_w_6_coding,m_1_w_7_coding,m_1_w_8_coding,
m_2_w_4_coding,m_2_w_5_coding,m_2_w_6_coding,m_2_w_7_coding,m_2_w_8_coding,
m_3_w_4_coding,m_3_w_5_coding,m_3_w_6_coding,m_3_w_7_coding,m_3_w_8_coding,
m_4_w_4_coding,m_4_w_5_coding,m_4_w_6_coding,m_4_w_7_coding,m_4_w_8_coding
};
|
3cd2ab0d07a314fb56e773aae5a4e63d3fd83880.hip | // !!! This is a file automatically generated by hipify!!!
#include <gputk.h>
#define gpuTKCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
gpuTKLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
#define Mask_width 5
#define Mask_radius Mask_width / 2
#define TILE_WIDTH 16
#define w (TILE_WIDTH + Mask_width - 1)
#define clamp(x) (min(max((x), 0.0), 1.0))
//@@ INSERT CODE HERE
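// One possible tiled 2D convolution kernel (a sketch, not part of the original
// lab skeleton): each block computes a TILE_WIDTH x TILE_WIDTH output tile and
// stages the (TILE_WIDTH + Mask_width - 1)^2 input halo in shared memory. The
// interleaved-channel float image layout assumed here matches the copy-back in main().
__global__ void convolution(float *I, const float *M, float *P, int channels,
                            int width, int height) {
  __shared__ float N_ds[w][w];
  for (int k = 0; k < channels; k++) {
    // First loading phase: the TILE_WIDTH*TILE_WIDTH threads fill the first slots.
    int dest = threadIdx.y * TILE_WIDTH + threadIdx.x;
    int destY = dest / w, destX = dest % w;
    int srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
    int srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
    N_ds[destY][destX] = (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
                             ? I[(srcY * width + srcX) * channels + k]
                             : 0.0f;
    // Second loading phase: cover the remaining halo slots.
    dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH;
    destY = dest / w;
    destX = dest % w;
    if (destY < w) {
      srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
      srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
      N_ds[destY][destX] = (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
                               ? I[(srcY * width + srcX) * channels + k]
                               : 0.0f;
    }
    __syncthreads();
    // Each thread accumulates one output pixel from the shared tile.
    float accum = 0.0f;
    for (int y = 0; y < Mask_width; y++)
      for (int x = 0; x < Mask_width; x++)
        accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * Mask_width + x];
    int outY = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int outX = blockIdx.x * TILE_WIDTH + threadIdx.x;
    if (outY < height && outX < width)
      P[(outY * width + outX) * channels + k] = clamp(accum);
    __syncthreads();
  }
}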
int main(int argc, char *argv[]) {
gpuTKArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
gpuTKImage_t inputImage;
gpuTKImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = gpuTKArg_read(argc, argv); /* parse the input arguments */
inputImageFile = gpuTKArg_getInputFile(arg, 0);
inputMaskFile = gpuTKArg_getInputFile(arg, 1);
inputImage = gpuTKImport(inputImageFile);
hostMaskData = (float *)gpuTKImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == 5); /* mask height is fixed to 5 in this mp */
assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */
imageWidth = gpuTKImage_getWidth(inputImage);
imageHeight = gpuTKImage_getHeight(inputImage);
imageChannels = gpuTKImage_getChannels(inputImage);
outputImage = gpuTKImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = gpuTKImage_getData(inputImage);
hostOutputImageData = gpuTKImage_getData(outputImage);
gpuTKTime_start(GPU, "Doing GPU Computation (memory + compute)");
gpuTKTime_start(GPU, "Doing GPU memory allocation");
//@@ INSERT CODE HERE
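  // A minimal allocation sketch (an assumption, not the graded solution): one
  // float per pixel per channel for the images, maskRows*maskColumns floats for the mask.
  hipMalloc((void **)&deviceInputImageData,
            imageWidth * imageHeight * imageChannels * sizeof(float));
  hipMalloc((void **)&deviceOutputImageData,
            imageWidth * imageHeight * imageChannels * sizeof(float));
  hipMalloc((void **)&deviceMaskData, maskRows * maskColumns * sizeof(float));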
gpuTKTime_stop(GPU, "Doing GPU memory allocation");
gpuTKTime_start(Copy, "Copying data to the GPU");
//@@ INSERT CODE HERE
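  // Sketch: copy the host image and mask into the device buffers allocated above.
  hipMemcpy(deviceInputImageData, hostInputImageData,
            imageWidth * imageHeight * imageChannels * sizeof(float),
            hipMemcpyHostToDevice);
  hipMemcpy(deviceMaskData, hostMaskData,
            maskRows * maskColumns * sizeof(float), hipMemcpyHostToDevice);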
gpuTKTime_stop(Copy, "Copying data to the GPU");
gpuTKTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
// convolution<<<dimGrid, dimBlock>>>(deviceInputImageData, deviceMaskData,
// deviceOutputImageData, imageChannels,
// imageWidth, imageHeight);
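  // One possible launch configuration realizing the hint above: one
  // TILE_WIDTH x TILE_WIDTH block per output tile.
  dim3 dimGrid((imageWidth + TILE_WIDTH - 1) / TILE_WIDTH,
               (imageHeight + TILE_WIDTH - 1) / TILE_WIDTH, 1);
  dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
  hipLaunchKernelGGL(convolution, dimGrid, dimBlock, 0, 0, deviceInputImageData,
                     deviceMaskData, deviceOutputImageData, imageChannels,
                     imageWidth, imageHeight);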
gpuTKTime_stop(Compute, "Doing the computation on the GPU");
gpuTKTime_start(Copy, "Copying data from the GPU");
//@@ INSERT CODE HERE
hipMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
hipMemcpyDeviceToHost);
gpuTKTime_stop(Copy, "Copying data from the GPU");
gpuTKTime_stop(GPU, "Doing GPU Computation (memory + compute)");
gpuTKSolution(arg, outputImage);
//@@ Insert code here
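  // Sketch: release the device buffers allocated above.
  hipFree(deviceInputImageData);
  hipFree(deviceOutputImageData);
  hipFree(deviceMaskData);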
free(hostMaskData);
gpuTKImage_delete(outputImage);
gpuTKImage_delete(inputImage);
return 0;
}
| 3cd2ab0d07a314fb56e773aae5a4e63d3fd83880.cu | #include <gputk.h>
#define gpuTKCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
gpuTKLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
#define Mask_width 5
#define Mask_radius Mask_width / 2
#define TILE_WIDTH 16
#define w (TILE_WIDTH + Mask_width - 1)
#define clamp(x) (min(max((x), 0.0), 1.0))
//@@ INSERT CODE HERE
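// One possible tiled 2D convolution kernel (a sketch, not part of the original
// lab skeleton): each block computes a TILE_WIDTH x TILE_WIDTH output tile and
// stages the (TILE_WIDTH + Mask_width - 1)^2 input halo in shared memory. The
// interleaved-channel float image layout assumed here matches the copy-back in main().
__global__ void convolution(float *I, const float *M, float *P, int channels,
                            int width, int height) {
  __shared__ float N_ds[w][w];
  for (int k = 0; k < channels; k++) {
    // First loading phase: the TILE_WIDTH*TILE_WIDTH threads fill the first slots.
    int dest = threadIdx.y * TILE_WIDTH + threadIdx.x;
    int destY = dest / w, destX = dest % w;
    int srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
    int srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
    N_ds[destY][destX] = (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
                             ? I[(srcY * width + srcX) * channels + k]
                             : 0.0f;
    // Second loading phase: cover the remaining halo slots.
    dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH;
    destY = dest / w;
    destX = dest % w;
    if (destY < w) {
      srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
      srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
      N_ds[destY][destX] = (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
                               ? I[(srcY * width + srcX) * channels + k]
                               : 0.0f;
    }
    __syncthreads();
    // Each thread accumulates one output pixel from the shared tile.
    float accum = 0.0f;
    for (int y = 0; y < Mask_width; y++)
      for (int x = 0; x < Mask_width; x++)
        accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * Mask_width + x];
    int outY = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int outX = blockIdx.x * TILE_WIDTH + threadIdx.x;
    if (outY < height && outX < width)
      P[(outY * width + outX) * channels + k] = clamp(accum);
    __syncthreads();
  }
}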
int main(int argc, char *argv[]) {
gpuTKArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
gpuTKImage_t inputImage;
gpuTKImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = gpuTKArg_read(argc, argv); /* parse the input arguments */
inputImageFile = gpuTKArg_getInputFile(arg, 0);
inputMaskFile = gpuTKArg_getInputFile(arg, 1);
inputImage = gpuTKImport(inputImageFile);
hostMaskData = (float *)gpuTKImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == 5); /* mask height is fixed to 5 in this mp */
assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */
imageWidth = gpuTKImage_getWidth(inputImage);
imageHeight = gpuTKImage_getHeight(inputImage);
imageChannels = gpuTKImage_getChannels(inputImage);
outputImage = gpuTKImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = gpuTKImage_getData(inputImage);
hostOutputImageData = gpuTKImage_getData(outputImage);
gpuTKTime_start(GPU, "Doing GPU Computation (memory + compute)");
gpuTKTime_start(GPU, "Doing GPU memory allocation");
//@@ INSERT CODE HERE
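  // A minimal allocation sketch (an assumption, not the graded solution): one
  // float per pixel per channel for the images, maskRows*maskColumns floats for the mask.
  cudaMalloc((void **)&deviceInputImageData,
             imageWidth * imageHeight * imageChannels * sizeof(float));
  cudaMalloc((void **)&deviceOutputImageData,
             imageWidth * imageHeight * imageChannels * sizeof(float));
  cudaMalloc((void **)&deviceMaskData, maskRows * maskColumns * sizeof(float));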
gpuTKTime_stop(GPU, "Doing GPU memory allocation");
gpuTKTime_start(Copy, "Copying data to the GPU");
//@@ INSERT CODE HERE
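  // Sketch: copy the host image and mask into the device buffers allocated above.
  cudaMemcpy(deviceInputImageData, hostInputImageData,
             imageWidth * imageHeight * imageChannels * sizeof(float),
             cudaMemcpyHostToDevice);
  cudaMemcpy(deviceMaskData, hostMaskData,
             maskRows * maskColumns * sizeof(float), cudaMemcpyHostToDevice);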
gpuTKTime_stop(Copy, "Copying data to the GPU");
gpuTKTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
// convolution<<<dimGrid, dimBlock>>>(deviceInputImageData, deviceMaskData,
// deviceOutputImageData, imageChannels,
// imageWidth, imageHeight);
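  // One possible launch configuration realizing the hint above: one
  // TILE_WIDTH x TILE_WIDTH block per output tile.
  dim3 dimGrid((imageWidth + TILE_WIDTH - 1) / TILE_WIDTH,
               (imageHeight + TILE_WIDTH - 1) / TILE_WIDTH, 1);
  dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
  convolution<<<dimGrid, dimBlock>>>(deviceInputImageData, deviceMaskData,
                                     deviceOutputImageData, imageChannels,
                                     imageWidth, imageHeight);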
gpuTKTime_stop(Compute, "Doing the computation on the GPU");
gpuTKTime_start(Copy, "Copying data from the GPU");
//@@ INSERT CODE HERE
cudaMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
cudaMemcpyDeviceToHost);
gpuTKTime_stop(Copy, "Copying data from the GPU");
gpuTKTime_stop(GPU, "Doing GPU Computation (memory + compute)");
gpuTKSolution(arg, outputImage);
//@@ Insert code here
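  // Sketch: release the device buffers allocated above.
  cudaFree(deviceInputImageData);
  cudaFree(deviceOutputImageData);
  cudaFree(deviceMaskData);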
free(hostMaskData);
gpuTKImage_delete(outputImage);
gpuTKImage_delete(inputImage);
return 0;
}
|
92941643b6093224db6b07477e679b649e6091aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#include <time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 512;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long long representing the current time in microseconds
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) +tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
double elapsed_time(long long start_time, long long end_time) {
return (double) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
void cuda_print_double_array(double *array_GPU, size_t size) {
//allocate temporary array for printing
double* mem = (double*) malloc(sizeof (double) *size);
//transfer data from device
hipMemcpy(mem, array_GPU, sizeof (double) *size, hipMemcpyDeviceToHost);
printf("PRINTING ARRAY VALUES\n");
//print values in memory
for (size_t i = 0; i < size; ++i) {
printf("[%d]:%0.6f\n", i, mem[i]);
}
printf("FINISHED PRINTING ARRAY VALUES\n");
//clean up memory
free(mem);
mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a double representing the sum
********************************/
__device__ double calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
double likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((double) (I[ind[index * numOnes + x]] - 100), 2) - pow((double) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(double * CDF, double * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a double representing a randomly generated number from a uniform distribution with range [0, 1)
******************************/
__device__ double d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index) {
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * PI * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
double test_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ double d_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = d_randu(seed, index);
double v = d_randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ double updateWeights(double * weights, double * likelihood, int Nparticles) {
int x;
double sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
while (CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/** added this function. was missing in original double version.
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
__device__ double dev_round_double(double value) {
int newValue = (int) (value);
if (value - newValue < .5f)
return newValue;
else
return newValue + 1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
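// Resampling lookup: each particle does a linear scan of the CDF for the first
// entry >= u[i]; the binary-search helper findIndexBin above is not used here.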
__global__ void find_index_kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, double * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (CDF[x] >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
//weights[i] = 1 / ((double) (Nparticles)); //moved this code to the beginning of likelihood kernel
}
__syncthreads();
}
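/*
 * Normalizes the particle weights by the total stored in partial_sums[0]
 * (produced by sum_kernel), rebuilds the CDF on global thread 0, and fills the
 * systematic-resampling offsets u[i] = u1 + i/Nparticles. Note that
 * __syncthreads() only synchronizes within a block, so u[0] written by global
 * thread 0 is consumed by thread 0 of every block in the same launch.
 */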
__global__ void normalize_weights_kernel(double * weights, int Nparticles, double* partial_sums, double * CDF, double * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ double u1, sumWeights;
if(0 == threadIdx.x)
sumWeights = partial_sums[0];
__syncthreads();
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
__syncthreads();
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u[0] = (1 / ((double) (Nparticles))) * d_randu(seed, i); // do this to allow all threads in all blocks to use the same u1
}
__syncthreads();
if(0 == threadIdx.x)
u1 = u[0];
__syncthreads();
if (i < Nparticles) {
u[i] = u1 + i / ((double) (Nparticles));
}
}
__global__ void sum_kernel(double* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
double sum = 0.0;
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
for (x = 0; x < num_blocks; x++) {
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
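/*
 * Per frame: copies the resampled positions back into arrayX/arrayY, applies
 * the motion model (+1 in x, -2 in y plus Gaussian noise), computes each
 * particle's likelihood from the foreground/background intensities (228/100),
 * scales its weight, and reduces the weights block-wise into partial_sums.
 */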
__global__ void likelihood_kernel(double * arrayX, double * arrayY, double * xj, double * yj, double * CDF, int * ind, int * objxy, double * likelihood, unsigned char * I, double * u, double * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, double* partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
int indX, indY;
__shared__ double buffer[512];
if (i < Nparticles) {
arrayX[i] = xj[i];
arrayY[i] = yj[i];
weights[i] = 1 / ((double) (Nparticles)); //Donnie - moved this line from end of find_index_kernel to prevent all weights from being reset before calculating position on final iteration.
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
}
__syncthreads();
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
//added dev_round_double() to be consistent with roundDouble
indX = dev_round_double(arrayX[i]) + objxy[y * 2 + 1];
indY = dev_round_double(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
weights[i] = weights[i] * exp(likelihood[i]); //Donnie Newell - added the missing exponential function call
}
buffer[threadIdx.x] = 0.0;
__syncthreads();
if (i < Nparticles) {
buffer[threadIdx.x] = weights[i];
}
__syncthreads();
//this doesn't account for the last block that isn't full
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
double roundDouble(double value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
double distance = sqrt(pow((double) (x - radius + 1), 2) + pow((double) (y - radius + 1), 2));
if (distance < radius)
disk[x * diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
double distance = sqrt(pow((double) (x - posX), 2) + pow((double) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY * Nfr;
/*get object centers*/
int x0 = (int) roundDouble(IszY / 2.0);
int y0 = (int) roundDouble(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k-1));
yk = abs(y0 - 2 * (k-1));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) * IszX * IszY * Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles) {
int max_size = IszX * IszY*Nfr;
//original particle centroid
double xe = roundDouble(IszY / 2.0);
double ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
double * weights = (double *) malloc(sizeof (double) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((double) (Nparticles));
}
//initial likelihood to 0.0
double * likelihood = (double *) malloc(sizeof (double) *Nparticles);
double * arrayX = (double *) malloc(sizeof (double) *Nparticles);
double * arrayY = (double *) malloc(sizeof (double) *Nparticles);
double * xj = (double *) malloc(sizeof (double) *Nparticles);
double * yj = (double *) malloc(sizeof (double) *Nparticles);
double * CDF = (double *) malloc(sizeof (double) *Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
double * likelihood_GPU;
unsigned char * I_GPU;
double * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
double * u = (double *) malloc(sizeof (double) *Nparticles);
double * u_GPU;
int * seed_GPU;
double* partial_sums;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &likelihood_GPU, sizeof (double) *Nparticles));
//set likelihood to zero
check_error(hipMemset((void *) likelihood_GPU, 0, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &weights_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
check_error(hipMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
check_error(hipMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
check_error(hipMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
check_error(hipMalloc((void **) &partial_sums, sizeof (double) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k;
int indX, indY;
//start send
long long send_start = get_time();
check_error(hipMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, hipMemcpyHostToDevice));
check_error(hipMemcpy(objxy_GPU, objxy, sizeof (int) *2 * countOnes, hipMemcpyHostToDevice));
check_error(hipMemcpy(weights_GPU, weights, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(xj_GPU, xj, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(yj_GPU, yj, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, hipMemcpyHostToDevice));
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
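  // Per-frame pipeline: likelihood_kernel (propagate + weight), sum_kernel
  // (total weight), normalize_weights_kernel (normalize + CDF + u), then
  // find_index_kernel (systematic resampling).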
for (k = 1; k < Nfr; k++) {
likelihood_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel << < num_blocks, threads_per_block >> > (partial_sums, Nparticles);
normalize_weights_kernel << < num_blocks, threads_per_block >> > (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
find_index_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
}//end loop
//block till kernels are finished
hipDeviceSynchronize();
long long back_time = get_time();
hipFree(xj_GPU);
hipFree(yj_GPU);
hipFree(CDF_GPU);
hipFree(u_GPU);
hipFree(likelihood_GPU);
hipFree(I_GPU);
hipFree(objxy_GPU);
hipFree(ind_GPU);
hipFree(seed_GPU);
hipFree(partial_sums);
long long free_time = get_time();
check_error(hipMemcpy(arrayX, arrayX_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
long long arrayX_time = get_time();
check_error(hipMemcpy(arrayY, arrayY_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
long long arrayY_time = get_time();
check_error(hipMemcpy(weights, weights_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt(pow((double) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((double) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
//CUDA freeing of memory
hipFree(weights_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
int main(int argc, char * argv[]) {
char* usage = "double.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if (argc != 9) {
printf("%s\n", usage);
return 0;
}
//check args deliminators
if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") || strcmp(argv[7], "-np")) {
printf("%s\n", usage);
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to an integer
if (sscanf(argv[2], "%d", &IszX) != 1) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if (IszX <= 0) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[4], "%d", &IszY) != 1) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if (IszY <= 0) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[6], "%d", &Nfr) != 1) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if (Nfr <= 0) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[8], "%d", &Nparticles) != 1) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if (Nparticles <= 0) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
| 92941643b6093224db6b07477e679b649e6091aa.cu | #include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 512;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long long representing the current time in microseconds
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) +tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
double elapsed_time(long long start_time, long long end_time) {
return (double) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
void cuda_print_double_array(double *array_GPU, size_t size) {
//allocate temporary array for printing
double* mem = (double*) malloc(sizeof (double) *size);
//transfer data from device
cudaMemcpy(mem, array_GPU, sizeof (double) *size, cudaMemcpyDeviceToHost);
printf("PRINTING ARRAY VALUES\n");
//print values in memory
for (size_t i = 0; i < size; ++i) {
printf("[%d]:%0.6f\n", i, mem[i]);
}
printf("FINISHED PRINTING ARRAY VALUES\n");
//clean up memory
free(mem);
mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a double representing the sum
********************************/
__device__ double calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
double likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((double) (I[ind[index * numOnes + x]] - 100), 2) - pow((double) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(double * CDF, double * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a double representing a randomly generated number from a uniform distribution with range [0, 1)
******************************/
__device__ double d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index) {
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * PI * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
double test_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ double d_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = d_randu(seed, index);
double v = d_randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ double updateWeights(double * weights, double * likelihood, int Nparticles) {
int x;
double sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
while (CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/** added this function. was missing in original double version.
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
__device__ double dev_round_double(double value) {
int newValue = (int) (value);
if (value - newValue < .5f)
return newValue;
else
return newValue + 1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
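// Resampling lookup: each particle does a linear scan of the CDF for the first
// entry >= u[i]; the binary-search helper findIndexBin above is not used here.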
__global__ void find_index_kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, double * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (CDF[x] >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
//weights[i] = 1 / ((double) (Nparticles)); //moved this code to the beginning of likelihood kernel
}
__syncthreads();
}
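/*
 * Normalizes the particle weights by the total stored in partial_sums[0]
 * (produced by sum_kernel), rebuilds the CDF on global thread 0, and fills the
 * systematic-resampling offsets u[i] = u1 + i/Nparticles. Note that
 * __syncthreads() only synchronizes within a block, so u[0] written by global
 * thread 0 is consumed by thread 0 of every block in the same launch.
 */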
__global__ void normalize_weights_kernel(double * weights, int Nparticles, double* partial_sums, double * CDF, double * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ double u1, sumWeights;
if(0 == threadIdx.x)
sumWeights = partial_sums[0];
__syncthreads();
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
__syncthreads();
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u[0] = (1 / ((double) (Nparticles))) * d_randu(seed, i); // do this to allow all threads in all blocks to use the same u1
}
__syncthreads();
if(0 == threadIdx.x)
u1 = u[0];
__syncthreads();
if (i < Nparticles) {
u[i] = u1 + i / ((double) (Nparticles));
}
}
__global__ void sum_kernel(double* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
double sum = 0.0;
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
for (x = 0; x < num_blocks; x++) {
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
__global__ void likelihood_kernel(double * arrayX, double * arrayY, double * xj, double * yj, double * CDF, int * ind, int * objxy, double * likelihood, unsigned char * I, double * u, double * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, double* partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
int indX, indY;
__shared__ double buffer[512];
if (i < Nparticles) {
arrayX[i] = xj[i];
arrayY[i] = yj[i];
weights[i] = 1 / ((double) (Nparticles)); //Donnie - moved this line from end of find_index_kernel to prevent all weights from being reset before calculating position on final iteration.
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
}
__syncthreads();
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
//added dev_round_double() to be consistent with roundDouble
indX = dev_round_double(arrayX[i]) + objxy[y * 2 + 1];
indY = dev_round_double(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
weights[i] = weights[i] * exp(likelihood[i]); //Donnie Newell - added the missing exponential function call
}
buffer[threadIdx.x] = 0.0;
__syncthreads();
if (i < Nparticles) {
buffer[threadIdx.x] = weights[i];
}
__syncthreads();
//reduction over the whole block; threads with i >= Nparticles contribute the 0.0 written above, so a partially filled last block still sums correctly (blockDim.x is assumed to be a power of two)
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
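/* Sketch of the shared-memory reduction at the end of likelihood_kernel (illustrative,
 * small example values assumed): with blockDim.x = 8 and buffer = {1,2,3,4,5,6,7,8},
 * the strides s = 4, 2, 1 produce {6,8,10,12,..} -> {16,20,..} -> {36,..}, so buffer[0]
 * ends up holding the block's weight sum, which thread 0 writes to partial_sums[blockIdx.x];
 * sum_kernel then adds the per-block partial sums serially in a single thread. */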
/**
* Takes in a double and returns an integer that approximates to that double
* @return the input rounded down if its fractional part is < .5; otherwise rounded up
*/
double roundDouble(double value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
double distance = sqrt(pow((double) (x - radius + 1), 2) + pow((double) (y - radius + 1), 2));
if (distance < radius)
disk[x * diameter + y] = 1;
else
disk[x * diameter + y] = 0; //cells outside the radius must be cleared explicitly, since the caller allocates disk with malloc
}
}
}
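/* Example of the disk stencil (illustrative): for radius = 2 every offset of the 3x3 grid
 * lies within distance 2 of the center, so all 9 entries are 1; for the radius = 5 used by
 * particleFilter below, the 9x9 grid should drop the four corners and the eight
 * (+/-4, +/-3)-type offsets (distance >= 5), leaving 69 ones, which becomes countOnes. */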
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
double distance = sqrt(pow((double) (x - posX), 2) + pow((double) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
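/* Example of the offsets produced by getneighbors (illustrative): for radius = 2 every cell
 * of the 3x3 disk is set, so the 9 (y, x) offset pairs written are
 * (-1,-1), (0,-1), (1,-1), (-1,0), (0,0), (1,0), (-1,1), (0,1), (1,1);
 * likelihood_kernel later adds objxy[2k] to the particle's y index and objxy[2k+1] to x. */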
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY * Nfr;
/*get object centers*/
int x0 = (int) roundDouble(IszY / 2.0);
int y0 = (int) roundDouble(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k-1));
yk = abs(y0 - 2 * (k-1));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) * IszX * IszY * Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles) {
int max_size = IszX * IszY*Nfr;
//original particle centroid
double xe = roundDouble(IszY / 2.0);
double ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
double * weights = (double *) malloc(sizeof (double) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((double) (Nparticles));
}
//initial likelihood to 0.0
double * likelihood = (double *) malloc(sizeof (double) *Nparticles);
double * arrayX = (double *) malloc(sizeof (double) *Nparticles);
double * arrayY = (double *) malloc(sizeof (double) *Nparticles);
double * xj = (double *) malloc(sizeof (double) *Nparticles);
double * yj = (double *) malloc(sizeof (double) *Nparticles);
double * CDF = (double *) malloc(sizeof (double) *Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
double * likelihood_GPU;
unsigned char * I_GPU;
double * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
double * u = (double *) malloc(sizeof (double) *Nparticles);
double * u_GPU;
int * seed_GPU;
double* partial_sums;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &likelihood_GPU, sizeof (double) *Nparticles));
//set likelihood to zero
check_error(cudaMemset((void *) likelihood_GPU, 0, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &weights_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
check_error(cudaMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
check_error(cudaMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
check_error(cudaMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
check_error(cudaMalloc((void **) &partial_sums, sizeof (double) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k;
int indX, indY;
//start send
long long send_start = get_time();
check_error(cudaMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(objxy_GPU, objxy, sizeof (int) *2 * countOnes, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(weights_GPU, weights, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(xj_GPU, xj, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(yj_GPU, yj, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, cudaMemcpyHostToDevice));
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
for (k = 1; k < Nfr; k++) {
likelihood_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel << < num_blocks, threads_per_block >> > (partial_sums, Nparticles);
normalize_weights_kernel << < num_blocks, threads_per_block >> > (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
find_index_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
}//end loop
//block till kernels are finished
cudaThreadSynchronize();
long long back_time = get_time();
cudaFree(xj_GPU);
cudaFree(yj_GPU);
cudaFree(CDF_GPU);
cudaFree(u_GPU);
cudaFree(likelihood_GPU);
cudaFree(I_GPU);
cudaFree(objxy_GPU);
cudaFree(ind_GPU);
cudaFree(seed_GPU);
cudaFree(partial_sums);
long long free_time = get_time();
check_error(cudaMemcpy(arrayX, arrayX_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
long long arrayX_time = get_time();
check_error(cudaMemcpy(arrayY, arrayY_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
long long arrayY_time = get_time();
check_error(cudaMemcpy(weights, weights_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt(pow((double) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((double) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
//CUDA freeing of memory
cudaFree(weights_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
int main(int argc, char * argv[]) {
char* usage = "double.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if (argc != 9) {
printf("%s\n", usage);
return 0;
}
//check args deliminators
if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") || strcmp(argv[7], "-np")) {
printf("%s\n", usage);
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to an integer
if (sscanf(argv[2], "%d", &IszX) == EOF) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if (IszX <= 0) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[4], "%d", &IszY) == EOF) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if (IszY <= 0) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[6], "%d", &Nfr) == EOF) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if (Nfr <= 0) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[8], "%d", &Nparticles) == EOF) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if (Nparticles <= 0) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
|
2cf83ffeae0e14e97c32cfeb20a6882418783d0d.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <fstream>
#include "hip/hip_runtime.h"
using namespace std;
__global__ void fast_radix_sort(int *array, int array_len) {
extern __shared__ int tmp_array[];
int *b_array = tmp_array + array_len;
int *s_array = tmp_array + array_len * 2;
int *t_array = tmp_array + array_len * 3;
tmp_array[threadIdx.x] = array[threadIdx.x + array_len * blockIdx.x];
__syncthreads();
for(int i = 0; i < sizeof(int) * 8; i++) {
b_array[threadIdx.x] = (tmp_array[threadIdx.x] >> i) & 1;
__syncthreads();
if (threadIdx.x == 0) {
s_array[0] = 0;
for (int i = 1; i < array_len + 1; i++) {
s_array[i] = s_array[i - 1] + b_array[i - 1];
}
}
__syncthreads();
if (b_array[threadIdx.x] == 0) {
t_array[threadIdx.x - s_array[threadIdx.x]] = tmp_array[threadIdx.x];
}
else {
t_array[s_array[threadIdx.x] + (array_len - s_array[array_len])] = tmp_array[threadIdx.x];
}
__syncthreads();
tmp_array[threadIdx.x] = t_array[threadIdx.x];
__syncthreads();
}
__syncthreads();
array[threadIdx.x + array_len * blockIdx.x] = tmp_array[threadIdx.x];
}
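// Illustrative trace of one bit pass of the split above (example values assumed):
// for tmp_array = {3, 1, 2} and bit i = 0, b_array = {1, 1, 0} and the prefix scan gives
// s_array = {0, 1, 2, 2} (s_array[k] = number of ones before position k), so
// s_array[array_len] = 2 ones in total. The zero (value 2, tid 2) moves to t_array[2 - 2] = t_array[0];
// the ones keep their order after the zeros: 3 -> t_array[0 + (3 - 2)] = t_array[1] and
// 1 -> t_array[1 + 1] = t_array[2], giving {2, 3, 1}. Repeating the pass for every bit
// yields a stable LSD radix sort of each block's slice of the array.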
void merge(int *array1, int *array2, int array1_len, int array2_len) {
int i = 0, j = 0, total_array_len = array1_len + array2_len;
int *new_array = new int[total_array_len];
for (int k = 0; k < total_array_len; k++) {
if (i == array1_len) {
new_array[k] = array2[j++];
}
else if (j == array2_len) {
new_array[k] = array1[i++];
}
else if (array1[i] < array2[j]) {
new_array[k] = array1[i++];
}
else {
new_array[k] = array2[j++];
}
}
memcpy(array1, new_array, sizeof(int) * total_array_len);
delete[] new_array;
}
int main(int argc, char** argv) {
int ARR_LEN = atoi(argv[1]);
// int deviceCount;
// hipDeviceProp_t deviceProp;
// //How many CUDA devices are installed in this PC.
// hipGetDeviceCount(&deviceCount);
// printf("Device count: %d\n\n", deviceCount);
// for (int i = 0; i < deviceCount; i++)
// {
// //Get information about the device
// hipGetDeviceProperties(&deviceProp, i);
// //Print information about the device
// printf("Device name: %s\n", deviceProp.name);
// printf("Total global memory: %d\n", deviceProp.totalGlobalMem);
// printf("Shared memory per block: %d\n", deviceProp.sharedMemPerBlock);
// printf("Registers per block: %d\n", deviceProp.regsPerBlock);
// printf("Warp size: %d\n", deviceProp.warpSize);
// printf("Memory pitch: %d\n", deviceProp.memPitch);
// printf("Max threads per block: %d\n", deviceProp.maxThreadsPerBlock);
// printf("Max threads dimensions: x = %d, y = %d, z = %d\n",
// deviceProp.maxThreadsDim[0],
// deviceProp.maxThreadsDim[1],
// deviceProp.maxThreadsDim[2]);
// printf("Max grid size: x = %d, y = %d, z = %d\n",
// deviceProp.maxGridSize[0],
// deviceProp.maxGridSize[1],
// deviceProp.maxGridSize[2]);
// printf("Clock rate: %d\n", deviceProp.clockRate);
// printf("Total constant memory: %d\n", deviceProp.totalConstMem);
// printf("Compute capability: %d.%d\n", deviceProp.major, deviceProp.minor);
// printf("Texture alignment: %d\n", deviceProp.textureAlignment);
// printf("Device overlap: %d\n", deviceProp.deviceOverlap);
// printf("Multiprocessor count: %d\n", deviceProp.multiProcessorCount);
// printf("Kernel execution timeout enabled: %s\n",
// deviceProp.kernelExecTimeoutEnabled ? "true" : "false");
// }
int *array = new int[ARR_LEN];
int *d_array;
int block_num, thread_num, array_len;
for (int f = 1024; f > 0; f--) {
if (ARR_LEN % f == 0) {
block_num = ARR_LEN / f;
thread_num = f;
array_len = f;
break;
}
}
cout << "BlockNum: " << block_num << " ThredNum: " << thread_num << " ArrayLen: " << array_len << endl;
float gpu_time, working_time;
hipEvent_t e_start, e_stop;
srand(time(NULL));
for (int i = 0; i < ARR_LEN; i++) {
array[i] = 1 + rand() % 100;
}
// for (int i = 0; i < ARR_LEN; i++) {
// printf("%d ", array[i]);
// }
// printf("\n");
hipEventCreate(&e_start);
hipEventCreate(&e_stop);
hipError_t cuda_status;
cuda_status = hipMalloc((void**)&d_array, ARR_LEN * sizeof(int));
cuda_status = hipMemcpy(d_array, array, ARR_LEN * sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(e_start);
hipLaunchKernelGGL(( fast_radix_sort), dim3(block_num), dim3(thread_num), (array_len * sizeof(int)) * 4, 0, d_array, array_len);
hipEventRecord(e_stop);
cuda_status = hipGetLastError();
if(cuda_status != hipSuccess) {
cout << " #Error# CUDA fast_radix_sort error!" << endl;
goto cuda_error;
}
hipDeviceSynchronize();
hipEventSynchronize(e_stop);
hipEventElapsedTime(&working_time, e_start, e_stop);
hipMemcpy(array, d_array, ARR_LEN * sizeof(int), hipMemcpyDeviceToHost);
double cpu_time;
clock_t c_start, c_end;
c_start = clock();
for (int i = 0; i < block_num - 1; i++) {
merge(array, array + array_len * (i + 1), array_len * (i + 1), array_len);
}
c_end = clock();
for (int i = 0; i < ARR_LEN; i++) {
printf("%d ", array[i]);
}
printf("\n");
cpu_time = (double)(c_end - c_start) / CLOCKS_PER_SEC;
cout << " Merging time: " << cpu_time << " s" << endl;
gpu_time = working_time / 1000;
cout << " GPU sorting time: " << gpu_time << " s" << endl;
cuda_error:
hipEventDestroy(e_start);
hipEventDestroy(e_stop);
hipFree(d_array);
// for (int i = 0; i < ARR_LEN; i++) {
// printf("%d ", array[i]);
// }
// printf("\n");
ofstream out("out.txt");
for (int j = 0; j < ARR_LEN; j++) {
out << array[j] << endl;
// for (int i = 0; i < block_num; i+=2) {
// merge(array + array_len * i, array + array_len * (i+j), array_len, array_len);
// }
}
out.close();
// double cpu_time;
// clock_t c_start, c_end;
// c_start = clock();
// merge(array, array + array_len, array_len, array_len);
// merge(array, array + array_len * 2, array_len * 2, array_len);
// c_end = clock();
// cpu_time = (double)(c_end - c_start) / CLOCKS_PER_SEC;
// cout << " Merging time: " << cpu_time << " s" << endl;
delete[] array;
return 0;
}
| 2cf83ffeae0e14e97c32cfeb20a6882418783d0d.cu | #include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <fstream>
#include "cuda_runtime.h"
using namespace std;
__global__ void fast_radix_sort(int *array, int array_len) {
extern __shared__ int tmp_array[];
int *b_array = tmp_array + array_len;
int *s_array = tmp_array + array_len * 2;
int *t_array = tmp_array + array_len * 3;
tmp_array[threadIdx.x] = array[threadIdx.x + array_len * blockIdx.x];
__syncthreads();
for(int i = 0; i < sizeof(int) * 8; i++) {
b_array[threadIdx.x] = (tmp_array[threadIdx.x] >> i) & 1;
__syncthreads();
if (threadIdx.x == 0) {
s_array[0] = 0;
for (int i = 1; i < array_len + 1; i++) {
s_array[i] = s_array[i - 1] + b_array[i - 1];
}
}
__syncthreads();
if (b_array[threadIdx.x] == 0) {
t_array[threadIdx.x - s_array[threadIdx.x]] = tmp_array[threadIdx.x];
}
else {
t_array[s_array[threadIdx.x] + (array_len - s_array[array_len])] = tmp_array[threadIdx.x];
}
__syncthreads();
tmp_array[threadIdx.x] = t_array[threadIdx.x];
__syncthreads();
}
__syncthreads();
array[threadIdx.x + array_len * blockIdx.x] = tmp_array[threadIdx.x];
}
void merge(int *array1, int *array2, int array1_len, int array2_len) {
int i = 0, j = 0, total_array_len = array1_len + array2_len;
int *new_array = new int[total_array_len];
for (int k = 0; k < total_array_len; k++) {
if (i == array1_len) {
new_array[k] = array2[j++];
}
else if (j == array2_len) {
new_array[k] = array1[i++];
}
else if (array1[i] < array2[j]) {
new_array[k] = array1[i++];
}
else {
new_array[k] = array2[j++];
}
}
memcpy(array1, new_array, sizeof(int) * total_array_len);
delete[] new_array;
}
int main(int argc, char** argv) {
int ARR_LEN = atoi(argv[1]);
// int deviceCount;
// cudaDeviceProp deviceProp;
// //How many CUDA devices are installed in this PC.
// cudaGetDeviceCount(&deviceCount);
// printf("Device count: %d\n\n", deviceCount);
// for (int i = 0; i < deviceCount; i++)
// {
// //Get information about the device
// cudaGetDeviceProperties(&deviceProp, i);
// //Print information about the device
// printf("Device name: %s\n", deviceProp.name);
// printf("Total global memory: %d\n", deviceProp.totalGlobalMem);
// printf("Shared memory per block: %d\n", deviceProp.sharedMemPerBlock);
// printf("Registers per block: %d\n", deviceProp.regsPerBlock);
// printf("Warp size: %d\n", deviceProp.warpSize);
// printf("Memory pitch: %d\n", deviceProp.memPitch);
// printf("Max threads per block: %d\n", deviceProp.maxThreadsPerBlock);
// printf("Max threads dimensions: x = %d, y = %d, z = %d\n",
// deviceProp.maxThreadsDim[0],
// deviceProp.maxThreadsDim[1],
// deviceProp.maxThreadsDim[2]);
// printf("Max grid size: x = %d, y = %d, z = %d\n",
// deviceProp.maxGridSize[0],
// deviceProp.maxGridSize[1],
// deviceProp.maxGridSize[2]);
// printf("Clock rate: %d\n", deviceProp.clockRate);
// printf("Total constant memory: %d\n", deviceProp.totalConstMem);
// printf("Compute capability: %d.%d\n", deviceProp.major, deviceProp.minor);
// printf("Texture alignment: %d\n", deviceProp.textureAlignment);
// printf("Device overlap: %d\n", deviceProp.deviceOverlap);
// printf("Multiprocessor count: %d\n", deviceProp.multiProcessorCount);
// printf("Kernel execution timeout enabled: %s\n",
// deviceProp.kernelExecTimeoutEnabled ? "true" : "false");
// }
int *array = new int[ARR_LEN];
int *d_array;
int block_num, thread_num, array_len;
for (int f = 1024; f > 0; f--) {
if (ARR_LEN % f == 0) {
block_num = ARR_LEN / f;
thread_num = f;
array_len = f;
break;
}
}
cout << "BlockNum: " << block_num << " ThredNum: " << thread_num << " ArrayLen: " << array_len << endl;
float gpu_time, working_time;
cudaEvent_t e_start, e_stop;
srand(time(NULL));
for (int i = 0; i < ARR_LEN; i++) {
array[i] = 1 + rand() % 100;
}
// for (int i = 0; i < ARR_LEN; i++) {
// printf("%d ", array[i]);
// }
// printf("\n");
cudaEventCreate(&e_start);
cudaEventCreate(&e_stop);
cudaError_t cuda_status;
cuda_status = cudaMalloc((void**)&d_array, ARR_LEN * sizeof(int));
cuda_status = cudaMemcpy(d_array, array, ARR_LEN * sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(e_start);
fast_radix_sort<<<block_num, thread_num, (array_len * sizeof(int)) * 4>>>(d_array, array_len);
cudaEventRecord(e_stop);
cuda_status = cudaGetLastError();
if(cuda_status != cudaSuccess) {
cout << " #Error# CUDA fast_radix_sort error!" << endl;
goto cuda_error;
}
cudaDeviceSynchronize();
cudaEventSynchronize(e_stop);
cudaEventElapsedTime(&working_time, e_start, e_stop);
cudaMemcpy(array, d_array, ARR_LEN * sizeof(int), cudaMemcpyDeviceToHost);
double cpu_time;
clock_t c_start, c_end;
c_start = clock();
for (int i = 0; i < block_num - 1; i++) {
merge(array, array + array_len * (i + 1), array_len * (i + 1), array_len);
}
c_end = clock();
for (int i = 0; i < ARR_LEN; i++) {
printf("%d ", array[i]);
}
printf("\n");
cpu_time = (double)(c_end - c_start) / CLOCKS_PER_SEC;
cout << " Merging time: " << cpu_time << " s" << endl;
gpu_time = working_time / 1000;
cout << " GPU sorting time: " << gpu_time << " s" << endl;
cuda_error:
cudaEventDestroy(e_start);
cudaEventDestroy(e_stop);
cudaFree(d_array);
// for (int i = 0; i < ARR_LEN; i++) {
// printf("%d ", array[i]);
// }
// printf("\n");
ofstream out("out.txt");
for (int j = 0; j < ARR_LEN; j++) {
out << array[j] << endl;
// for (int i = 0; i < block_num; i+=2) {
// merge(array + array_len * i, array + array_len * (i+j), array_len, array_len);
// }
}
out.close();
// double cpu_time;
// clock_t c_start, c_end;
// c_start = clock();
// merge(array, array + array_len, array_len, array_len);
// merge(array, array + array_len * 2, array_len * 2, array_len);
// c_end = clock();
// cpu_time = (double)(c_end - c_start) / CLOCKS_PER_SEC;
// cout << " Merging time: " << cpu_time << " s" << endl;
delete[] array;
return 0;
}
|
bfc633da9c7ac28034fc0f331ec8ce4f92c1cb05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Modifications Copyright (c) Microsoft. */
#include "roialign_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const bool is_mode_avg,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = is_mode_avg
? (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4) // mode Avg
: max(max(max(w1 * v1, w2 * v2), w3 * v3), w4 * v4); // mode Max
return val;
}
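// Worked example for bilinear_interpolate (illustrative; the feature-map values are assumed):
// for a 2x2 map bottom_data = {1, 2, 3, 4} (row-major) sampled at y = 0.5, x = 0.5,
// the corners are v1..v4 = 1, 2, 3, 4 with equal weights w1..w4 = 0.25, so the
// average mode returns 2.5 and the max mode returns max(0.25, 0.5, 0.75, 1.0) = 1.0.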
template <typename T>
__global__ void RoIAlignForward(
const int64_t nthreads,
const T* bottom_data,
const T spatial_scale,
const int64_t channels,
const int64_t height,
const int64_t width,
const int64_t pooled_height,
const int64_t pooled_width,
const int64_t sampling_ratio,
const T* bottom_rois,
int64_t roi_cols,
T* top_data,
const bool is_mode_avg,
const int64_t* batch_indices_ptr) {
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// RoI could have 4 or 5 columns
const T* offset_bottom_rois = bottom_rois + n * roi_cols;
const auto roi_batch_ind = batch_indices_ptr[n];
bool continuous_coordinate = false;
// Do not use rounding; this implementation detail is critical
T roi_offset = continuous_coordinate ? T(0.5) : T(0);
T roi_start_w = offset_bottom_rois[0] * spatial_scale - roi_offset;
T roi_start_h = offset_bottom_rois[1] * spatial_scale - roi_offset;
T roi_end_w = offset_bottom_rois[2] * spatial_scale - roi_offset;
T roi_end_h = offset_bottom_rois[3] * spatial_scale - roi_offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!continuous_coordinate) { // backward compatibility
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + static_cast<int64_t>((roi_batch_ind * channels + c) * height * width);
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
bool max_flag = false;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, is_mode_avg, index);
if (is_mode_avg) {
output_val += val;
} else {
if (!max_flag) {
output_val = val;
max_flag = true;
} else {
output_val = max(output_val, val);
}
}
}
}
if (is_mode_avg) {
output_val /= count;
}
top_data[index] = output_val;
}
}
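// Sketch of the sampling grid (illustrative): with pooled_height = pooled_width = 2 and
// sampling_ratio = 2, each of the 4 output bins averages (or maxes) 2x2 = 4 bilinear
// samples placed at the bin's quarter points, i.e. offsets 0.25 and 0.75 of bin_size_h/w
// from the bin's top-left corner; with sampling_ratio <= 0 the grid size per axis falls
// back to ceil(roi_height / pooled_height) and ceil(roi_width / pooled_width).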
template <typename T>
void RoiAlignImpl(
const int64_t nthreads,
const T* bottom_data,
const T spatial_scale,
const int64_t channels,
const int64_t height,
const int64_t width,
const int64_t pooled_height,
const int64_t pooled_width,
const int64_t sampling_ratio,
const T* bottom_rois,
int64_t roi_cols,
T* top_data,
const bool is_mode_avg,
const int64_t* batch_indices_ptr) {
int blocksPerGrid = (int)(ceil(static_cast<float>(nthreads) / GridDim::maxThreadsPerBlock));
hipLaunchKernelGGL(( RoIAlignForward<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
nthreads,
bottom_data,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
bottom_rois,
roi_cols,
top_data,
is_mode_avg,
batch_indices_ptr);
}
#define SPECIALIZED_IMPL(T) \
template void RoiAlignImpl<T>( \
const int64_t nthreads, \
const T* bottom_data, \
const T spatial_scale, \
const int64_t channels, \
const int64_t height, \
const int64_t width, \
const int64_t pooled_height, \
const int64_t pooled_width, \
const int64_t sampling_ratio, \
const T* bottom_rois, \
int64_t roi_cols, \
T* top_data, \
const bool is_mode_avg, \
const int64_t* batch_indices_ptr);
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
} // namespace cuda
} // namespace onnxruntime
| bfc633da9c7ac28034fc0f331ec8ce4f92c1cb05.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Modifications Copyright (c) Microsoft. */
#include "roialign_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const bool is_mode_avg,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = is_mode_avg
? (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4) // mode Avg
: max(max(max(w1 * v1, w2 * v2), w3 * v3), w4 * v4); // mode Max
return val;
}
template <typename T>
__global__ void RoIAlignForward(
const int64_t nthreads,
const T* bottom_data,
const T spatial_scale,
const int64_t channels,
const int64_t height,
const int64_t width,
const int64_t pooled_height,
const int64_t pooled_width,
const int64_t sampling_ratio,
const T* bottom_rois,
int64_t roi_cols,
T* top_data,
const bool is_mode_avg,
const int64_t* batch_indices_ptr) {
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// RoI could have 4 or 5 columns
const T* offset_bottom_rois = bottom_rois + n * roi_cols;
const auto roi_batch_ind = batch_indices_ptr[n];
bool continuous_coordinate = false;
// Do not use rounding; this implementation detail is critical
T roi_offset = continuous_coordinate ? T(0.5) : T(0);
T roi_start_w = offset_bottom_rois[0] * spatial_scale - roi_offset;
T roi_start_h = offset_bottom_rois[1] * spatial_scale - roi_offset;
T roi_end_w = offset_bottom_rois[2] * spatial_scale - roi_offset;
T roi_end_h = offset_bottom_rois[3] * spatial_scale - roi_offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!continuous_coordinate) { // backward compatibility
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + static_cast<int64_t>((roi_batch_ind * channels + c) * height * width);
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
bool max_flag = false;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, is_mode_avg, index);
if (is_mode_avg) {
output_val += val;
} else {
if (!max_flag) {
output_val = val;
max_flag = true;
} else {
output_val = max(output_val, val);
}
}
}
}
if (is_mode_avg) {
output_val /= count;
}
top_data[index] = output_val;
}
}
template <typename T>
void RoiAlignImpl(
const int64_t nthreads,
const T* bottom_data,
const T spatial_scale,
const int64_t channels,
const int64_t height,
const int64_t width,
const int64_t pooled_height,
const int64_t pooled_width,
const int64_t sampling_ratio,
const T* bottom_rois,
int64_t roi_cols,
T* top_data,
const bool is_mode_avg,
const int64_t* batch_indices_ptr) {
int blocksPerGrid = (int)(ceil(static_cast<float>(nthreads) / GridDim::maxThreadsPerBlock));
RoIAlignForward<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
nthreads,
bottom_data,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
bottom_rois,
roi_cols,
top_data,
is_mode_avg,
batch_indices_ptr);
}
#define SPECIALIZED_IMPL(T) \
template void RoiAlignImpl<T>( \
const int64_t nthreads, \
const T* bottom_data, \
const T spatial_scale, \
const int64_t channels, \
const int64_t height, \
const int64_t width, \
const int64_t pooled_height, \
const int64_t pooled_width, \
const int64_t sampling_ratio, \
const T* bottom_rois, \
int64_t roi_cols, \
T* top_data, \
const bool is_mode_avg, \
const int64_t* batch_indices_ptr);
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
} // namespace cuda
} // namespace onnxruntime
|
808c59554bbc89fa366600787f6a59d476a539b7.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<cuda_runtime.h>
#include<math.h>
#include "cusparse_v2.h"
#include "rocblas.h"
#include "hello.cuh"
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include<iostream>
using namespace std;
int main(int argc, char *argv[])
{
//number of nodes
int N=10;
//size of the matrix
double nodos=N+2;
//number of nonzero values given the nodes
int nnz=3*(nodos-2)-2;
int i;
double dx=0, dt=0,H0=10, g=9.81,beta,pi=0, L=0, alfa=0;
double *x0=0,*csrValA=0, *u=0, *b=0, *answer=0, *eta=0;
int *csrRowPtrA=0,*csrColIndA=0;
/*
csrValA=(double *)malloc(sizeof(double)*nnz);
csrRowPtrA=(int *)malloc(sizeof(int)*(N+1));
csrColIndA=(int *)malloc(sizeof(int)*nnz);
u=(double *)malloc(sizeof(double)*(N+1));
b=(double *)malloc(sizeof(double)*N);
answer = (double *)malloc(sizeof(double)*N);
x0=(double *)malloc(sizeof(double)*N);
*/
csrValA = new double[nnz];
csrRowPtrA = new int[N+1];
csrColIndA = new int[nnz];
u = new double[N+2];
b = new double[N];
answer = new double[N];
x0= new double[N];
eta= new double[N+1];
printf("Numero de nodos=%f\n",nodos);
printf("Tamao de la matriz=%d\n",N);
printf("Numero de valores no cero=%d\n",nnz);
L=atoi(argv[1]);
printf("L=%f \n",L);
dx=L/(nodos-1);
printf("dx=%f\n",dx);
dt=dx/(10*sqrt(g*H0));
printf("dt=%f\n",dt);
beta=g*H0*(dt*dt)/(dx*dx);
printf("beta=%f\n",beta);
pi=atan(1.0f)*4.0f;
printf("pi=%f\n",pi);
alfa=g*dt/dx;
//fill the csrValA vector
for(i=0;i<=N-2;i++)
{
csrValA[3*i]=(1+2*beta);
csrValA[3*i+1]=-beta;
csrValA[3*i+2]=-beta;
}
csrValA[nnz-1]=(1+2*beta);
//print csrValA
for(i=0;i<nnz;i++)
{
printf("csrval[%d]=%f\n",i,csrValA[i]);
}
//fill the csrRowPtrA vector
csrRowPtrA[0]=0;
for(i=0;i<=N-2;i++)
{
csrRowPtrA[i+1]=2+3*i;
}
csrRowPtrA[N]=nnz;
//print csrRowPtrA
for(i=0;i<=N;i++)
{
printf("csrRowPtrA[%d]=%d\n",i,csrRowPtrA[i]);
}
//fill csrColIndA
csrColIndA[0]=0;
csrColIndA[1]=1;
for(i=0;i<N-2;i++)
{
csrColIndA[2+3*i]=0+i;
csrColIndA[3+3*i]=1+i;
csrColIndA[4+3*i]=2+i;
}
csrColIndA[nnz-2]=N-2;
csrColIndA[nnz-1]=N-1;
//print csrColIndA
for(i=0;i<nnz;i++)
{
printf("csrColIndA[%d]=%d\n",i,csrColIndA[i]);
}
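// Illustrative layout of the tridiagonal system in CSR form (example with N = 4,
// d = 1 + 2*beta): csrValA = {d,-beta, -beta,d,-beta, -beta,d,-beta, -beta,d},
// csrRowPtrA = {0, 2, 5, 8, 10} and csrColIndA = {0,1, 0,1,2, 1,2,3, 2,3},
// i.e. row r holds the nonzero entries of row r of the implicit finite-difference matrix.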
u[0]=u[N+1]=0.0f; //boundary nodes (u has N+2 entries, valid indices 0..N+1)
//boundary conditions
for(i=1;i<=N;i++)
{
u[i]=0.0f;
}
//print boundary conditions
for(i=0;i<nodos;i++)
{
printf("u[%d]=%f\n",i,u[i]);
}
for(i=1;i<=nodos-1;i++)
{
eta[i-1]=0.5-0.5*cos(2*pi*i*dx/L);
}
for(i=0;i<nodos-1;i++)
{
printf("eta[%d]=%e\n",i,eta[i]);
}
//define and fill the vector b
b[0]=u[1]+beta*u[0]-alfa*(eta[1]-eta[0]);
b[N-1]=u[N-1]+beta*u[N]-alfa*(eta[N]-eta[N-1]);
for(i=1;i<N-1;i++)
{
b[i]=u[i+1]-alfa*(eta[i+1]-eta[i]);
}
//print the vector b
for(i=0;i<N;i++)
{
printf("b[%d]=%f\n",i,b[i]);
}
//initial guess
for(i=0;i<N;i++)
{
x0[i]=0;
printf("x0[%d]=%f\n",i,x0[i]);
}
answer=solverbicg((int*) csrColIndA,(double*) csrValA,(int*) csrRowPtrA,(double*) x0,(double *) b, N, nnz);
printf("Desde main\n");
for(i=0;i<N;i++)
{
printf("%e \n",*(answer+i));
}
FILE *f;
f = fopen ("u_0.csv","w+");
fprintf(f,"x coord, y coord\n");
fprintf(f,"%f,%f\n",0.0f,u[0]);
for(i=0;i<N;i++)
{
fprintf(f,"%f,%f\n",(i+1)*dx,answer[i]);
printf("%f,%f\n",(i+1)*dx,answer[i]);
}
fprintf(f,"%f,%f\n",L,u[N+1]);
fclose(f);
/*
delete [] csrValA;
delete [] csrRowPtrA;
delete [] csrColIndA;
delete [] u;
delete [] b;
delete [] answer;
delete [] x0;
*/
return 0;
}
| 808c59554bbc89fa366600787f6a59d476a539b7.cu | #include<stdio.h>
#include<cuda_runtime.h>
#include<math.h>
#include "cusparse_v2.h"
#include "cublas_v2.h"
#include "hello.cuh"
#include <cuda.h>
#include <stdlib.h>
#include<iostream>
using namespace std;
int main(int argc, char *argv[])
{
//number of nodes
int N=10;
//size of the matrix
double nodos=N+2;
//number of nonzero values given the nodes
int nnz=3*(nodos-2)-2;
int i;
double dx=0, dt=0,H0=10, g=9.81,beta,pi=0, L=0, alfa=0;
double *x0=0,*csrValA=0, *u=0, *b=0, *answer=0, *eta=0;
int *csrRowPtrA=0,*csrColIndA=0;
/*
csrValA=(double *)malloc(sizeof(double)*nnz);
csrRowPtrA=(int *)malloc(sizeof(int)*(N+1));
csrColIndA=(int *)malloc(sizeof(int)*nnz);
u=(double *)malloc(sizeof(double)*(N+1));
b=(double *)malloc(sizeof(double)*N);
answer = (double *)malloc(sizeof(double)*N);
x0=(double *)malloc(sizeof(double)*N);
*/
csrValA = new double[nnz];
csrRowPtrA = new int[N+1];
csrColIndA = new int[nnz];
u = new double[N+2];
b = new double[N];
answer = new double[N];
x0= new double[N];
eta= new double[N+1];
printf("Numero de nodos=%f\n",nodos);
printf("Tamaño de la matriz=%d\n",N);
printf("Numero de valores no cero=%d\n",nnz);
L=atoi(argv[1]);
printf("L=%f \n",L);
dx=L/(nodos-1);
printf("dx=%f\n",dx);
dt=dx/(10*sqrt(g*H0));
printf("dt=%f\n",dt);
beta=g*H0*(dt*dt)/(dx*dx);
printf("beta=%f\n",beta);
pi=atan(1.0f)*4.0f;
printf("pi=%f\n",pi);
alfa=g*dt/dx;
//fill the csrValA vector
for(i=0;i<=N-2;i++)
{
csrValA[3*i]=(1+2*beta);
csrValA[3*i+1]=-beta;
csrValA[3*i+2]=-beta;
}
csrValA[nnz-1]=(1+2*beta);
//print csrValA
for(i=0;i<nnz;i++)
{
printf("csrval[%d]=%f\n",i,csrValA[i]);
}
//fill the csrRowPtrA vector
csrRowPtrA[0]=0;
for(i=0;i<=N-2;i++)
{
csrRowPtrA[i+1]=2+3*i;
}
csrRowPtrA[N]=nnz;
//print csrRowPtrA
for(i=0;i<=N;i++)
{
printf("csrRowPtrA[%d]=%d\n",i,csrRowPtrA[i]);
}
//fill csrColIndA
csrColIndA[0]=0;
csrColIndA[1]=1;
for(i=0;i<N-2;i++)
{
csrColIndA[2+3*i]=0+i;
csrColIndA[3+3*i]=1+i;
csrColIndA[4+3*i]=2+i;
}
csrColIndA[nnz-2]=N-2;
csrColIndA[nnz-1]=N-1;
//print csrColIndA
for(i=0;i<nnz;i++)
{
printf("csrColIndA[%d]=%d\n",i,csrColIndA[i]);
}
u[0]=u[N+1]=0.0f; //boundary nodes (u has N+2 entries, valid indices 0..N+1)
//boundary conditions
for(i=1;i<=N;i++)
{
u[i]=0.0f;
}
//print boundary conditions
for(i=0;i<nodos;i++)
{
printf("u[%d]=%f\n",i,u[i]);
}
for(i=1;i<=nodos-1;i++)
{
eta[i-1]=0.5-0.5*cos(2*pi*i*dx/L);
}
for(i=0;i<nodos-1;i++)
{
printf("eta[%d]=%e\n",i,eta[i]);
}
//define and fill the vector b
b[0]=u[1]+beta*u[0]-alfa*(eta[1]-eta[0]);
b[N-1]=u[N-1]+beta*u[N]-alfa*(eta[N]-eta[N-1]);
for(i=1;i<N-1;i++)
{
b[i]=u[i+1]-alfa*(eta[i+1]-eta[i]);
}
//print the vector b
for(i=0;i<N;i++)
{
printf("b[%d]=%f\n",i,b[i]);
}
//initial guess
for(i=0;i<N;i++)
{
x0[i]=0;
printf("x0[%d]=%f\n",i,x0[i]);
}
answer=solverbicg((int*) csrColIndA,(double*) csrValA,(int*) csrRowPtrA,(double*) x0,(double *) b, N, nnz);
printf("Desde main\n");
for(i=0;i<N;i++)
{
printf("%e \n",*(answer+i));
}
FILE *f;
f = fopen ("u_0.csv","w+");
fprintf(f,"x coord, y coord\n");
fprintf(f,"%f,%f\n",0.0f,u[0]);
for(i=0;i<N;i++)
{
fprintf(f,"%f,%f\n",(i+1)*dx,answer[i]);
printf("%f,%f\n",(i+1)*dx,answer[i]);
}
fprintf(f,"%f,%f\n",L,u[N+1]);
fclose(f);
/*
delete [] csrValA;
delete [] csrRowPtrA;
delete [] csrColIndA;
delete [] u;
delete [] b;
delete [] answer;
delete [] x0;
*/
return 0;
}
|
e3131906009dd0f67273640053401d25d4f35e19.hip | // !!! This is a file automatically generated by hipify!!!
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// modified to use only 7 floats for triMem
//1. #define TRIMEMLENGTH 7
//2. in FIMCuda and run_neighbor_check, add initialization of old at the beginning of the iteration
//3. in FIMCuda and run_neighbor_check, s_triMem[tx*TRIMEMLENGTH + 3 + C] = TC after each iteration instead of s_triMem[tx*TRIMEMLENGTH + 6 + C] = TC
//4. in FIMCuda and run_neighbor_check, in the reconcile step, there should be no +3 in fetching the location of triMem
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "meshFIM2d_eikonal.h"
#include "Vec.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include "CUDADefines.h"
#include <sstream>
#include <time.h>
#ifdef WIN32
#include <io.h>
#define unlink _unlink
#else
#include <unistd.h>
#endif
extern "C" {
#include <metis.h>
}
/////declaration for cuda kernels///////////////////////////
extern __global__ void run_reduction(int *con, int *blockCon,int* ActiveList, int nActiveBlock, int* blockSizes);
extern __global__ void FIMCuda(float* d_triMem,float* d_triMemOut, int* d_vertMem, int* d_vertMemOutside, float* d_edgeMem0,float* d_edgeMem1,float* d_edgeMem2,float* d_speed, int* d_BlockSizes, int* d_con, int* ActiveList, int nActiveBlock,int maxNumTotalFaces, int maxNumVert,/*int nIter, */float m_StopDistance);
extern __global__ void run_check_neighbor(float* d_triMem,float* d_triMemOut, int* d_vertMem,int* d_vertMemOutside,float* d_edgeMem0,float* d_edgeMem1,float* d_edgeMem2, float* d_speed, int* d_BlockSizes, int* d_con,int* d_ActiveList, int numOldActive ,int maxNumTotalFaces, int maxNumVert,int nTotalActive, int m_StopDistance);
#if __DEVICE_EMULATION__
bool InitCUDA(void){return true;}
#else
bool InitCUDA(void)
{
int count = 0;
int i = 0;
hipGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
hipSetDevice(i);
return true;
}
#endif
/////////////////////////////////////////////////////////////////////////////
void meshFIM2dEikonal::writeVTK(std::vector< std::vector <float> > time_values)
{
size_t nv = m_meshPtr->vertices.size();
size_t nt = m_meshPtr->faces.size();
for (size_t j = 0; j < time_values.size(); j++) {
FILE* vtkfile;
std::stringstream ss;
ss << "result" << j << ".vtk";
vtkfile = fopen(ss.str().c_str(), "w+");
fprintf(vtkfile, "# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET UNSTRUCTURED_GRID\n");
fprintf(vtkfile, "POINTS %d float\n", nv);
for (size_t i = 0; i < nv; i++)
{
fprintf(vtkfile, "%.12f %.12f %.12f\n", m_meshPtr->vertices[i][0],
m_meshPtr->vertices[i][1], m_meshPtr->vertices[i][2]);
}
fprintf(vtkfile, "CELLS %d %d\n", nt, nt * 4);
for (size_t i = 0; i < nt; i++)
{
fprintf(vtkfile, "3 %d %d %d\n", m_meshPtr->faces[i][0],
m_meshPtr->faces[i][1], m_meshPtr->faces[i][2]);
}
fprintf(vtkfile, "CELL_TYPES %d\n", nt);
for (size_t i = 0; i < nt; i++)
{
fprintf(vtkfile, "5\n");
}
fprintf(vtkfile, "POINT_DATA %d\nSCALARS traveltime float 1\nLOOKUP_TABLE default\n", nv);
for (size_t i = 0; i < nv; i++)
{
fprintf(vtkfile, "%.12f\n", time_values[j][i]);
}
fclose(vtkfile);
}
}
//create .mesh file from trimesh faces and call partnmesh method
//to partition and create intermediate mesh.npart.N file and then read this file
void meshFIM2dEikonal::GraphPartition_METIS2(int& numBlock, int maxNumBlockVerts, bool verbose)
{
FILE * outf;
outf = fopen("tmp.mesh", "w+");
if(outf == NULL)
{
printf("Cannot open mesh file to write!!!!\n");
exit(1);
}
size_t sz = m_meshPtr->faces.size();
fprintf(outf,"%d 1\n", sz);
for (int i=0;i<sz;i++)
fprintf(outf, "%d %d %d\n",m_meshPtr->faces[i].v[0]+1,m_meshPtr->faces[i].v[1]+1,m_meshPtr->faces[i].v[2]+1);
fclose(outf);
size_t numVert = m_meshPtr->vertices.size();
m_PartitionLabel.resize(numVert);
char outputFileName[512];
char meshfile[] = "tmp.mesh";
if(numBlock == 0)
{
numBlock = static_cast<int>(numVert) / maxNumBlockVerts;
do{
numBlock++;
m_BlockSizes.resize(numBlock);
for(int i=0; i< numBlock;i++)
{
m_BlockSizes[i] = 0;
}
partnmesh(meshfile,numBlock,verbose?1:0);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
FILE* partFile = fopen(outputFileName, "r+");
if(partFile == NULL)
{
printf("NO part file found: %s\n",outputFileName);
exit(1);
}
for(int i = 0; i < numVert; i++)
{
fscanf(partFile, "%d", &m_PartitionLabel[i]);
}
for(int i = 0; i<numVert; i++)
{
m_BlockSizes[m_PartitionLabel[i]]++;
}
m_maxNumVert = 0;
for(int i = 0 ; i < numBlock; i++)
{
m_maxNumVert = MAX(m_maxNumVert, m_BlockSizes[i]);
}
fclose(partFile);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
unlink(outputFileName);
sprintf(outputFileName, "tmp.mesh.epart.%d", numBlock);
unlink(outputFileName);
}while(m_maxNumVert != maxNumBlockVerts);
}
else
{
m_BlockSizes.resize(numBlock);
for(int i=0; i< numBlock;i++)
{
m_BlockSizes[i] = 0;
}
partnmesh(meshfile,numBlock,verbose?1:0);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
FILE* partFile = fopen(outputFileName, "r+");
if(partFile == NULL)
{
printf("NO part file found: %s\n",outputFileName);
exit(1);
}
for(int i = 0; i < numVert; i++)
{
fscanf(partFile, "%d", &m_PartitionLabel[i]);
}
for(int i = 0; i<numVert; i++)
{
m_BlockSizes[m_PartitionLabel[i]]++;
}
m_maxNumVert = 0;
for(int i = 0 ; i < numBlock; i++)
{
m_maxNumVert = MAX(m_maxNumVert, m_BlockSizes[i]);
}
if (verbose)
printf("max num vert is : %d\n", m_maxNumVert);
fclose(partFile);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
unlink(outputFileName);
sprintf(outputFileName, "tmp.mesh.epart.%d", numBlock);
unlink(outputFileName);
}
srand( (unsigned)time( NULL ) );
if (verbose)
printf("numBlock is : %d\n", numBlock);
float r,g,b;
vector< Color > colors;
colors.resize(numBlock);
for(int i = 0; i< numBlock; i++)
{
r = rand() / (float)RAND_MAX;
g = rand() / (float)RAND_MAX;
b = rand() / (float)RAND_MAX;
colors[i] = Color(r,g,b);
}
m_meshPtr->colors.resize(numVert);
m_PartitionVerts.resize(numBlock);
for(int i = 0; i<numVert; i++)
{
m_PartitionVerts[m_PartitionLabel[i]].push_back(i);
m_meshPtr->colors[i] = colors[m_PartitionLabel[i]];
}
unlink("tmp.mesh");
/*
typedef cusp::array1d<int, cusp::host_memory> IdxVector_h;
typedef cusp::array1d<int, cusp::device_memory> IdxVector_d;
/////////BETTER WAY TODO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
int options[10], pnumflag = 0, wgtflag = 0;
options[0] = 0;
int edgecut;
npart_h = IdxVector_h(numVert);
nparts = numVert / metissize;
if (nparts < 2)
nparts = 2;
// Counting up edges for adjacency:
int edgeCount = 0;
for (int vIt = 0; vIt < numVert; vIt++)
{
edgeCount += m_meshPtr->neighbors[vIt].size();
}
m_largest_num_inside_mem = 0;
//for(int bidx = 0; bidx < nparts; bidx++)
for (int i = 0; i < numVert; i++)
{
if (m_meshPtr->adjacentfaces[i].size() > m_largest_num_inside_mem)
m_largest_num_inside_mem = m_meshPtr->adjacentfaces[i].size();
}
if (verbose)
printf("m_largest_num_inside_mem = %d\n", m_largest_num_inside_mem);
//Allocating storage for array values of adjacency
int* xadj = new int[numVert + 1];
int* adjncy = new int[edgeCount];
// filling the arrays:
xadj[0] = 0;
int idx = 0;
IdxVector_h neighbor_sizes(numVert);
// Populating the arrays:
for (int i = 1; i < numVert + 1; i++)
{
neighbor_sizes[i - 1] = m_meshPtr->neighbors[i - 1].size();
xadj[i] = xadj[i - 1] + m_meshPtr->neighbors[i - 1].size();
for (int j = 0; j < m_meshPtr->neighbors[i - 1].size(); j++)
{
adjncy[idx++] = m_meshPtr->neighbors[i - 1][j];
}
}
m_neighbor_sizes_d = neighbor_sizes;
int* npart_h_ptr = thrust::raw_pointer_cast(&npart_h[0]);
METIS_PartGraphKway(&numVert, xadj, adjncy, NULL, NULL, &wgtflag,
&pnumflag, &nparts, options, &edgecut, npart_h_ptr);
m_xadj_d = IdxVector_d(&xadj[0], &xadj[numVert + 1]);
m_adjncy_d = IdxVector_d(&adjncy[0], &adjncy[edgeCount]);
IdxVector_h part_sizes(nparts, 0);
for (int i = 0; i < numVert; i++)
{
part_sizes[npart_h[i]]++;
}
int min_part_size = thrust::reduce(part_sizes.begin(),
part_sizes.end(), 100000000, thrust::minimum<int>());
largest_vert_part = thrust::reduce(part_sizes.begin(),
part_sizes.end(), -1, thrust::maximum<int>());
if (verbose)
printf("Largest vertex partition size is: %d\n", largest_vert_part);
if (min_part_size == 0) printf("Min partition size is 0!!\n");
delete[] xadj;
delete[] adjncy;*/
}
void meshFIM2dEikonal::GraphPartition_Square(int squareLength,int squareWidth, int blockLength, int blockWidth, bool verbose)
{
size_t numVert = m_meshPtr->vertices.size();
m_PartitionLabel.resize(numVert);
int numBlockLength = (squareLength / blockLength);
int numBlockWidth = (squareWidth / blockWidth);
int numBlock = numBlockLength * numBlockWidth;
for(int i = 0; i< squareWidth; i++)
for(int j =0; j< squareLength; j++)
{
m_PartitionLabel[i*squareLength+j] = (i/blockWidth) * numBlockLength + (j/blockLength);
}
m_BlockSizes.resize(numBlock);
for(int i =0; i<numBlock; i++)
m_BlockSizes[i] = 0;
float r,g,b;
vector< Color > colors;
colors.resize(numBlock);
for(int i = 0; i< numBlock; i++)
{
r = rand() / (float)RAND_MAX;
g = rand() / (float)RAND_MAX;
b = rand() / (float)RAND_MAX;
colors[i] = Color(r,g,b);
}
m_meshPtr->colors.resize(numVert);
m_PartitionVerts.resize(numBlock);
for(int i = 0; i<numVert; i++)
{
m_PartitionVerts[m_PartitionLabel[i]].push_back(i);
m_BlockSizes[m_PartitionLabel[i]]++;
m_meshPtr->colors[i] = colors[m_PartitionLabel[i]];
}
m_maxNumVert = 0;
for(int i = 0 ; i < numBlock; i++)
{
m_maxNumVert = MAX(m_maxNumVert, m_BlockSizes[i]);
}
if (verbose)
printf("final number of blocks: %d\n", numBlock);
}
void meshFIM2dEikonal::PartitionFaces(int numBlock)
{
///////////////////step 3: partition faces//////////////////////////////////////
m_PartitionFaces.resize(numBlock);
m_PartitionNbFaces.resize(numBlock);
size_t numFaces = m_meshPtr->faces.size();
TriMesh::Face f;
int labelv0;
int labelv1;
int labelv2;
vector<TriMesh::Face> virtualfaces;
vector<int> virtualFaceCnt;
virtualFaceCnt.resize(numBlock);
m_PartitionVirtualFaces.resize(numBlock);
for(int i = 0; i< numBlock; i++)
virtualFaceCnt[i] = 0;
m_BlockNeighbor.resize(numBlock);
for(int i = 0; i < numFaces; i++)
{
f = m_meshPtr->faces[i];
size_t vfCnt = m_meshPtr->faceVirtualFaces[i].size();
for(int k = 0 ; k < 3; k++)
{
if(!m_meshPtr->IsNonObtuse(f[k], f))
{
virtualFaceCnt[m_PartitionLabel[f[k]]] += static_cast<int>(vfCnt);
m_PartitionVirtualFaces[m_PartitionLabel[f[k]]].insert(m_PartitionVirtualFaces[m_PartitionLabel[f[k]]].end(), m_meshPtr->faceVirtualFaces[i].begin(), m_meshPtr->faceVirtualFaces[i].end());
}
}
labelv0 = m_PartitionLabel[f[0]];
labelv1 = m_PartitionLabel[f[1]];
labelv2 = m_PartitionLabel[f[2]];
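// Classify the face by how its three vertex labels agree: all-same faces are interior to one block,
// otherwise the face is a boundary face of every block involved and those blocks are recorded as neighbors.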
if(labelv0 == labelv1 && labelv1 == labelv2)
{
m_PartitionFaces[labelv0].push_back(i);
}
else if(labelv0 == labelv1 && labelv1 != labelv2)
{
m_PartitionNbFaces[labelv0].push_back(i);
m_PartitionNbFaces[labelv2].push_back(i);
m_BlockNeighbor[labelv0].insert(m_BlockNeighbor[labelv0].end(), labelv2);
m_BlockNeighbor[labelv2].insert(m_BlockNeighbor[labelv2].end(), labelv0);
}
else if(labelv0 != labelv1 && labelv1 == labelv2)
{
m_PartitionNbFaces[labelv0].push_back(i);
m_PartitionNbFaces[labelv2].push_back(i);
m_BlockNeighbor[labelv0].insert(m_BlockNeighbor[labelv0].end(), labelv2);
m_BlockNeighbor[labelv2].insert(m_BlockNeighbor[labelv2].end(), labelv0);
}
else if(labelv0 == labelv2 && labelv1 != labelv2)
{
m_PartitionNbFaces[labelv0].push_back(i);
m_PartitionNbFaces[labelv1].push_back(i);
m_BlockNeighbor[labelv0].insert(m_BlockNeighbor[labelv0].end(), labelv1);
m_BlockNeighbor[labelv1].insert(m_BlockNeighbor[labelv1].end(), labelv0);
}
else //all different
{
m_PartitionNbFaces[labelv0].push_back(i);
m_PartitionNbFaces[labelv1].push_back(i);
m_PartitionNbFaces[labelv2].push_back(i);
m_BlockNeighbor[labelv0].insert(m_BlockNeighbor[labelv0].end(), labelv2);
m_BlockNeighbor[labelv2].insert(m_BlockNeighbor[labelv2].end(), labelv0);
m_BlockNeighbor[labelv0].insert(m_BlockNeighbor[labelv0].end(), labelv1);
m_BlockNeighbor[labelv1].insert(m_BlockNeighbor[labelv1].end(), labelv0);
m_BlockNeighbor[labelv1].insert(m_BlockNeighbor[labelv1].end(), labelv2);
m_BlockNeighbor[labelv2].insert(m_BlockNeighbor[labelv2].end(), labelv1);
}
}
vector<int> PartitionToltalFaces;
PartitionToltalFaces.resize(numBlock);
m_maxNumTotalFaces = 0;
for(int j = 0; j < numBlock; j++)
{
PartitionToltalFaces[j] = static_cast<int>(m_PartitionFaces[j].size() +
m_PartitionNbFaces[j].size() + virtualFaceCnt[j]);
m_maxNumTotalFaces = MAX(PartitionToltalFaces[j],m_maxNumTotalFaces );
}
}
std::vector< std::vector<float> > meshFIM2dEikonal::GenerateData(int numBlock,
int maxIterations, bool verbose)
{
size_t numVert = m_meshPtr->vertices.size();
size_t numFaces = m_meshPtr->faces.size();
if(!InitCUDA()) {
exit(1);
}
index *d_ActiveList= 0;
int *d_con;
int *d_con_forComputaion;
int *d_blockCon;
float *d_triMem;
float *d_edgeMem0;
float *d_edgeMem1;
float *d_edgeMem2;
float *d_speed;
float *d_triMemOut;
int *d_vertMem;
int *d_BlockSizes;
index *h_ActiveList= 0; //list of active blocks
int *h_BlockLabel = 0; //block active or not
float *h_triMem;
float *h_edgeMem0;
float *h_edgeMem1;
float *h_edgeMem2;
float *h_speed;
int *h_vertMem;
int *h_blockCon;
int *h_BlockSizes;
/////////////////////////////malloc cpu memories///////////////////////////
h_BlockLabel = (int*) malloc(sizeof(int) * numBlock);
h_edgeMem0 = (float*)malloc(sizeof(float) * m_maxNumTotalFaces * numBlock);
h_edgeMem1 = (float*)malloc(sizeof(float) * m_maxNumTotalFaces * numBlock);
h_edgeMem2 = (float*)malloc(sizeof(float) * m_maxNumTotalFaces * numBlock);
h_speed = (float*)malloc(sizeof(float) * m_maxNumTotalFaces * numBlock);
h_triMem = (float*)malloc(sizeof(float) * TRIMEMLENGTH * m_maxNumTotalFaces * numBlock);
h_vertMem = (int*)malloc(sizeof(int) * VERTMEMLENGTH * m_maxNumVert * numBlock);
h_BlockSizes = (int*)malloc(sizeof(int) * numBlock);
h_blockCon = (int*)malloc(sizeof(int) * numBlock);
/////////////////////////malloc gpu memories//////////////////////////////
cudaSafeCall( hipMalloc((void**) &d_con, sizeof(int) * numBlock * REDUCTIONSHARESIZE));
cudaSafeCall( hipMalloc((void**) &d_con_forComputaion, sizeof(int) * numBlock * REDUCTIONSHARESIZE));
cudaSafeCall( hipMalloc((void**) &d_blockCon, sizeof(int) * numBlock));
cudaSafeCall( hipMalloc((void**) &d_triMem, sizeof(float) * TRIMEMLENGTH * m_maxNumTotalFaces * numBlock));
cudaSafeCall( hipMalloc((void**) &d_triMemOut, sizeof(float) * TRIMEMLENGTH * m_maxNumTotalFaces * numBlock));
cudaSafeCall( hipMalloc((void**) &d_edgeMem0, sizeof(float) * m_maxNumTotalFaces * numBlock));
cudaSafeCall( hipMalloc((void**) &d_edgeMem1, sizeof(float) * m_maxNumTotalFaces * numBlock));
cudaSafeCall( hipMalloc((void**) &d_edgeMem2, sizeof(float) * m_maxNumTotalFaces * numBlock));
cudaSafeCall( hipMalloc((void**) &d_speed, sizeof(float) * m_maxNumTotalFaces * numBlock));
cudaSafeCall( hipMalloc((void**) &d_vertMem, sizeof(int) * VERTMEMLENGTH * m_maxNumVert * numBlock));
cudaSafeCall( hipMalloc((void**) &d_BlockSizes, sizeof(int) * numBlock));
/////////////////initialize cpu memories//////////////////////////////
vector< vector<int> > blockVertMapping;
blockVertMapping.resize(numVert); //for each vertex, store the addresses where it appears in the global triMem array.
for( int i = 0; i < numBlock; i++)
{
int blockIdx = i * m_maxNumTotalFaces * TRIMEMLENGTH;
size_t numPF = m_PartitionFaces[i].size();
for(int j = 0; j< numPF; j++)
{
h_edgeMem0[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionFaces[i][j]].edgeLens[0];
h_edgeMem1[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionFaces[i][j]].edgeLens[1];
h_edgeMem2[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionFaces[i][j]].edgeLens[2];
h_triMem[blockIdx + j*TRIMEMLENGTH + 0] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 1] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 2] = LARGENUM;
h_speed[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionFaces[i][j]].speedInv;
blockVertMapping[m_meshPtr->faces[m_PartitionFaces[i][j]][0]].push_back(blockIdx + j*TRIMEMLENGTH + 0);
blockVertMapping[m_meshPtr->faces[m_PartitionFaces[i][j]][1]].push_back(blockIdx + j*TRIMEMLENGTH + 1);
blockVertMapping[m_meshPtr->faces[m_PartitionFaces[i][j]][2]].push_back(blockIdx + j*TRIMEMLENGTH + 2);
}
}
for( int i = 0; i < numBlock; i++)
{
h_blockCon[i] = 1;
h_BlockLabel[i] = m_BlockLabel[i];
h_BlockSizes[i] = m_BlockSizes[i];
int blockIdx = i * m_maxNumTotalFaces * TRIMEMLENGTH;
size_t numPF = m_PartitionFaces[i].size();
size_t numPNF = m_PartitionNbFaces[i].size();
size_t numPVF = m_PartitionVirtualFaces[i].size();
int k = 0;
int l = 0;
for(int j = static_cast<int>(numPF); j< m_maxNumTotalFaces; j++)
{
if( j < numPF + numPNF)
{
h_edgeMem0[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionNbFaces[i][k]].edgeLens[0];
h_edgeMem1[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionNbFaces[i][k]].edgeLens[1];
h_edgeMem2[i * m_maxNumTotalFaces + j]= m_meshPtr->faces[m_PartitionNbFaces[i][k]].edgeLens[2];
h_triMem[blockIdx + j*TRIMEMLENGTH + 0] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 1] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 2] = LARGENUM;
h_speed[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionNbFaces[i][k]].speedInv;
blockVertMapping[m_meshPtr->faces[m_PartitionNbFaces[i][k]][0]].push_back(blockIdx + j*TRIMEMLENGTH + 0);
blockVertMapping[m_meshPtr->faces[m_PartitionNbFaces[i][k]][1]].push_back(blockIdx + j*TRIMEMLENGTH + 1);
blockVertMapping[m_meshPtr->faces[m_PartitionNbFaces[i][k]][2]].push_back(blockIdx + j*TRIMEMLENGTH + 2);
k++;
}
else if (j < numPF + numPNF + numPVF)
{
h_edgeMem0[i * m_maxNumTotalFaces + j]= m_PartitionVirtualFaces[i][l].edgeLens[0];
h_edgeMem1[i * m_maxNumTotalFaces + j]= m_PartitionVirtualFaces[i][l].edgeLens[1];
h_edgeMem2[i * m_maxNumTotalFaces + j] = m_PartitionVirtualFaces[i][l].edgeLens[2];
h_triMem[blockIdx + j*TRIMEMLENGTH + 0] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 1] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 2] = LARGENUM;
h_speed[i * m_maxNumTotalFaces + j] =m_PartitionVirtualFaces[i][l].speedInv;
blockVertMapping[m_PartitionVirtualFaces[i][l][0]].push_back(blockIdx + j*TRIMEMLENGTH + 0);
blockVertMapping[m_PartitionVirtualFaces[i][l][1]].push_back(blockIdx + j*TRIMEMLENGTH + 1);
blockVertMapping[m_PartitionVirtualFaces[i][l][2]].push_back(blockIdx + j*TRIMEMLENGTH + 2);
l++;
}
else
{
h_triMem[blockIdx + j*TRIMEMLENGTH + 0] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 1] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 2] = LARGENUM;
}
}
}
m_maxNumVertMapping = 0;
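// Ensure the first triMem address recorded for each vertex lies inside its own block's range
// (swap a suitable entry to the front if necessary), and track the largest mapping list size.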
for(int i =0; i < numVert; i++)
{
int blockIndex = m_PartitionLabel[i];
int tmp = blockVertMapping[i][0];
int maxi = (blockIndex+1) * m_maxNumTotalFaces * TRIMEMLENGTH;
int mini = blockIndex * m_maxNumTotalFaces * TRIMEMLENGTH;
if( ( tmp< mini) || (tmp >= maxi) )
{
for(int j =0; j < blockVertMapping[i].size(); j++)
if(blockVertMapping[i][j] >= mini && blockVertMapping[i][j] < maxi )
{
int swaptmp = tmp;
blockVertMapping[i][0] = blockVertMapping[i][j];
blockVertMapping[i][j] = swaptmp;
break;
}
}
m_maxNumVertMapping = static_cast<int>(MAX(m_maxNumVertMapping, blockVertMapping[i].size()));
}
for(int i =0; i < numVert; i++)
{
int blockIndex = m_PartitionLabel[i];
int tmp = blockVertMapping[i][0];
int maxi = (blockIndex+1) * m_maxNumTotalFaces * TRIMEMLENGTH;
int mini = blockIndex * m_maxNumTotalFaces * TRIMEMLENGTH;
if( ( tmp< mini) || (tmp >= maxi) )
{
printf(" WARNING: block beyond limits.");
}
}
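// Split each vertex's triMem addresses into those inside its own block's range and those owned by other blocks.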
vector< vector<int> > blockVertMappingInside;
vector< vector<int> > blockVertMappingOutside;
blockVertMappingInside.resize(numVert);
blockVertMappingOutside.resize(numVert);
for(int i = 0; i< numBlock; i++)
{
int triIdx = i * TRIMEMLENGTH * m_maxNumTotalFaces;
for(int m = 0; m < m_PartitionVerts[i].size(); m++)
{
vector<int> tmp = blockVertMapping[m_PartitionVerts[i][m]];
for(int n = 0; n < tmp.size(); n++)
{
if( tmp[n] >= triIdx + 0 && tmp[n] < triIdx + m_maxNumTotalFaces*TRIMEMLENGTH)
blockVertMappingInside[m_PartitionVerts[i][m]].push_back(tmp[n]);
else
{
blockVertMappingOutside[m_PartitionVerts[i][m]].push_back(tmp[n]);
}
}
}
}
int maxVertMappingInside = 0;
int maxVertMappingOutside = 0;
for(int i =0; i< numVert; i++)
{
maxVertMappingInside = static_cast<int>(MAX(maxVertMappingInside, (blockVertMappingInside[i].size())));
maxVertMappingOutside = static_cast<int>(MAX(maxVertMappingOutside, (blockVertMappingOutside[i].size())));
}
if (verbose) {
printf("maxVertMappingInside is: %d\n",maxVertMappingInside);
printf("maxVertMappingOutside is: %d\n",maxVertMappingOutside);
}
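// Fill h_vertMem: for each block, store each vertex's inside-block triMem addresses and pad unused
// slots with a block-local sentinel (the block's triMem base minus 1).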
for(int i = 0; i< numBlock; i++)
{
int vertIdx = i * VERTMEMLENGTH * m_maxNumVert;
for(int m = 0; m < m_PartitionVerts[i].size(); m++)
{
size_t tmpsize = blockVertMappingInside[m_PartitionVerts[i][m]].size();
int n = 0;
for(; n < tmpsize; n++)
h_vertMem[vertIdx + m*VERTMEMLENGTH + n] = blockVertMappingInside[m_PartitionVerts[i][m]][n];
for(;n<VERTMEMLENGTH; n++)
h_vertMem[vertIdx + m*VERTMEMLENGTH + n] = -1 + i*m_maxNumTotalFaces*TRIMEMLENGTH;
}
for (size_t m = m_PartitionVerts[i].size() * VERTMEMLENGTH; m < m_maxNumVert * VERTMEMLENGTH; m++)
{
h_vertMem[vertIdx + m] = -1 + i*m_maxNumTotalFaces*TRIMEMLENGTH;
}
}
int* h_vertMemOutside = (int*)malloc(m_maxNumVert * numBlock * VERTMEMLENGTHOUTSIDE * sizeof(int));
int* d_vertMemOutside;
cudaSafeCall( hipMalloc((void**) &d_vertMemOutside, m_maxNumVert * numBlock * VERTMEMLENGTHOUTSIDE * sizeof(int) ) );
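// Fill h_vertMemOutside the same way for addresses that live in other blocks; unused slots are plain -1.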
for(int i = 0; i< numBlock; i++)
{
int vertIdx = i * VERTMEMLENGTHOUTSIDE * m_maxNumVert;
for(int m = 0; m < m_PartitionVerts[i].size(); m++)
{
size_t tmpsize = blockVertMappingOutside[m_PartitionVerts[i][m]].size();
int n = 0;
for(; n < tmpsize; n++)
h_vertMemOutside[vertIdx + m*VERTMEMLENGTHOUTSIDE + n] = blockVertMappingOutside[m_PartitionVerts[i][m]][n];
for(;n<VERTMEMLENGTHOUTSIDE; n++)
h_vertMemOutside[vertIdx + m*VERTMEMLENGTHOUTSIDE + n] = -1;
}
for(size_t m = m_PartitionVerts[i].size() * VERTMEMLENGTHOUTSIDE; m < m_maxNumVert * VERTMEMLENGTHOUTSIDE; m++)
{
h_vertMemOutside[vertIdx + m] = -1;
}
}
h_ActiveList = (int*)malloc(sizeof(int)*numBlock);
cudaSafeCall( hipMalloc((void**) &d_ActiveList, sizeof(int) * numBlock));
//////////////////////////////////////////////////////////////////////////////////
vector<int> nb;
int numActive;
for( int i = 0; i < numBlock; i++)
{
h_blockCon[i] = 1;
h_BlockLabel[i] = m_BlockLabel[i];
h_BlockSizes[i] = m_BlockSizes[i];
}
//////////////initialize the seed points for h_triMem////////////////////////////////////
for(int i = 0; i< m_SeedPoints.size(); i++)
{
int seed = m_SeedPoints[i];
int seedBelongToBlock = m_PartitionLabel[seed];
h_blockCon[seedBelongToBlock] = 0;
for(int j = 0; j < blockVertMapping[seed].size(); j++)
{
h_triMem[blockVertMapping[seed][j]] = 0.0;
}
}
/////////////copy triMem and verMem to a vector just for debugging/////////////////
vector<float> vec_triMem;
vector<int> vec_vertMem;
vector<int> vec_vertMemOutside;
vec_triMem.resize(TRIMEMLENGTH * m_maxNumTotalFaces * numBlock);
vec_vertMem.resize(VERTMEMLENGTH * m_maxNumVert * numBlock);
vec_vertMemOutside.resize(VERTMEMLENGTHOUTSIDE * m_maxNumVert * numBlock);
for(int i =0; i < TRIMEMLENGTH * m_maxNumTotalFaces * numBlock; i++)
vec_triMem[i] = h_triMem[i];
for(int i = 0; i< VERTMEMLENGTH * m_maxNumVert * numBlock; i++)
vec_vertMem[i] = h_vertMem[i];
for(int i = 0; i< VERTMEMLENGTHOUTSIDE * m_maxNumVert * numBlock; i++)
vec_vertMemOutside[i] = h_vertMemOutside[i];
////////////////////////////////////////////////////////////////////////////
cudaSafeCall( hipMemcpy( d_triMem,h_triMem, sizeof(float) * m_maxNumTotalFaces * numBlock * TRIMEMLENGTH, hipMemcpyHostToDevice));
numActive = static_cast<int>(m_ActiveBlocks.size());
set<int>::iterator activeiter = m_ActiveBlocks.begin();
for(int i =0; activeiter != m_ActiveBlocks.end(); activeiter++)
h_ActiveList[i++] = *activeiter;
hipEvent_t start, stop, startCopy, stopCopy;
hipEventCreate(&start);
hipEventCreate(&startCopy);
hipEventCreate(&stopCopy);
hipEventCreate(&stop);
hipEventRecord(startCopy,0);
//////////////////copy to gpu memories///////////////////////////////
cudaSafeCall( hipMemcpy( d_triMem,h_triMem, sizeof(float) * m_maxNumTotalFaces * numBlock * TRIMEMLENGTH, hipMemcpyHostToDevice));
cudaSafeCall( hipMemcpy( d_triMemOut,h_triMem, sizeof(float) * m_maxNumTotalFaces * numBlock * TRIMEMLENGTH, hipMemcpyHostToDevice));
cudaSafeCall( hipMemcpy( d_edgeMem0,h_edgeMem0, sizeof(float) * m_maxNumTotalFaces * numBlock , hipMemcpyHostToDevice));
cudaSafeCall( hipMemcpy( d_edgeMem1,h_edgeMem1, sizeof(float) * m_maxNumTotalFaces * numBlock , hipMemcpyHostToDevice));
cudaSafeCall( hipMemcpy( d_edgeMem2,h_edgeMem2, sizeof(float) * m_maxNumTotalFaces * numBlock , hipMemcpyHostToDevice));
cudaSafeCall( hipMemcpy( d_speed,h_speed, sizeof(float) * m_maxNumTotalFaces * numBlock , hipMemcpyHostToDevice));
cudaSafeCall( hipMemcpy( d_vertMem,h_vertMem, sizeof(int) * m_maxNumVert * numBlock * VERTMEMLENGTH, hipMemcpyHostToDevice));
cudaSafeCall( hipMemcpy( d_vertMemOutside,h_vertMemOutside, sizeof(int) * m_maxNumVert * numBlock * VERTMEMLENGTHOUTSIDE, hipMemcpyHostToDevice));
cudaSafeCall( hipMemcpy( d_BlockSizes,h_BlockSizes, sizeof(int) * numBlock, hipMemcpyHostToDevice));
cudaSafeCall( hipMemcpy( d_blockCon,h_blockCon, sizeof(int) * numBlock, hipMemcpyHostToDevice));
if (verbose)
printf("max number of triangles per block: %d\n", m_maxNumTotalFaces);
int nTotalIter = 0;
hipEventRecord(start,0);
int totalIterationNumber = 0;
std::vector<std::vector<float> > iteration_values;
while ( numActive > 0)
{
///////////step 1: run solver /////////////////////////////////////////////////////////////
nTotalIter++;
if (nTotalIter > maxIterations) break;
totalIterationNumber += numActive;
if (verbose ) {
size_t act = numActive / 3;
for(size_t ab = 0; ab < 60; ab++) {
if (ab < act)
printf("=");
else
printf(" ");
}
printf(" %d Active blocks.\n", numActive);
}
dim3 dimGrid(numActive, 1);
dim3 dimBlock(m_maxNumTotalFaces, 1);
cudaSafeCall( hipMemcpy( d_ActiveList,h_ActiveList,sizeof(int) * numBlock, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( FIMCuda), dim3(dimGrid), dim3(dimBlock), m_maxNumTotalFaces*TRIMEMLENGTH*sizeof(float)+m_maxNumVert*VERTMEMLENGTH*sizeof(short), 0, d_triMem,d_triMemOut, d_vertMem,d_vertMemOutside,d_edgeMem0,d_edgeMem1,d_edgeMem2, d_speed, d_BlockSizes, d_con,d_ActiveList, numActive,m_maxNumTotalFaces, m_maxNumVert, m_StopDistance);
cudaCheckErrors();
//////////////////////step 2: reduction////////////////////////////////////////////////
dimBlock = dim3(REDUCTIONSHARESIZE / 2 , 1);
hipLaunchKernelGGL(( run_reduction), dim3(dimGrid), dim3(dimBlock)/*, sizeof(int)*m_maxNumVert*/, 0, 0, d_con, d_blockCon,d_ActiveList, numActive, d_BlockSizes);
cudaCheckErrors();
//////////////////////////////////////////////////////////////////
// 3. check neighbor tiles of converged tiles:
// any FARP (far) neighbor block of a converged block is marked active
// and inserted into the active list
cudaSafeCall( hipMemcpy(h_blockCon, d_blockCon, numBlock*sizeof(int), hipMemcpyDeviceToHost) );
int nOldActiveBlock = numActive;
for (size_t i = 0; i < static_cast<size_t>(nOldActiveBlock); i++)
{
// check neighbors of current active tile
uint currBlkIdx = h_ActiveList[i];
if(h_blockCon[currBlkIdx]) // not active : converged
{
set<int> nb = m_BlockNeighbor[currBlkIdx];
set<int>::iterator iter;
for( iter = nb.begin(); iter != nb.end() ; iter++)
{
int currIdx = *iter;
if(h_BlockLabel[currIdx] == FARP)
{
h_BlockLabel[currIdx] = ACTIVE;
h_ActiveList[numActive++] = currIdx;
}
}
}
}
//////////////////////////////////////////////////////////////////
// 4. run solver only once for neighbor blocks of converged block
// current active list contains active blocks and neighbor blocks of
// any converged blocks
//
cudaSafeCall( hipMemcpy(d_ActiveList, h_ActiveList, numActive*sizeof(int), hipMemcpyHostToDevice) );
dimGrid = dim3(numActive, 1);
dimBlock = dim3(m_maxNumTotalFaces, 1);
hipLaunchKernelGGL(( run_check_neighbor), dim3(dimGrid), dim3(dimBlock), m_maxNumTotalFaces*TRIMEMLENGTH*sizeof(float) +
m_maxNumVert*VERTMEMLENGTH*sizeof(short), 0, d_triMemOut, d_triMem, d_vertMem, d_vertMemOutside,
d_edgeMem0, d_edgeMem1, d_edgeMem2, d_speed, d_BlockSizes, d_con, d_ActiveList, nOldActiveBlock,
m_maxNumTotalFaces, m_maxNumVert, numActive, static_cast<int>(m_StopDistance));
cudaCheckErrors();
//////////////////////////////////////////////////////////////////
// 5. reduction
dimGrid = dim3(numActive, 1);
dimBlock = dim3(REDUCTIONSHARESIZE / 2 , 1);
hipLaunchKernelGGL(( run_reduction), dim3(dimGrid), dim3(dimBlock)/*, sizeof(int)*m_maxNumVert*/, 0, 0, d_con, d_blockCon,d_ActiveList,numActive, d_BlockSizes);
cudaCheckErrors();
//////////////////////////////////////////////////////////////////
// 6. update active list
// read back active volume from the device and add
// active block to active list on the host memory
numActive = 0;
cudaSafeCall( hipMemcpy(h_blockCon, d_blockCon, numBlock*sizeof(int), hipMemcpyDeviceToHost) );
for (uint i = 0; i < static_cast<size_t>(numBlock); i++)
{
if(!h_blockCon[i]) // false : activate block (not converged)
{
h_BlockLabel[i] = ACTIVE;
h_ActiveList[numActive++] = i;
}
else h_BlockLabel[i] = FARP;
}
////////////////////////copy values from each iteration
cudaSafeCall( hipMemcpy(h_triMem, d_triMem,sizeof(float) *
m_maxNumTotalFaces * numBlock * TRIMEMLENGTH , hipMemcpyDeviceToHost) );
for(int i =0; i < numVert; i++) {
m_meshPtr->vertT[i] = h_triMem[blockVertMapping[i][0]];
}
iteration_values.push_back(m_meshPtr->vertT);
////////////////////////////////END copy
}
cudaSafeCall( hipDeviceSynchronize() );
hipEventRecord(stop,0);
hipEventSynchronize(stop);
cudaSafeCall( hipMemcpy(h_triMem, d_triMem,sizeof(float) * m_maxNumTotalFaces *
numBlock * TRIMEMLENGTH , hipMemcpyDeviceToHost) );
cudaSafeCall( hipDeviceSynchronize() );
hipEventRecord(stopCopy,0);
hipEventSynchronize(stopCopy);
float totalTime, totalAndCopyTime;
hipEventElapsedTime(&totalTime, start, stop);
hipEventElapsedTime(&totalAndCopyTime, startCopy, stopCopy);
cudaCheckErrors();
if (verbose) {
printf("Total Processing time: %f (ms)\n", totalTime);
printf("Total Processing time and copy time: %f (ms)\n", totalAndCopyTime);
printf("The iteration number: %d\n", nTotalIter );
printf("The total iteration number: %d\n", totalIterationNumber );
printf("The total localsolver calls per vertex: %f\n",
totalIterationNumber*m_maxNumTotalFaces*(NITER+1)*3.0 / (float)numVert);
}
vec_triMem.resize(m_maxNumTotalFaces * numBlock * 3);
float maxVertT = 0;
for(int i = 0 ; i < m_maxNumTotalFaces * numBlock; i++)
{
vec_triMem[3*i + 0] = h_triMem[i*TRIMEMLENGTH + 0];
vec_triMem[3*i + 1] = h_triMem[i*TRIMEMLENGTH + 1];
vec_triMem[3*i + 2] = h_triMem[i*TRIMEMLENGTH + 2];
if(h_triMem[i*TRIMEMLENGTH + 0] >= LARGENUM)
vec_triMem[3*i + 0] = -2;
if(h_triMem[i*TRIMEMLENGTH + 1] >= LARGENUM)
vec_triMem[3*i + 1] = -2;
if(h_triMem[i*TRIMEMLENGTH + 2] >= LARGENUM)
vec_triMem[3*i + 2] = -2;
maxVertT = MAX(maxVertT,MAX(vec_triMem[3*i + 2] ,
MAX(vec_triMem[3*i + 1] , vec_triMem[3*i + 0])));
}
int vertIndex = 0;
for(int i =0; i < numVert; i++)
{
m_meshPtr->vertT[i] = h_triMem[blockVertMapping[i][0]];
if(m_meshPtr->vertT[i] == maxVertT)
vertIndex = i;
}
if (verbose)
printf("The maximun vertT is: %f, the vert index is: %d \n", maxVertT,vertIndex );
cudaSafeCall( hipFree(d_ActiveList));
cudaSafeCall( hipFree(d_triMem));
cudaSafeCall( hipFree(d_vertMem));
cudaSafeCall( hipFree(d_edgeMem0));
cudaSafeCall( hipFree(d_edgeMem1));
cudaSafeCall( hipFree(d_edgeMem2));
cudaSafeCall( hipFree(d_speed));
cudaSafeCall( hipFree(d_con));
cudaSafeCall( hipFree(d_blockCon));
free(h_ActiveList);
free(h_edgeMem0);
free(h_edgeMem1);
free(h_edgeMem2);
free(h_speed);
free(h_triMem);
free(h_vertMem);
free(h_BlockLabel);
free(h_blockCon);
free(h_BlockSizes);
return iteration_values;
}
| e3131906009dd0f67273640053401d25d4f35e19.cu |
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// modified to use only 7 floats for triMem
//1. #define TRIMEMLENGTH 7
//2. in FIMCuda and run_neighbor_check, add initialization of old at the beginning of each iteration
//3. in FIMCuda and run_neighbor_check, s_triMem[tx*TRIMEMLENGTH + 3 + C] = TC after each iteration instead of s_triMem[tx*TRIMEMLENGTH + 6 + C] = TC
//4. in FIMCuda and run_neighbor_check, in the reconcile step, there should be no +3 in fetching the location of triMem
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "meshFIM2d_eikonal.h"
#include "Vec.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cutil.h>
#include "CUDADefines.h"
#include <sstream>
#include <time.h>
#ifdef WIN32
#include <io.h>
#define unlink _unlink
#else
#include <unistd.h>
#endif
extern "C" {
#include <metis.h>
}
/////declaration for cuda kernels///////////////////////////
extern __global__ void run_reduction(int *con, int *blockCon,int* ActiveList, int nActiveBlock, int* blockSizes);
extern __global__ void FIMCuda(float* d_triMem,float* d_triMemOut, int* d_vertMem, int* d_vertMemOutside, float* d_edgeMem0,float* d_edgeMem1,float* d_edgeMem2,float* d_speed, int* d_BlockSizes, int* d_con, int* ActiveList, int nActiveBlock,int maxNumTotalFaces, int maxNumVert,/*int nIter, */float m_StopDistance);
extern __global__ void run_check_neighbor(float* d_triMem,float* d_triMemOut, int* d_vertMem,int* d_vertMemOutside,float* d_edgeMem0,float* d_edgeMem1,float* d_edgeMem2, float* d_speed, int* d_BlockSizes, int* d_con,int* d_ActiveList, int numOldActive ,int maxNumTotalFaces, int maxNumVert,int nTotalActive, int m_StopDistance);
#if __DEVICE_EMULATION__
bool InitCUDA(void){return true;}
#else
bool InitCUDA(void)
{
int count = 0;
int i = 0;
cudaGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++) {
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
cudaSetDevice(i);
return true;
}
#endif
/////////////////////////////////////////////////////////////////////////////
void meshFIM2dEikonal::writeVTK(std::vector< std::vector <float> > time_values)
{
size_t nv = m_meshPtr->vertices.size();
size_t nt = m_meshPtr->faces.size();
for (size_t j = 0; j < time_values.size(); j++) {
FILE* vtkfile;
std::stringstream ss;
ss << "result" << j << ".vtk";
vtkfile = fopen(ss.str().c_str(), "w+");
fprintf(vtkfile, "# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET UNSTRUCTURED_GRID\n");
fprintf(vtkfile, "POINTS %d float\n", nv);
for (size_t i = 0; i < nv; i++)
{
fprintf(vtkfile, "%.12f %.12f %.12f\n", m_meshPtr->vertices[i][0],
m_meshPtr->vertices[i][1], m_meshPtr->vertices[i][2]);
}
fprintf(vtkfile, "CELLS %d %d\n", nt, nt * 4);
for (size_t i = 0; i < nt; i++)
{
fprintf(vtkfile, "3 %d %d %d\n", m_meshPtr->faces[i][0],
m_meshPtr->faces[i][1], m_meshPtr->faces[i][2]);
}
fprintf(vtkfile, "CELL_TYPES %d\n", nt);
for (size_t i = 0; i < nt; i++)
{
fprintf(vtkfile, "5\n");
}
fprintf(vtkfile, "POINT_DATA %d\nSCALARS traveltime float 1\nLOOKUP_TABLE default\n", nv);
for (size_t i = 0; i < nv; i++)
{
fprintf(vtkfile, "%.12f\n", time_values[j][i]);
}
fclose(vtkfile);
}
}
//create .mesh file from trimesh faces and call partnmesh method
//to partition and create intermediate mesh.npart.N file and then read this file
void meshFIM2dEikonal::GraphPartition_METIS2(int& numBlock, int maxNumBlockVerts, bool verbose)
{
FILE * outf;
outf = fopen("tmp.mesh", "w+");
if(outf == NULL)
{
printf("Cannot open mesh file to write!!!!\n");
exit(1);
}
size_t sz = m_meshPtr->faces.size();
fprintf(outf,"%d 1\n", sz);
for (int i=0;i<sz;i++)
fprintf(outf, "%d %d %d\n",m_meshPtr->faces[i].v[0]+1,m_meshPtr->faces[i].v[1]+1,m_meshPtr->faces[i].v[2]+1);
fclose(outf);
size_t numVert = m_meshPtr->vertices.size();
m_PartitionLabel.resize(numVert);
char outputFileName[512];
char meshfile[] = "tmp.mesh";
if(numBlock == 0)
{
numBlock = static_cast<int>(numVert) / maxNumBlockVerts;
do{
numBlock++;
m_BlockSizes.resize(numBlock);
for(int i=0; i< numBlock;i++)
{
m_BlockSizes[i] = 0;
}
partnmesh(meshfile,numBlock,verbose?1:0);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
FILE* partFile = fopen(outputFileName, "r+");
if(partFile == NULL)
{
printf("NO part file found: %s\n",outputFileName);
exit(1);
}
for(int i = 0; i < numVert; i++)
{
fscanf(partFile, "%d", &m_PartitionLabel[i]);
}
for(int i = 0; i<numVert; i++)
{
m_BlockSizes[m_PartitionLabel[i]]++;
}
m_maxNumVert = 0;
for(int i = 0 ; i < numBlock; i++)
{
m_maxNumVert = MAX(m_maxNumVert, m_BlockSizes[i]);
}
fclose(partFile);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
unlink(outputFileName);
sprintf(outputFileName, "tmp.mesh.epart.%d", numBlock);
unlink(outputFileName);
}while(m_maxNumVert != maxNumBlockVerts);
}
else
{
m_BlockSizes.resize(numBlock);
for(int i=0; i< numBlock;i++)
{
m_BlockSizes[i] = 0;
}
partnmesh(meshfile,numBlock,verbose?1:0);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
FILE* partFile = fopen(outputFileName, "r+");
if(partFile == NULL)
{
printf("NO part file found: %s\n",outputFileName);
exit(1);
}
for(int i = 0; i < numVert; i++)
{
fscanf(partFile, "%d", &m_PartitionLabel[i]);
}
for(int i = 0; i<numVert; i++)
{
m_BlockSizes[m_PartitionLabel[i]]++;
}
m_maxNumVert = 0;
for(int i = 0 ; i < numBlock; i++)
{
m_maxNumVert = MAX(m_maxNumVert, m_BlockSizes[i]);
}
if (verbose)
printf("max num vert is : %d\n", m_maxNumVert);
fclose(partFile);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
unlink(outputFileName);
sprintf(outputFileName, "tmp.mesh.epart.%d", numBlock);
unlink(outputFileName);
}
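// Assign each block a random color (used only for visualization) and record which vertices belong to each block.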
srand( (unsigned)time( NULL ) );
if (verbose)
printf("numBlock is : %d\n", numBlock);
float r,g,b;
vector< Color > colors;
colors.resize(numBlock);
for(int i = 0; i< numBlock; i++)
{
r = rand() / (float)RAND_MAX;
g = rand() / (float)RAND_MAX;
b = rand() / (float)RAND_MAX;
colors[i] = Color(r,g,b);
}
m_meshPtr->colors.resize(numVert);
m_PartitionVerts.resize(numBlock);
for(int i = 0; i<numVert; i++)
{
m_PartitionVerts[m_PartitionLabel[i]].push_back(i);
m_meshPtr->colors[i] = colors[m_PartitionLabel[i]];
}
unlink("tmp.mesh");
/*
typedef cusp::array1d<int, cusp::host_memory> IdxVector_h;
typedef cusp::array1d<int, cusp::device_memory> IdxVector_d;
/////////BETTER WAY TODO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
int options[10], pnumflag = 0, wgtflag = 0;
options[0] = 0;
int edgecut;
npart_h = IdxVector_h(numVert);
nparts = numVert / metissize;
if (nparts < 2)
nparts = 2;
// Counting up edges for adjacency:
int edgeCount = 0;
for (int vIt = 0; vIt < numVert; vIt++)
{
edgeCount += m_meshPtr->neighbors[vIt].size();
}
m_largest_num_inside_mem = 0;
//for(int bidx = 0; bidx < nparts; bidx++)
for (int i = 0; i < numVert; i++)
{
if (m_meshPtr->adjacentfaces[i].size() > m_largest_num_inside_mem)
m_largest_num_inside_mem = m_meshPtr->adjacentfaces[i].size();
}
if (verbose)
printf("m_largest_num_inside_mem = %d\n", m_largest_num_inside_mem);
//Allocating storage for array values of adjacency
int* xadj = new int[numVert + 1];
int* adjncy = new int[edgeCount];
// filling the arrays:
xadj[0] = 0;
int idx = 0;
IdxVector_h neighbor_sizes(numVert);
// Populating the arrays:
for (int i = 1; i < numVert + 1; i++)
{
neighbor_sizes[i - 1] = m_meshPtr->neighbors[i - 1].size();
xadj[i] = xadj[i - 1] + m_meshPtr->neighbors[i - 1].size();
for (int j = 0; j < m_meshPtr->neighbors[i - 1].size(); j++)
{
adjncy[idx++] = m_meshPtr->neighbors[i - 1][j];
}
}
m_neighbor_sizes_d = neighbor_sizes;
int* npart_h_ptr = thrust::raw_pointer_cast(&npart_h[0]);
METIS_PartGraphKway(&numVert, xadj, adjncy, NULL, NULL, &wgtflag,
&pnumflag, &nparts, options, &edgecut, npart_h_ptr);
m_xadj_d = IdxVector_d(&xadj[0], &xadj[numVert + 1]);
m_adjncy_d = IdxVector_d(&adjncy[0], &adjncy[edgeCount]);
IdxVector_h part_sizes(nparts, 0);
for (int i = 0; i < numVert; i++)
{
part_sizes[npart_h[i]]++;
}
int min_part_size = thrust::reduce(part_sizes.begin(),
part_sizes.end(), 100000000, thrust::minimum<int>());
largest_vert_part = thrust::reduce(part_sizes.begin(),
part_sizes.end(), -1, thrust::maximum<int>());
if (verbose)
printf("Largest vertex partition size is: %d\n", largest_vert_part);
if (min_part_size == 0) printf("Min partition size is 0!!\n");
delete[] xadj;
delete[] adjncy;*/
}
void meshFIM2dEikonal::GraphPartition_Square(int squareLength,int squareWidth, int blockLength, int blockWidth, bool verbose)
{
size_t numVert = m_meshPtr->vertices.size();
m_PartitionLabel.resize(numVert);
int numBlockLength = (squareLength / blockLength);
int numBlockWidth = (squareWidth / blockWidth);
int numBlock = numBlockLength * numBlockWidth;
for(int i = 0; i< squareWidth; i++)
for(int j =0; j< squareLength; j++)
{
m_PartitionLabel[i*squareLength+j] = (i/blockWidth) * numBlockLength + (j/blockLength);
}
m_BlockSizes.resize(numBlock);
for(int i =0; i<numBlock; i++)
m_BlockSizes[i] = 0;
float r,g,b;
vector< Color > colors;
colors.resize(numBlock);
for(int i = 0; i< numBlock; i++)
{
r = rand() / (float)RAND_MAX;
g = rand() / (float)RAND_MAX;
b = rand() / (float)RAND_MAX;
colors[i] = Color(r,g,b);
}
m_meshPtr->colors.resize(numVert);
m_PartitionVerts.resize(numBlock);
for(int i = 0; i<numVert; i++)
{
m_PartitionVerts[m_PartitionLabel[i]].push_back(i);
m_BlockSizes[m_PartitionLabel[i]]++;
m_meshPtr->colors[i] = colors[m_PartitionLabel[i]];
}
m_maxNumVert = 0;
for(int i = 0 ; i < numBlock; i++)
{
m_maxNumVert = MAX(m_maxNumVert, m_BlockSizes[i]);
}
if (verbose)
printf("final number of blocks: %d\n", numBlock);
}
void meshFIM2dEikonal::PartitionFaces(int numBlock)
{
///////////////////step 3: partition faces//////////////////////////////////////
m_PartitionFaces.resize(numBlock);
m_PartitionNbFaces.resize(numBlock);
size_t numFaces = m_meshPtr->faces.size();
TriMesh::Face f;
int labelv0;
int labelv1;
int labelv2;
vector<TriMesh::Face> virtualfaces;
vector<int> virtualFaceCnt;
virtualFaceCnt.resize(numBlock);
m_PartitionVirtualFaces.resize(numBlock);
for(int i = 0; i< numBlock; i++)
virtualFaceCnt[i] = 0;
m_BlockNeighbor.resize(numBlock);
for(int i = 0; i < numFaces; i++)
{
f = m_meshPtr->faces[i];
size_t vfCnt = m_meshPtr->faceVirtualFaces[i].size();
for(int k = 0 ; k < 3; k++)
{
if(!m_meshPtr->IsNonObtuse(f[k], f))
{
virtualFaceCnt[m_PartitionLabel[f[k]]] += static_cast<int>(vfCnt);
m_PartitionVirtualFaces[m_PartitionLabel[f[k]]].insert(m_PartitionVirtualFaces[m_PartitionLabel[f[k]]].end(), m_meshPtr->faceVirtualFaces[i].begin(), m_meshPtr->faceVirtualFaces[i].end());
}
}
labelv0 = m_PartitionLabel[f[0]];
labelv1 = m_PartitionLabel[f[1]];
labelv2 = m_PartitionLabel[f[2]];
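// Classify the face by how its three vertex labels agree: all-same faces are interior to one block,
// otherwise the face is a boundary face of every block involved and those blocks are recorded as neighbors.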
if(labelv0 == labelv1 && labelv1 == labelv2)
{
m_PartitionFaces[labelv0].push_back(i);
}
else if(labelv0 == labelv1 && labelv1 != labelv2)
{
m_PartitionNbFaces[labelv0].push_back(i);
m_PartitionNbFaces[labelv2].push_back(i);
m_BlockNeighbor[labelv0].insert(m_BlockNeighbor[labelv0].end(), labelv2);
m_BlockNeighbor[labelv2].insert(m_BlockNeighbor[labelv2].end(), labelv0);
}
else if(labelv0 != labelv1 && labelv1 == labelv2)
{
m_PartitionNbFaces[labelv0].push_back(i);
m_PartitionNbFaces[labelv2].push_back(i);
m_BlockNeighbor[labelv0].insert(m_BlockNeighbor[labelv0].end(), labelv2);
m_BlockNeighbor[labelv2].insert(m_BlockNeighbor[labelv2].end(), labelv0);
}
else if(labelv0 == labelv2 && labelv1 != labelv2)
{
m_PartitionNbFaces[labelv0].push_back(i);
m_PartitionNbFaces[labelv1].push_back(i);
m_BlockNeighbor[labelv0].insert(m_BlockNeighbor[labelv0].end(), labelv1);
m_BlockNeighbor[labelv1].insert(m_BlockNeighbor[labelv1].end(), labelv0);
}
else //all different
{
m_PartitionNbFaces[labelv0].push_back(i);
m_PartitionNbFaces[labelv1].push_back(i);
m_PartitionNbFaces[labelv2].push_back(i);
m_BlockNeighbor[labelv0].insert(m_BlockNeighbor[labelv0].end(), labelv2);
m_BlockNeighbor[labelv2].insert(m_BlockNeighbor[labelv2].end(), labelv0);
m_BlockNeighbor[labelv0].insert(m_BlockNeighbor[labelv0].end(), labelv1);
m_BlockNeighbor[labelv1].insert(m_BlockNeighbor[labelv1].end(), labelv0);
m_BlockNeighbor[labelv1].insert(m_BlockNeighbor[labelv1].end(), labelv2);
m_BlockNeighbor[labelv2].insert(m_BlockNeighbor[labelv2].end(), labelv1);
}
}
vector<int> PartitionToltalFaces;
PartitionToltalFaces.resize(numBlock);
m_maxNumTotalFaces = 0;
for(int j = 0; j < numBlock; j++)
{
PartitionToltalFaces[j] = static_cast<int>(m_PartitionFaces[j].size() +
m_PartitionNbFaces[j].size() + virtualFaceCnt[j]);
m_maxNumTotalFaces = MAX(PartitionToltalFaces[j],m_maxNumTotalFaces );
}
}
std::vector< std::vector<float> > meshFIM2dEikonal::GenerateData(int numBlock,
int maxIterations, bool verbose)
{
size_t numVert = m_meshPtr->vertices.size();
size_t numFaces = m_meshPtr->faces.size();
if(!InitCUDA()) {
exit(1);
}
index *d_ActiveList= 0;
int *d_con;
int *d_con_forComputaion;
int *d_blockCon;
float *d_triMem;
float *d_edgeMem0;
float *d_edgeMem1;
float *d_edgeMem2;
float *d_speed;
float *d_triMemOut;
int *d_vertMem;
int *d_BlockSizes;
index *h_ActiveList= 0; //list of active blocks
int *h_BlockLabel = 0; //block active or not
float *h_triMem;
float *h_edgeMem0;
float *h_edgeMem1;
float *h_edgeMem2;
float *h_speed;
int *h_vertMem;
int *h_blockCon;
int *h_BlockSizes;
/////////////////////////////malloc cpu memories///////////////////////////
h_BlockLabel = (int*) malloc(sizeof(int) * numBlock);
h_edgeMem0 = (float*)malloc(sizeof(float) * m_maxNumTotalFaces * numBlock);
h_edgeMem1 = (float*)malloc(sizeof(float) * m_maxNumTotalFaces * numBlock);
h_edgeMem2 = (float*)malloc(sizeof(float) * m_maxNumTotalFaces * numBlock);
h_speed = (float*)malloc(sizeof(float) * m_maxNumTotalFaces * numBlock);
h_triMem = (float*)malloc(sizeof(float) * TRIMEMLENGTH * m_maxNumTotalFaces * numBlock);
h_vertMem = (int*)malloc(sizeof(int) * VERTMEMLENGTH * m_maxNumVert * numBlock);
h_BlockSizes = (int*)malloc(sizeof(int) * numBlock);
h_blockCon = (int*)malloc(sizeof(int) * numBlock);
/////////////////////////malloc gpu memories//////////////////////////////
cudaSafeCall( cudaMalloc((void**) &d_con, sizeof(int) * numBlock * REDUCTIONSHARESIZE));
cudaSafeCall( cudaMalloc((void**) &d_con_forComputaion, sizeof(int) * numBlock * REDUCTIONSHARESIZE));
cudaSafeCall( cudaMalloc((void**) &d_blockCon, sizeof(int) * numBlock));
cudaSafeCall( cudaMalloc((void**) &d_triMem, sizeof(float) * TRIMEMLENGTH * m_maxNumTotalFaces * numBlock));
cudaSafeCall( cudaMalloc((void**) &d_triMemOut, sizeof(float) * TRIMEMLENGTH * m_maxNumTotalFaces * numBlock));
cudaSafeCall( cudaMalloc((void**) &d_edgeMem0, sizeof(float) * m_maxNumTotalFaces * numBlock));
cudaSafeCall( cudaMalloc((void**) &d_edgeMem1, sizeof(float) * m_maxNumTotalFaces * numBlock));
cudaSafeCall( cudaMalloc((void**) &d_edgeMem2, sizeof(float) * m_maxNumTotalFaces * numBlock));
cudaSafeCall( cudaMalloc((void**) &d_speed, sizeof(float) * m_maxNumTotalFaces * numBlock));
cudaSafeCall( cudaMalloc((void**) &d_vertMem, sizeof(int) * VERTMEMLENGTH * m_maxNumVert * numBlock));
cudaSafeCall( cudaMalloc((void**) &d_BlockSizes, sizeof(int) * numBlock));
/////////////////initialize cpu memories//////////////////////////////
vector< vector<int> > blockVertMapping;
blockVertMapping.resize(numVert); //for each vertex, store the addresses where it appears in the global triMem array.
for( int i = 0; i < numBlock; i++)
{
int blockIdx = i * m_maxNumTotalFaces * TRIMEMLENGTH;
size_t numPF = m_PartitionFaces[i].size();
for(int j = 0; j< numPF; j++)
{
h_edgeMem0[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionFaces[i][j]].edgeLens[0];
h_edgeMem1[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionFaces[i][j]].edgeLens[1];
h_edgeMem2[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionFaces[i][j]].edgeLens[2];
h_triMem[blockIdx + j*TRIMEMLENGTH + 0] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 1] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 2] = LARGENUM;
h_speed[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionFaces[i][j]].speedInv;
blockVertMapping[m_meshPtr->faces[m_PartitionFaces[i][j]][0]].push_back(blockIdx + j*TRIMEMLENGTH + 0);
blockVertMapping[m_meshPtr->faces[m_PartitionFaces[i][j]][1]].push_back(blockIdx + j*TRIMEMLENGTH + 1);
blockVertMapping[m_meshPtr->faces[m_PartitionFaces[i][j]][2]].push_back(blockIdx + j*TRIMEMLENGTH + 2);
}
}
for( int i = 0; i < numBlock; i++)
{
h_blockCon[i] = 1;
h_BlockLabel[i] = m_BlockLabel[i];
h_BlockSizes[i] = m_BlockSizes[i];
int blockIdx = i * m_maxNumTotalFaces * TRIMEMLENGTH;
size_t numPF = m_PartitionFaces[i].size();
size_t numPNF = m_PartitionNbFaces[i].size();
size_t numPVF = m_PartitionVirtualFaces[i].size();
int k = 0;
int l = 0;
for(int j = static_cast<int>(numPF); j< m_maxNumTotalFaces; j++)
{
if( j < numPF + numPNF)
{
h_edgeMem0[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionNbFaces[i][k]].edgeLens[0];
h_edgeMem1[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionNbFaces[i][k]].edgeLens[1];
h_edgeMem2[i * m_maxNumTotalFaces + j]= m_meshPtr->faces[m_PartitionNbFaces[i][k]].edgeLens[2];
h_triMem[blockIdx + j*TRIMEMLENGTH + 0] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 1] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 2] = LARGENUM;
h_speed[i * m_maxNumTotalFaces + j] = m_meshPtr->faces[m_PartitionNbFaces[i][k]].speedInv;
blockVertMapping[m_meshPtr->faces[m_PartitionNbFaces[i][k]][0]].push_back(blockIdx + j*TRIMEMLENGTH + 0);
blockVertMapping[m_meshPtr->faces[m_PartitionNbFaces[i][k]][1]].push_back(blockIdx + j*TRIMEMLENGTH + 1);
blockVertMapping[m_meshPtr->faces[m_PartitionNbFaces[i][k]][2]].push_back(blockIdx + j*TRIMEMLENGTH + 2);
k++;
}
else if (j < numPF + numPNF + numPVF)
{
h_edgeMem0[i * m_maxNumTotalFaces + j]= m_PartitionVirtualFaces[i][l].edgeLens[0];
h_edgeMem1[i * m_maxNumTotalFaces + j]= m_PartitionVirtualFaces[i][l].edgeLens[1];
h_edgeMem2[i * m_maxNumTotalFaces + j] = m_PartitionVirtualFaces[i][l].edgeLens[2];
h_triMem[blockIdx + j*TRIMEMLENGTH + 0] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 1] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 2] = LARGENUM;
h_speed[i * m_maxNumTotalFaces + j] =m_PartitionVirtualFaces[i][l].speedInv;
blockVertMapping[m_PartitionVirtualFaces[i][l][0]].push_back(blockIdx + j*TRIMEMLENGTH + 0);
blockVertMapping[m_PartitionVirtualFaces[i][l][1]].push_back(blockIdx + j*TRIMEMLENGTH + 1);
blockVertMapping[m_PartitionVirtualFaces[i][l][2]].push_back(blockIdx + j*TRIMEMLENGTH + 2);
l++;
}
else
{
h_triMem[blockIdx + j*TRIMEMLENGTH + 0] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 1] = LARGENUM;
h_triMem[blockIdx + j*TRIMEMLENGTH + 2] = LARGENUM;
}
}
}
m_maxNumVertMapping = 0;
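// Ensure the first triMem address recorded for each vertex lies inside its own block's range
// (swap a suitable entry to the front if necessary), and track the largest mapping list size.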
for(int i =0; i < numVert; i++)
{
int blockIndex = m_PartitionLabel[i];
int tmp = blockVertMapping[i][0];
int maxi = (blockIndex+1) * m_maxNumTotalFaces * TRIMEMLENGTH;
int mini = blockIndex * m_maxNumTotalFaces * TRIMEMLENGTH;
if( ( tmp< mini) || (tmp >= maxi) )
{
for(int j =0; j < blockVertMapping[i].size(); j++)
if(blockVertMapping[i][j] >= mini && blockVertMapping[i][j] < maxi )
{
int swaptmp = tmp;
blockVertMapping[i][0] = blockVertMapping[i][j];
blockVertMapping[i][j] = swaptmp;
break;
}
}
m_maxNumVertMapping = static_cast<int>(MAX(m_maxNumVertMapping, blockVertMapping[i].size()));
}
for(int i =0; i < numVert; i++)
{
int blockIndex = m_PartitionLabel[i];
int tmp = blockVertMapping[i][0];
int maxi = (blockIndex+1) * m_maxNumTotalFaces * TRIMEMLENGTH;
int mini = blockIndex * m_maxNumTotalFaces * TRIMEMLENGTH;
if( ( tmp< mini) || (tmp >= maxi) )
{
printf(" WARNING: block beyond limits.");
}
}
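// Split each vertex's triMem addresses into those inside its own block's range and those owned by other blocks.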
vector< vector<int> > blockVertMappingInside;
vector< vector<int> > blockVertMappingOutside;
blockVertMappingInside.resize(numVert);
blockVertMappingOutside.resize(numVert);
for(int i = 0; i< numBlock; i++)
{
int triIdx = i * TRIMEMLENGTH * m_maxNumTotalFaces;
for(int m = 0; m < m_PartitionVerts[i].size(); m++)
{
vector<int> tmp = blockVertMapping[m_PartitionVerts[i][m]];
for(int n = 0; n < tmp.size(); n++)
{
if( tmp[n] >= triIdx + 0 && tmp[n] < triIdx + m_maxNumTotalFaces*TRIMEMLENGTH)
blockVertMappingInside[m_PartitionVerts[i][m]].push_back(tmp[n]);
else
{
blockVertMappingOutside[m_PartitionVerts[i][m]].push_back(tmp[n]);
}
}
}
}
int maxVertMappingInside = 0;
int maxVertMappingOutside = 0;
for(int i =0; i< numVert; i++)
{
maxVertMappingInside = static_cast<int>(MAX(maxVertMappingInside, (blockVertMappingInside[i].size())));
maxVertMappingOutside = static_cast<int>(MAX(maxVertMappingOutside, (blockVertMappingOutside[i].size())));
}
if (verbose) {
printf("maxVertMappingInside is: %d\n",maxVertMappingInside);
printf("maxVertMappingOutside is: %d\n",maxVertMappingOutside);
}
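// Fill h_vertMem: for each block, store each vertex's inside-block triMem addresses and pad unused
// slots with a block-local sentinel (the block's triMem base minus 1).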
for(int i = 0; i< numBlock; i++)
{
int vertIdx = i * VERTMEMLENGTH * m_maxNumVert;
for(int m = 0; m < m_PartitionVerts[i].size(); m++)
{
size_t tmpsize = blockVertMappingInside[m_PartitionVerts[i][m]].size();
int n = 0;
for(; n < tmpsize; n++)
h_vertMem[vertIdx + m*VERTMEMLENGTH + n] = blockVertMappingInside[m_PartitionVerts[i][m]][n];
for(;n<VERTMEMLENGTH; n++)
h_vertMem[vertIdx + m*VERTMEMLENGTH + n] = -1 + i*m_maxNumTotalFaces*TRIMEMLENGTH;
}
for (size_t m = m_PartitionVerts[i].size() * VERTMEMLENGTH; m < m_maxNumVert * VERTMEMLENGTH; m++)
{
h_vertMem[vertIdx + m] = -1 + i*m_maxNumTotalFaces*TRIMEMLENGTH;
}
}
int* h_vertMemOutside = (int*)malloc(m_maxNumVert * numBlock * VERTMEMLENGTHOUTSIDE * sizeof(int));
int* d_vertMemOutside;
cudaSafeCall( cudaMalloc((void**) &d_vertMemOutside, m_maxNumVert * numBlock * VERTMEMLENGTHOUTSIDE * sizeof(int) ) );
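// Fill h_vertMemOutside the same way for addresses that live in other blocks; unused slots are plain -1.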
for(int i = 0; i< numBlock; i++)
{
int vertIdx = i * VERTMEMLENGTHOUTSIDE * m_maxNumVert;
for(int m = 0; m < m_PartitionVerts[i].size(); m++)
{
size_t tmpsize = blockVertMappingOutside[m_PartitionVerts[i][m]].size();
int n = 0;
for(; n < tmpsize; n++)
h_vertMemOutside[vertIdx + m*VERTMEMLENGTHOUTSIDE + n] = blockVertMappingOutside[m_PartitionVerts[i][m]][n];
for(;n<VERTMEMLENGTHOUTSIDE; n++)
h_vertMemOutside[vertIdx + m*VERTMEMLENGTHOUTSIDE + n] = -1;
}
for(size_t m = m_PartitionVerts[i].size() * VERTMEMLENGTHOUTSIDE; m < m_maxNumVert * VERTMEMLENGTHOUTSIDE; m++)
{
h_vertMemOutside[vertIdx + m] = -1;
}
}
h_ActiveList = (int*)malloc(sizeof(int)*numBlock);
cudaSafeCall( cudaMalloc((void**) &d_ActiveList, sizeof(int) * numBlock));
//////////////////////////////////////////////////////////////////////////////////
vector<int> nb;
int numActive;
for( int i = 0; i < numBlock; i++)
{
h_blockCon[i] = 1;
h_BlockLabel[i] = m_BlockLabel[i];
h_BlockSizes[i] = m_BlockSizes[i];
}
//////////////initialize the seed points for h_triMem////////////////////////////////////
for(int i = 0; i< m_SeedPoints.size(); i++)
{
int seed = m_SeedPoints[i];
int seedBelongToBlock = m_PartitionLabel[seed];
h_blockCon[seedBelongToBlock] = 0;
for(int j = 0; j < blockVertMapping[seed].size(); j++)
{
h_triMem[blockVertMapping[seed][j]] = 0.0;
}
}
/////////////copy triMem and verMem to a vector just for debugging/////////////////
vector<float> vec_triMem;
vector<int> vec_vertMem;
vector<int> vec_vertMemOutside;
vec_triMem.resize(TRIMEMLENGTH * m_maxNumTotalFaces * numBlock);
vec_vertMem.resize(VERTMEMLENGTH * m_maxNumVert * numBlock);
vec_vertMemOutside.resize(VERTMEMLENGTHOUTSIDE * m_maxNumVert * numBlock);
for(int i =0; i < TRIMEMLENGTH * m_maxNumTotalFaces * numBlock; i++)
vec_triMem[i] = h_triMem[i];
for(int i = 0; i< VERTMEMLENGTH * m_maxNumVert * numBlock; i++)
vec_vertMem[i] = h_vertMem[i];
for(int i = 0; i< VERTMEMLENGTHOUTSIDE * m_maxNumVert * numBlock; i++)
vec_vertMemOutside[i] = h_vertMemOutside[i];
////////////////////////////////////////////////////////////////////////////
cudaSafeCall( cudaMemcpy( d_triMem,h_triMem, sizeof(float) * m_maxNumTotalFaces * numBlock * TRIMEMLENGTH, cudaMemcpyHostToDevice));
numActive = static_cast<int>(m_ActiveBlocks.size());
set<int>::iterator activeiter = m_ActiveBlocks.begin();
for(int i =0; activeiter != m_ActiveBlocks.end(); activeiter++)
h_ActiveList[i++] = *activeiter;
cudaEvent_t start, stop, startCopy, stopCopy;
cudaEventCreate(&start);
cudaEventCreate(&startCopy);
cudaEventCreate(&stopCopy);
cudaEventCreate(&stop);
cudaEventRecord(startCopy,0);
//////////////////copy to gpu memories///////////////////////////////
cudaSafeCall( cudaMemcpy( d_triMem,h_triMem, sizeof(float) * m_maxNumTotalFaces * numBlock * TRIMEMLENGTH, cudaMemcpyHostToDevice));
cudaSafeCall( cudaMemcpy( d_triMemOut,h_triMem, sizeof(float) * m_maxNumTotalFaces * numBlock * TRIMEMLENGTH, cudaMemcpyHostToDevice));
cudaSafeCall( cudaMemcpy( d_edgeMem0,h_edgeMem0, sizeof(float) * m_maxNumTotalFaces * numBlock , cudaMemcpyHostToDevice));
cudaSafeCall( cudaMemcpy( d_edgeMem1,h_edgeMem1, sizeof(float) * m_maxNumTotalFaces * numBlock , cudaMemcpyHostToDevice));
cudaSafeCall( cudaMemcpy( d_edgeMem2,h_edgeMem2, sizeof(float) * m_maxNumTotalFaces * numBlock , cudaMemcpyHostToDevice));
cudaSafeCall( cudaMemcpy( d_speed,h_speed, sizeof(float) * m_maxNumTotalFaces * numBlock , cudaMemcpyHostToDevice));
cudaSafeCall( cudaMemcpy( d_vertMem,h_vertMem, sizeof(int) * m_maxNumVert * numBlock * VERTMEMLENGTH, cudaMemcpyHostToDevice));
cudaSafeCall( cudaMemcpy( d_vertMemOutside,h_vertMemOutside, sizeof(int) * m_maxNumVert * numBlock * VERTMEMLENGTHOUTSIDE, cudaMemcpyHostToDevice));
cudaSafeCall( cudaMemcpy( d_BlockSizes,h_BlockSizes, sizeof(int) * numBlock, cudaMemcpyHostToDevice));
cudaSafeCall( cudaMemcpy( d_blockCon,h_blockCon, sizeof(int) * numBlock, cudaMemcpyHostToDevice));
if (verbose)
printf("max number of triangles per block: %d\n", m_maxNumTotalFaces);
int nTotalIter = 0;
cudaEventRecord(start,0);
int totalIterationNumber = 0;
std::vector<std::vector<float> > iteration_values;
while ( numActive > 0)
{
///////////step 1: run solver /////////////////////////////////////////////////////////////
nTotalIter++;
if (nTotalIter > maxIterations) break;
totalIterationNumber += numActive;
if (verbose ) {
size_t act = numActive / 3;
for(size_t ab = 0; ab < 60; ab++) {
if (ab < act)
printf("=");
else
printf(" ");
}
printf(" %d Active blocks.\n", numActive);
}
dim3 dimGrid(numActive, 1);
dim3 dimBlock(m_maxNumTotalFaces, 1);
cudaSafeCall( cudaMemcpy( d_ActiveList,h_ActiveList,sizeof(int) * numBlock, cudaMemcpyHostToDevice));
FIMCuda<<<dimGrid, dimBlock, m_maxNumTotalFaces*TRIMEMLENGTH*sizeof(float)+m_maxNumVert*VERTMEMLENGTH*sizeof(short)>>>( d_triMem,d_triMemOut, d_vertMem,d_vertMemOutside,d_edgeMem0,d_edgeMem1,d_edgeMem2, d_speed, d_BlockSizes, d_con,d_ActiveList, numActive,m_maxNumTotalFaces, m_maxNumVert, m_StopDistance);
cudaCheckErrors();
//////////////////////step 2: reduction////////////////////////////////////////////////
dimBlock = dim3(REDUCTIONSHARESIZE / 2 , 1);
run_reduction<<<dimGrid, dimBlock/*, sizeof(int)*m_maxNumVert*/>>>(d_con, d_blockCon,d_ActiveList, numActive, d_BlockSizes);
cudaCheckErrors();
//////////////////////////////////////////////////////////////////
// 3. check neighbor tiles of converged tiles:
// any FARP (far) neighbor block of a converged block is marked active
// and inserted into the active list
cudaSafeCall( cudaMemcpy(h_blockCon, d_blockCon, numBlock*sizeof(int), cudaMemcpyDeviceToHost) );
int nOldActiveBlock = numActive;
for (size_t i = 0; i < static_cast<size_t>(nOldActiveBlock); i++)
{
// check neighbors of current active tile
uint currBlkIdx = h_ActiveList[i];
if(h_blockCon[currBlkIdx]) // not active : converged
{
set<int> nb = m_BlockNeighbor[currBlkIdx];
set<int>::iterator iter;
for( iter = nb.begin(); iter != nb.end() ; iter++)
{
int currIdx = *iter;
if(h_BlockLabel[currIdx] == FARP)
{
h_BlockLabel[currIdx] = ACTIVE;
h_ActiveList[numActive++] = currIdx;
}
}
}
}
//////////////////////////////////////////////////////////////////
// 4. run solver only once for neighbor blocks of converged block
// current active list contains active blocks and neighbor blocks of
// any converged blocks
//
cudaSafeCall( cudaMemcpy(d_ActiveList, h_ActiveList, numActive*sizeof(int), cudaMemcpyHostToDevice) );
dimGrid = dim3(numActive, 1);
dimBlock = dim3(m_maxNumTotalFaces, 1);
run_check_neighbor << < dimGrid, dimBlock, m_maxNumTotalFaces*TRIMEMLENGTH*sizeof(float) +
m_maxNumVert*VERTMEMLENGTH*sizeof(short) >> >(d_triMemOut, d_triMem, d_vertMem, d_vertMemOutside,
d_edgeMem0, d_edgeMem1, d_edgeMem2, d_speed, d_BlockSizes, d_con, d_ActiveList, nOldActiveBlock,
m_maxNumTotalFaces, m_maxNumVert, numActive, static_cast<int>(m_StopDistance));
cudaCheckErrors();
//////////////////////////////////////////////////////////////////
// 5. reduction
dimGrid = dim3(numActive, 1);
dimBlock = dim3(REDUCTIONSHARESIZE / 2 , 1);
run_reduction<<<dimGrid, dimBlock/*, sizeof(int)*m_maxNumVert*/>>>(d_con, d_blockCon,d_ActiveList,numActive, d_BlockSizes);
cudaCheckErrors();
//////////////////////////////////////////////////////////////////
// 6. update active list
// read back active volume from the device and add
// active block to active list on the host memory
numActive = 0;
cudaSafeCall( cudaMemcpy(h_blockCon, d_blockCon, numBlock*sizeof(int), cudaMemcpyDeviceToHost) );
for (uint i = 0; i < static_cast<size_t>(numBlock); i++)
{
if(!h_blockCon[i]) // false : activate block (not converged)
{
h_BlockLabel[i] = ACTIVE;
h_ActiveList[numActive++] = i;
}
else h_BlockLabel[i] = FARP;
}
////////////////////////copy values from each iteration
cudaSafeCall( cudaMemcpy(h_triMem, d_triMem,sizeof(float) *
m_maxNumTotalFaces * numBlock * TRIMEMLENGTH , cudaMemcpyDeviceToHost) );
for(int i =0; i < numVert; i++) {
m_meshPtr->vertT[i] = h_triMem[blockVertMapping[i][0]];
}
iteration_values.push_back(m_meshPtr->vertT);
////////////////////////////////END copy
}
cudaSafeCall( cudaThreadSynchronize() );
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaSafeCall( cudaMemcpy(h_triMem, d_triMem,sizeof(float) * m_maxNumTotalFaces *
numBlock * TRIMEMLENGTH , cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaThreadSynchronize() );
cudaEventRecord(stopCopy,0);
cudaEventSynchronize(stopCopy);
float totalTime, totalAndCopyTime;
cudaEventElapsedTime(&totalTime, start, stop);
cudaEventElapsedTime(&totalAndCopyTime, startCopy, stopCopy);
cudaCheckErrors();
if (verbose) {
printf("Total Processing time: %f (ms)\n", totalTime);
printf("Total Processing time and copy time: %f (ms)\n", totalAndCopyTime);
printf("The iteration number: %d\n", nTotalIter );
printf("The total iteration number: %d\n", totalIterationNumber );
printf("The total localsolver calls per vertex: %f\n",
totalIterationNumber*m_maxNumTotalFaces*(NITER+1)*3.0 / (float)numVert);
}
vec_triMem.resize(m_maxNumTotalFaces * numBlock * 3);
float maxVertT = 0;
for(int i = 0 ; i < m_maxNumTotalFaces * numBlock; i++)
{
vec_triMem[3*i + 0] = h_triMem[i*TRIMEMLENGTH + 0];
vec_triMem[3*i + 1] = h_triMem[i*TRIMEMLENGTH + 1];
vec_triMem[3*i + 2] = h_triMem[i*TRIMEMLENGTH + 2];
if(h_triMem[i*TRIMEMLENGTH + 0] >= LARGENUM)
vec_triMem[3*i + 0] = -2;
if(h_triMem[i*TRIMEMLENGTH + 1] >= LARGENUM)
vec_triMem[3*i + 1] = -2;
if(h_triMem[i*TRIMEMLENGTH + 2] >= LARGENUM)
vec_triMem[3*i + 2] = -2;
maxVertT = MAX(maxVertT,MAX(vec_triMem[3*i + 2] ,
MAX(vec_triMem[3*i + 1] , vec_triMem[3*i + 0])));
}
int vertIndex = 0;
for(int i =0; i < numVert; i++)
{
m_meshPtr->vertT[i] = h_triMem[blockVertMapping[i][0]];
if(m_meshPtr->vertT[i] == maxVertT)
vertIndex = i;
}
if (verbose)
printf("The maximun vertT is: %f, the vert index is: %d \n", maxVertT,vertIndex );
cudaSafeCall( cudaFree(d_ActiveList));
cudaSafeCall( cudaFree(d_triMem));
cudaSafeCall( cudaFree(d_vertMem));
cudaSafeCall( cudaFree(d_edgeMem0));
cudaSafeCall( cudaFree(d_edgeMem1));
cudaSafeCall( cudaFree(d_edgeMem2));
cudaSafeCall( cudaFree(d_speed));
cudaSafeCall( cudaFree(d_con));
cudaSafeCall( cudaFree(d_blockCon));
free(h_ActiveList);
free(h_edgeMem0);
free(h_edgeMem1);
free(h_edgeMem2);
free(h_speed);
free(h_triMem);
free(h_vertMem);
free(h_BlockLabel);
free(h_blockCon);
free(h_BlockSizes);
return iteration_values;
}
|
bb368b75a3e19bee61bb7e064a7cb87e4cf0860b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__
void cudaGrayScale(float *R, float *G, float *B, float* gray, int n){
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i < n) {
gray[i] = static_cast<float>((R[i] * 0.21 + G[i] * 0.71 + B[i] * 0.07) / 350.0);
}
}
void grayscale(float* R, float* G, float* B, float* grayscale, int n){
int size = n * sizeof(float);
float *d_R, *d_G, *d_B, *d_gray;
hipMalloc((void **) &d_R, size);
hipMemcpy(d_R, R, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_G, size);
hipMemcpy(d_G, G, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_B, size);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_gray, size);
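// One thread per pixel: blocks of 1024 threads, ceil(n/1024) blocks cover all n elements; the kernel guards against out-of-range indices.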
hipLaunchKernelGGL(( cudaGrayScale), dim3(ceil(n/1024.0)), dim3(1024), 0, 0, d_R, d_G, d_B, d_gray, n);
hipMemcpy(grayscale, d_gray, size, hipMemcpyDeviceToHost);
hipFree(d_R);
hipFree(d_G);
hipFree(d_B);
hipFree(d_gray);
}
| bb368b75a3e19bee61bb7e064a7cb87e4cf0860b.cu | #include <cuda_runtime.h>
#include <cuda.h>
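// One thread per pixel: combines the separate R, G, B channel arrays into a weighted grayscale value, scaled by 1/350 before being stored in gray[].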
__global__
void cudaGrayScale(float *R, float *G, float *B, float* gray, int n){
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i < n) {
gray[i] = static_cast<float>((R[i] * 0.21 + G[i] * 0.71 + B[i] * 0.07) / 350.0);
}
}
void grayscale(float* R, float* G, float* B, float* grayscale, int n){
int size = n * sizeof(float);
float *d_R, *d_G, *d_B, *d_gray;
cudaMalloc((void **) &d_R, size);
cudaMemcpy(d_R, R, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_G, size);
cudaMemcpy(d_G, G, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_gray, size);
cudaGrayScale<<<ceil(n/1024.0), 1024>>>(d_R, d_G, d_B, d_gray, n);
cudaMemcpy(grayscale, d_gray, size, cudaMemcpyDeviceToHost);
cudaFree(d_R);
cudaFree(d_G);
cudaFree(d_B);
cudaFree(d_gray);
}
|
21172c2df730166d9daf8d5136ea1500ae7ee1eb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_log_full_device.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
unsigned int size = 1;
int epsilon = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernel_log_full_device), dim3(gridBlock),dim3(threadBlock), 0, 0, x,out,size,epsilon);
hipDeviceSynchronize();
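// Warm-up launches so the timed loop below excludes one-time startup costs.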
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel_log_full_device), dim3(gridBlock),dim3(threadBlock), 0, 0, x,out,size,epsilon);
}
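// Timed region: 1000 consecutive launches with this grid/block configuration.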
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel_log_full_device), dim3(gridBlock),dim3(threadBlock), 0, 0, x,out,size,epsilon);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 21172c2df730166d9daf8d5136ea1500ae7ee1eb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_log_full_device.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
unsigned int size = 1;
int epsilon = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_log_full_device<<<gridBlock,threadBlock>>>(x,out,size,epsilon);
cudaDeviceSynchronize();
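// Warm-up launches so the timed loop below excludes one-time startup costs.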
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_log_full_device<<<gridBlock,threadBlock>>>(x,out,size,epsilon);
}
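// Timed region: 1000 consecutive launches with this grid/block configuration.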
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_log_full_device<<<gridBlock,threadBlock>>>(x,out,size,epsilon);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
00e6f0f68d436f57f5efc6438ff83d2102313ad1.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __FFT_CU__
#define __FFT_CU__
#include "complex.cuh"
void fft(hipfftComplex *a,int n,float dt)
{
double pi=-PI;
if (dt<0)
pi=PI;
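// Reorder the input into bit-reversed order before the in-place butterfly passes.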
for (int m=n/2,j=0,i=1;i<n-1;i++)
{
int k;
for (k=m;k<=j;k/=2)
j-=k;
j+=k;
if(i<j)
{
hipfftComplex t=a[i];
a[i]=a[j];
a[j]=t;
}
}
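// In-place Cooley-Tukey butterfly passes; w is the per-stage twiddle factor, with direction (sign of pi) chosen above from dt.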
for (int m=1,step=2;m<n;m=step,step*=2)
{
hipfftComplex u=One;
hipfftComplex w=cmplx(cos(pi/m),sin(pi/m));
for (int j=0;j<m;j++)
{
for (int i=j;i<n;i+=step)
{
int k=i+m;
hipfftComplex t=cmltp(a[k], u);
a[k]=cplus(a[i],cngtv(t));
a[i]=cplus(a[i],t);
}
u=cmltp(u,w);
}
}
if (dt<0)
dt=-1/(n*dt);
for (int i=0;i<n;i++)
a[i]=dmltp(dt,a[i]);
}
void fftr(hipfftComplex *x,int n,float dt)
{
int n2=n/2;
float delw=PI/n;
hipfftComplex isg=IMAGE;
if (dt>0)
{
delw=-delw;
isg=cngtv(isg);
fft(x,n,dt);
}
x[0]=cmplx(x[0].x+x[0].y,x[0].x-x[0].y);
float w=delw;
for (int i=1;i<n2;i++)
{
int j=n-i;
hipfftComplex t=conjg(x[j]);
hipfftComplex g=cplus(x[i],t);
hipfftComplex h=cplus(x[i],cngtv(t));
h=cmltp(cmplx(cos(w),sin(w)),h);
x[i]=dmltp(0.5,cplus(g,cmltp(isg,h)));
x[j]=dmltp(0.5,cplus(conjg(g),cmltp(isg,conjg(h))));
w+=delw;
}
x[n2]=conjg(x[n2]);
if (dt<0)
{
x[0]=dmltp(0.5,x[0]);
fft(x,n,dt);
}
}
#endif | 00e6f0f68d436f57f5efc6438ff83d2102313ad1.cu | #ifndef __FFT_CU__
#define __FFT_CU__
#include "complex.cuh"
void fft(cufftComplex *a,int n,float dt)
{
double pi=-PI;
if (dt<0)
pi=PI;
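// Reorder the input into bit-reversed order before the in-place butterfly passes.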
for (int m=n/2,j=0,i=1;i<n-1;i++)
{
int k;
for (k=m;k<=j;k/=2)
j-=k;
j+=k;
if(i<j)
{
cufftComplex t=a[i];
a[i]=a[j];
a[j]=t;
}
}
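// In-place Cooley-Tukey butterfly passes; w is the per-stage twiddle factor, with direction (sign of pi) chosen above from dt.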
for (int m=1,step=2;m<n;m=step,step*=2)
{
cufftComplex u=One;
cufftComplex w=cmplx(cos(pi/m),sin(pi/m));
for (int j=0;j<m;j++)
{
for (int i=j;i<n;i+=step)
{
int k=i+m;
cufftComplex t=cmltp(a[k], u);
a[k]=cplus(a[i],cngtv(t));
a[i]=cplus(a[i],t);
}
u=cmltp(u,w);
}
}
if (dt<0)
dt=-1/(n*dt);
for (int i=0;i<n;i++)
a[i]=dmltp(dt,a[i]);
}
void fftr(cufftComplex *x,int n,float dt)
{
int n2=n/2;
float delw=PI/n;
cufftComplex isg=IMAGE;
if (dt>0)
{
delw=-delw;
isg=cngtv(isg);
fft(x,n,dt);
}
x[0]=cmplx(x[0].x+x[0].y,x[0].x-x[0].y);
float w=delw;
for (int i=1;i<n2;i++)
{
int j=n-i;
cufftComplex t=conjg(x[j]);
cufftComplex g=cplus(x[i],t);
cufftComplex h=cplus(x[i],cngtv(t));
h=cmltp(cmplx(cos(w),sin(w)),h);
x[i]=dmltp(0.5,cplus(g,cmltp(isg,h)));
x[j]=dmltp(0.5,cplus(conjg(g),cmltp(isg,conjg(h))));
w+=delw;
}
x[n2]=conjg(x[n2]);
if (dt<0)
{
x[0]=dmltp(0.5,x[0]);
fft(x,n,dt);
}
}
#endif |
d231d26c46f4f60c50f516539ef7401d4bb24101.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <ctime>
#include "gl/glew.h"
#include "gl/glut.h"
#include "hip/hip_runtime.h"
#include "cuda_gl_interop.h"
using namespace std;
#define W_LEFT -150.0f
#define W_RIGHT 150.0f
#define W_BOTTOM -150.0f
#define W_TOP 150.0f
#define GL_ERROR(x, i) std::cout << x << ": Error code -> " << i << std::endl
#define MSG(x) std::cout << (x) << std::endl
#define RND(i) ((float) rand() / RAND_MAX) * (i)
#define NUM_PARTICLES 512 * 15000
#define THREADS_PER_BLOCK 128
#define BLOCKS NUM_PARTICLES / THREADS_PER_BLOCK
#define TIME_SECS (float) clock() / CLOCKS_PER_SEC
#define ACTION_NONE 0
#define ACTION_ATTRACT 1
#define ACTION_REPULSE 2
#define G_CONSTANT 9.8f
#define SHOW_FPS
void InitGL();
void Render();
void Reshape(int, int);
void Keyboard(unsigned char, int, int);
void Mouse(int, int, int, int);
void MouseMotion(int, int);
void InitBuffers();
void InitShaders();
void InitCUDA();
void LastGLError(const char*);
void DrawParticles(float3);
char * LoadTextFile(const char *);
GLuint CreateShader(char*,int,GLenum);
struct Particle {
float2 position;
float2 velocity;
};
const float3 colors[5] = {
make_float3(1.0, 0.2, 0.2),
make_float3(0.7, 0.7, 0.2),
make_float3(0.0, 0.3, 0.8),
make_float3(0.8, 0.2, 0.8),
make_float3(0.2, 1.0, 0.2)
};
GLuint vao, vbo;
GLuint shaderProgram;
cudaGraphicsResource * vboCuda;
int action = ACTION_NONE;
GLfloat actionX, actionY;
__global__ void EvolveParticles(Particle * particles, GLfloat dt, int action, GLfloat actionX, GLfloat actionY) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
// Load particle from global memory
Particle p = particles[index];
p.position.x += p.velocity.x * dt;
p.position.y += p.velocity.y * dt;
if(action == ACTION_ATTRACT) {
float dx = actionX - p.position.x;
float dy = actionY - p.position.y;
float module = sqrtf(dx*dx + dy*dy);
if(module < 1.0f) module = 1.0f;
p.velocity.x += dx / module * G_CONSTANT * 8.0f / module;
p.velocity.y += dy / module * G_CONSTANT * 8.0f / module;
} else if(action == ACTION_REPULSE) {
float dx = actionX - p.position.x;
float dy = actionY - p.position.y;
float module = sqrtf(dx*dx + dy*dy);
if(module < 1.0f) module = 1.0f;
p.velocity.x -= dx / module * G_CONSTANT * 60.0f / module;
p.velocity.y -= dy / module * G_CONSTANT * 60.0f / module;
}
p.velocity.y -= dt * G_CONSTANT;
if(p.position.y < W_BOTTOM && p.velocity.y < 0.0f) {
p.velocity.y = - p.velocity.y * 0.5f;
p.velocity.x = p.velocity.x * 0.8f;
}
else if(p.position.y > W_TOP && p.velocity.y > 0.0f)
p.velocity.y = - p.velocity.y;
else if(p.position.x < W_LEFT && p.velocity.x < 0.0f)
p.velocity.x = - p.velocity.x * 0.8f;
else if(p.position.x > W_RIGHT && p.velocity.x > 0.0f)
p.velocity.x = - p.velocity.x * 0.8f;
// Store updated particle on global memory
particles[index] = p;
}
int main(int argc, char **argv) {
// Before EVERYTHING else (really, EVERYTHING) create the window, otherwise nothing works at all
glutInit(&argc, argv);
// rgba mode, double buffering, depth buffering, stencil buffering
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
// window's top-left corner position
glutInitWindowPosition(0,0);
// window's size
glutInitWindowSize(640, 480);
// create window
glutCreateWindow("CUDA Particles");
// Window created... Now it should work, but...
glewInit();
InitGL();
glutDisplayFunc(Render);
glutReshapeFunc(Reshape);
glutIdleFunc(Render);
glutKeyboardFunc(Keyboard);
glutMouseFunc(Mouse);
glutMotionFunc(MouseMotion);
//glutFullScreen();
glutMainLoop();
}
// called when window is resized
void Reshape(int w, int h) {
glViewport(0,0,w,h); // viewport resize
}
// called when window is drawn
void Render() {
static float prevTime = TIME_SECS;
static float dtsecs = 0.0f;
static float fpsTimer = 0.0f;
static float colorTimer = 0.0f;
static float3 color1, color2, colorResult;
static int colorIndex = 0;
static int fps;
dtsecs = TIME_SECS - prevTime;
prevTime = TIME_SECS;
colorTimer += dtsecs / 10.0f;
if(colorTimer >= 2.0f) {
colorTimer = 0.0f;
if(colorIndex == 4)
colorIndex = 0;
else
colorIndex++;
}
color1 = colors[colorIndex];
color2 = colorIndex == 4 ? colors[0] : colors[colorIndex+1];
if(colorTimer < 1.0f)
colorResult = color1;
else {
float t = colorTimer - 1.0f;
colorResult.x = color2.x * t + color1.x * (1-t);
colorResult.y = color2.y * t + color1.y * (1-t);
colorResult.z = color2.z * t + color1.z * (1-t);
}
#ifdef SHOW_FPS
fpsTimer += dtsecs;
fps++;
if(fpsTimer > 1.0f) {
std::cout << "FPS: " << fps << std::endl;
fpsTimer -= 1.0f;
fps = 0;
}
#endif
Particle * particles;
size_t length;
hipGraphicsMapResources(1, &vboCuda, 0);
hipGraphicsResourceGetMappedPointer((void**)&particles, &length, vboCuda);
hipLaunchKernelGGL(( EvolveParticles), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, particles, dtsecs, action, actionX, actionY);
hipDeviceSynchronize();
hipGraphicsUnmapResources(1, &vboCuda, 0);
DrawParticles(colorResult);
glutSwapBuffers(); // swap backbuffer with frontbuffer
}
void InitGL() {
// Init opengl(depth test, blending, lighting and so on...)
glDisable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE); // additive blending
InitBuffers();
InitCUDA();
InitShaders();
LastGLError("InitGL():");
}
// Called by keyboard events
void Keyboard(unsigned char key, int x, int y) {
if(key == 'q') {
exit(0);
}
}
// Called by mouse events
void Mouse(int button, int state, int x, int y) {
if(state == GLUT_DOWN) {
actionX = (GLfloat) x / glutGet(GLUT_WINDOW_WIDTH) * (W_RIGHT - W_LEFT) + W_LEFT;
actionY = (W_TOP - W_BOTTOM) - (GLfloat) y / glutGet(GLUT_WINDOW_HEIGHT) * (W_TOP - W_BOTTOM) + W_BOTTOM;
if(button == GLUT_LEFT)
action = ACTION_ATTRACT;
else
action = ACTION_REPULSE;
} else
action = ACTION_NONE;
}
void MouseMotion(int x, int y) {
if(action != ACTION_NONE) {
actionX = (GLfloat) x / glutGet(GLUT_WINDOW_WIDTH) * (W_RIGHT - W_LEFT) + W_LEFT;
actionY = (W_TOP - W_BOTTOM) - (GLfloat) y / glutGet(GLUT_WINDOW_HEIGHT) * (W_TOP - W_BOTTOM) + W_BOTTOM;
}
}
void InitBuffers() {
Particle * particles = new Particle[NUM_PARTICLES];
for(int i = 0; i < NUM_PARTICLES; i++) {
particles[i].position.x = RND(W_RIGHT - W_LEFT) + W_LEFT;
particles[i].position.y = RND(W_TOP - W_BOTTOM) + W_BOTTOM;
particles[i].velocity.x = RND(20.0f) - 10.0f;
particles[i].velocity.y = RND(20.0f) - 10.0f;
}
glGenVertexArrays(1, &vao);
glGenBuffers(1, &vbo);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(Particle) * NUM_PARTICLES, (void*) particles, GL_STATIC_DRAW);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 16, 0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 16, (char*)NULL + 8);
glEnableVertexAttribArray(0);
//glEnableVertexAttribArray(1);
glBindVertexArray(0);
LastGLError("InitBuffers()");
}
void InitCUDA() {
hipGLSetGLDevice(0);
hipGraphicsGLRegisterBuffer(&vboCuda, vbo, hipGraphicsMapFlagsNone);
}
void DrawParticles(float3 color) {
static float tmatrix[16];
glClear(GL_COLOR_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(W_LEFT, W_RIGHT, W_BOTTOM, W_TOP, -1, 1);
glUseProgram(shaderProgram);
int locMat = glGetUniformLocation(shaderProgram, "in_ModelViewProjectionMatrix");
int locColor = glGetUniformLocation(shaderProgram, "in_Color");
glGetFloatv(GL_PROJECTION_MATRIX, tmatrix);
glUniformMatrix4fv(locMat, 1, GL_FALSE, tmatrix);
glUniform4f(locColor, color.x, color.y, color.z, 0.1f);
glBindVertexArray(vao);
glDrawArrays(GL_POINTS, 0, NUM_PARTICLES);
glBindVertexArray(0);
glUseProgram(0);
}
void LastGLError(const char * msg) {
GLuint error = glGetError();
if(error != GL_NO_ERROR)
GL_ERROR(msg, error);
}
char * LoadTextFile(const char * fileName, int * length) {
ifstream file;
char * data = NULL;
int len;
file.open(fileName, ifstream::binary);
file.seekg(0, ios_base::end);
len = file.tellg();
file.seekg(0, ios_base::beg);
data = new char[len];
if(!file.eof())
file.read(data, len);
else
MSG("Bad file!");
file.close();
*length = len;
return data;
}
void InitShaders() {
GLuint vs, gs, fs;
int vsl, gsl, fsl;
char *vsSrc, *gsSrc, *fsSrc;
vsSrc = LoadTextFile("particles.vert", &vsl);
gsSrc = LoadTextFile("particles.geom", &gsl);
fsSrc = LoadTextFile("particles.frag", &fsl);
vs = CreateShader(vsSrc, vsl, GL_VERTEX_SHADER);
gs = CreateShader(gsSrc, gsl, GL_GEOMETRY_SHADER);
fs = CreateShader(fsSrc, fsl, GL_FRAGMENT_SHADER);
shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vs);
glAttachShader(shaderProgram, gs);
glAttachShader(shaderProgram, fs);
glLinkProgram(shaderProgram);
int linkStatus = 0;
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &linkStatus);
if(linkStatus)
MSG("Program linked!");
else
MSG("Can't link the program!");
}
GLuint CreateShader(char * source, int length, GLenum type) {
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, (const char**) &source, &length);
glCompileShader(shader);
int compiled = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
if(compiled)
MSG("Shader compiled!");
else
MSG("Shader not compiled!");
return shader;
} | d231d26c46f4f60c50f516539ef7401d4bb24101.cu | #include <cstdlib>
#include <iostream>
#include <fstream>
#include <ctime>
#include "gl/glew.h"
#include "gl/glut.h"
#include "cuda_runtime.h"
#include "cuda_gl_interop.h"
using namespace std;
#define W_LEFT -150.0f
#define W_RIGHT 150.0f
#define W_BOTTOM -150.0f
#define W_TOP 150.0f
#define GL_ERROR(x, i) std::cout << x << ": Error code -> " << i << std::endl
#define MSG(x) std::cout << (x) << std::endl
#define RND(i) ((float) rand() / RAND_MAX) * (i)
#define NUM_PARTICLES 512 * 15000
#define THREADS_PER_BLOCK 128
#define BLOCKS NUM_PARTICLES / THREADS_PER_BLOCK
#define TIME_SECS (float) clock() / CLOCKS_PER_SEC
#define ACTION_NONE 0
#define ACTION_ATTRACT 1
#define ACTION_REPULSE 2
#define G_CONSTANT 9.8f
#define SHOW_FPS
void InitGL();
void Render();
void Reshape(int, int);
void Keyboard(unsigned char, int, int);
void Mouse(int, int, int, int);
void MouseMotion(int, int);
void InitBuffers();
void InitShaders();
void InitCUDA();
void LastGLError(const char*);
void DrawParticles(float3);
char * LoadTextFile(const char *);
GLuint CreateShader(char*,int,GLenum);
struct Particle {
float2 position;
float2 velocity;
};
const float3 colors[5] = {
make_float3(1.0, 0.2, 0.2),
make_float3(0.7, 0.7, 0.2),
make_float3(0.0, 0.3, 0.8),
make_float3(0.8, 0.2, 0.8),
make_float3(0.2, 1.0, 0.2)
};
GLuint vao, vbo;
GLuint shaderProgram;
cudaGraphicsResource * vboCuda;
int action = ACTION_NONE;
GLfloat actionX, actionY;
__global__ void EvolveParticles(Particle * particles, GLfloat dt, int action, GLfloat actionX, GLfloat actionY) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
// Load particle from global memory
Particle p = particles[index];
p.position.x += p.velocity.x * dt;
p.position.y += p.velocity.y * dt;
if(action == ACTION_ATTRACT) {
float dx = actionX - p.position.x;
float dy = actionY - p.position.y;
float module = sqrtf(dx*dx + dy*dy);
if(module < 1.0f) module = 1.0f;
p.velocity.x += dx / module * G_CONSTANT * 8.0f / module;
p.velocity.y += dy / module * G_CONSTANT * 8.0f / module;
} else if(action == ACTION_REPULSE) {
float dx = actionX - p.position.x;
float dy = actionY - p.position.y;
float module = sqrtf(dx*dx + dy*dy);
if(module < 1.0f) module = 1.0f;
p.velocity.x -= dx / module * G_CONSTANT * 60.0f / module;
p.velocity.y -= dy / module * G_CONSTANT * 60.0f / module;
}
p.velocity.y -= dt * G_CONSTANT;
if(p.position.y < W_BOTTOM && p.velocity.y < 0.0f) {
p.velocity.y = - p.velocity.y * 0.5f;
p.velocity.x = p.velocity.x * 0.8f;
}
else if(p.position.y > W_TOP && p.velocity.y > 0.0f)
p.velocity.y = - p.velocity.y;
else if(p.position.x < W_LEFT && p.velocity.x < 0.0f)
p.velocity.x = - p.velocity.x * 0.8f;
else if(p.position.x > W_RIGHT && p.velocity.x > 0.0f)
p.velocity.x = - p.velocity.x * 0.8f;
// Store updated particle on global memory
particles[index] = p;
}
int main(int argc, char **argv) {
// Before EVERYTHING else (really, EVERYTHING) create the window, otherwise nothing works at all
glutInit(&argc, argv);
// rgba mode, double buffering, depth buffering, stencil buffering
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
// window's top-left corner position
glutInitWindowPosition(0,0);
// window's size
glutInitWindowSize(640, 480);
// create window
glutCreateWindow("CUDA Particles");
// Window created... Now it should work, but...
glewInit();
InitGL();
glutDisplayFunc(Render);
glutReshapeFunc(Reshape);
glutIdleFunc(Render);
glutKeyboardFunc(Keyboard);
glutMouseFunc(Mouse);
glutMotionFunc(MouseMotion);
//glutFullScreen();
glutMainLoop();
}
// called when window is resized
void Reshape(int w, int h) {
glViewport(0,0,w,h); // viewport resize
}
// called when window is drawn
void Render() {
static float prevTime = TIME_SECS;
static float dtsecs = 0.0f;
static float fpsTimer = 0.0f;
static float colorTimer = 0.0f;
static float3 color1, color2, colorResult;
static int colorIndex = 0;
static int fps;
dtsecs = TIME_SECS - prevTime;
prevTime = TIME_SECS;
colorTimer += dtsecs / 10.0f;
if(colorTimer >= 2.0f) {
colorTimer = 0.0f;
if(colorIndex == 4)
colorIndex = 0;
else
colorIndex++;
}
color1 = colors[colorIndex];
color2 = colorIndex == 4 ? colors[0] : colors[colorIndex+1];
if(colorTimer < 1.0f)
colorResult = color1;
else {
float t = colorTimer - 1.0f;
colorResult.x = color2.x * t + color1.x * (1-t);
colorResult.y = color2.y * t + color1.y * (1-t);
colorResult.z = color2.z * t + color1.z * (1-t);
}
#ifdef SHOW_FPS
fpsTimer += dtsecs;
fps++;
if(fpsTimer > 1.0f) {
std::cout << "FPS: " << fps << std::endl;
fpsTimer -= 1.0f;
fps = 0;
}
#endif
Particle * particles;
size_t length;
cudaGraphicsMapResources(1, &vboCuda, 0);
cudaGraphicsResourceGetMappedPointer((void**)&particles, &length, vboCuda);
EvolveParticles<<<BLOCKS, THREADS_PER_BLOCK>>>(particles, dtsecs, action, actionX, actionY);
cudaThreadSynchronize();
cudaGraphicsUnmapResources(1, &vboCuda, 0);
DrawParticles(colorResult);
glutSwapBuffers(); // swap backbuffer with frontbuffer
}
void InitGL() {
// Init opengl(depth test, blending, lighting and so on...)
glDisable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE); // additive blending
InitBuffers();
InitCUDA();
InitShaders();
LastGLError("InitGL():");
}
// Called by keyboard events
void Keyboard(unsigned char key, int x, int y) {
if(key == 'q') {
exit(0);
}
}
// Called by mouse events
void Mouse(int button, int state, int x, int y) {
if(state == GLUT_DOWN) {
actionX = (GLfloat) x / glutGet(GLUT_WINDOW_WIDTH) * (W_RIGHT - W_LEFT) + W_LEFT;
actionY = (W_TOP - W_BOTTOM) - (GLfloat) y / glutGet(GLUT_WINDOW_HEIGHT) * (W_TOP - W_BOTTOM) + W_BOTTOM;
if(button == GLUT_LEFT)
action = ACTION_ATTRACT;
else
action = ACTION_REPULSE;
} else
action = ACTION_NONE;
}
void MouseMotion(int x, int y) {
if(action != ACTION_NONE) {
actionX = (GLfloat) x / glutGet(GLUT_WINDOW_WIDTH) * (W_RIGHT - W_LEFT) + W_LEFT;
actionY = (W_TOP - W_BOTTOM) - (GLfloat) y / glutGet(GLUT_WINDOW_HEIGHT) * (W_TOP - W_BOTTOM) + W_BOTTOM;
}
}
void InitBuffers() {
Particle * particles = new Particle[NUM_PARTICLES];
for(int i = 0; i < NUM_PARTICLES; i++) {
particles[i].position.x = RND(W_RIGHT - W_LEFT) + W_LEFT;
particles[i].position.y = RND(W_TOP - W_BOTTOM) + W_BOTTOM;
particles[i].velocity.x = RND(20.0f) - 10.0f;
particles[i].velocity.y = RND(20.0f) - 10.0f;
}
glGenVertexArrays(1, &vao);
glGenBuffers(1, &vbo);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(Particle) * NUM_PARTICLES, (void*) particles, GL_STATIC_DRAW);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 16, 0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 16, (char*)NULL + 8);
glEnableVertexAttribArray(0);
//glEnableVertexAttribArray(1);
glBindVertexArray(0);
LastGLError("InitBuffers()");
}
void InitCUDA() {
cudaGLSetGLDevice(0);
cudaGraphicsGLRegisterBuffer(&vboCuda, vbo, cudaGraphicsMapFlagsNone);
}
void DrawParticles(float3 color) {
static float tmatrix[16];
glClear(GL_COLOR_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(W_LEFT, W_RIGHT, W_BOTTOM, W_TOP, -1, 1);
glUseProgram(shaderProgram);
int locMat = glGetUniformLocation(shaderProgram, "in_ModelViewProjectionMatrix");
int locColor = glGetUniformLocation(shaderProgram, "in_Color");
glGetFloatv(GL_PROJECTION_MATRIX, tmatrix);
glUniformMatrix4fv(locMat, 1, GL_FALSE, tmatrix);
glUniform4f(locColor, color.x, color.y, color.z, 0.1f);
glBindVertexArray(vao);
glDrawArrays(GL_POINTS, 0, NUM_PARTICLES);
glBindVertexArray(0);
glUseProgram(0);
}
void LastGLError(const char * msg) {
GLuint error = glGetError();
if(error != GL_NO_ERROR)
GL_ERROR(msg, error);
}
char * LoadTextFile(const char * fileName, int * length) {
ifstream file;
char * data = NULL;
int len;
file.open(fileName, ifstream::binary);
file.seekg(0, ios_base::end);
len = file.tellg();
file.seekg(0, ios_base::beg);
data = new char[len];
if(!file.eof())
file.read(data, len);
else
MSG("Bad file!");
file.close();
*length = len;
return data;
}
void InitShaders() {
GLuint vs, gs, fs;
int vsl, gsl, fsl;
char *vsSrc, *gsSrc, *fsSrc;
vsSrc = LoadTextFile("particles.vert", &vsl);
gsSrc = LoadTextFile("particles.geom", &gsl);
fsSrc = LoadTextFile("particles.frag", &fsl);
vs = CreateShader(vsSrc, vsl, GL_VERTEX_SHADER);
gs = CreateShader(gsSrc, gsl, GL_GEOMETRY_SHADER);
fs = CreateShader(fsSrc, fsl, GL_FRAGMENT_SHADER);
shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vs);
glAttachShader(shaderProgram, gs);
glAttachShader(shaderProgram, fs);
glLinkProgram(shaderProgram);
int linkStatus = 0;
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &linkStatus);
if(linkStatus)
MSG("Program linked!");
else
MSG("Can't link the program!");
}
GLuint CreateShader(char * source, int length, GLenum type) {
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, (const char**) &source, &length);
glCompileShader(shader);
int compiled = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
if(compiled)
MSG("Shader compiled!");
else
MSG("Shader not compiled!");
return shader;
} |
050fc65f20a634422cd557f33a476ec6c38f4b7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020, Lorenzo Basso, Jack Lee, Matthew Zhang, Feiyang Chen
* Copyright (c) 2018, Francis Haghighi-Daly
* All rights reserved.
* This file is part of the WooStOr - Wavepacket prOpopgatiOn using SpliT OperatR method, subject to the GNU/GPL-3.0-or-later.*/
#include <mex.h>
#include <matrix.h>
#include <math.h>
#include "../MEX_helpers/complex.h"
#include "../MEX_helpers/cuda_helper.h"
__global__ void copy_complex_array(myComplex *dest, double *real, double *imag, size_t size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
myComplex w;
w.x = real[tid];
w.y = imag[tid];
dest[tid] = w;
tid += blockDim.x * gridDim.x;
}
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
long long dest_ptr = mxGetScalar(prhs[0]);
double *source_real = mxGetPr(prhs[1]);
double *source_imag = mxGetPi(prhs[1]);
size_t size = mxGetScalar(prhs[2]);
myComplex *dest = reinterpret_cast<myComplex *>(dest_ptr);
// Allocate the space on the GPU
double *dev_source_real, *dev_source_imag;
hipMallocManaged(reinterpret_cast<void **>(&dev_source_real), size * sizeof(double));
hipMallocManaged(reinterpret_cast<void **>(&dev_source_imag), size * sizeof(double));
// Copy input data to GPU
hipMemcpy(dev_source_real, source_real, size * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_source_imag, source_imag, size * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( copy_complex_array), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, dest, dev_source_real, dev_source_imag, size);
hipFree(dev_source_real);
hipFree(dev_source_imag);
}
| 050fc65f20a634422cd557f33a476ec6c38f4b7c.cu | /* Copyright (c) 2020, Lorenzo Basso, Jack Lee, Matthew Zhang, Feiyang Chen
* Copyright (c) 2018, Francis Haghighi-Daly
* All rights reserved.
* This file is part of the WooStOr - Wavepacket prOpopgatiOn using SpliT OperatR method, subject to the GNU/GPL-3.0-or-later.*/
#include <mex.h>
#include <matrix.h>
#include <math.h>
#include "../MEX_helpers/complex.h"
#include "../MEX_helpers/cuda_helper.h"
__global__ void copy_complex_array(myComplex *dest, double *real, double *imag, size_t size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
myComplex w;
w.x = real[tid];
w.y = imag[tid];
dest[tid] = w;
tid += blockDim.x * gridDim.x;
}
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
long long dest_ptr = mxGetScalar(prhs[0]);
double *source_real = mxGetPr(prhs[1]);
double *source_imag = mxGetPi(prhs[1]);
size_t size = mxGetScalar(prhs[2]);
myComplex *dest = reinterpret_cast<myComplex *>(dest_ptr);
// Allocate the space on the GPU
double *dev_source_real, *dev_source_imag;
cudaMallocManaged(reinterpret_cast<void **>(&dev_source_real), size * sizeof(double));
cudaMallocManaged(reinterpret_cast<void **>(&dev_source_imag), size * sizeof(double));
// Copy input data to GPU
cudaMemcpy(dev_source_real, source_real, size * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_source_imag, source_imag, size * sizeof(double), cudaMemcpyHostToDevice);
copy_complex_array<<<NUM_BLOCKS, NUM_THREADS>>>(dest, dev_source_real, dev_source_imag, size);
cudaFree(dev_source_real);
cudaFree(dev_source_imag);
}
|
658867880e62ce9f15afd0530c2646b4663a5740.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* cudautils.cu
*
* Created on: Apr 14, 2011
* Author: zhmurov
*/
#include "cudautils.cuh"
#include "../Core/global.h"
__global__ void reduce_kernel(float* d_data, float* d_output){
__shared__ float s_data[REDUCE_BLOCKSIZE];
if(blockIdx.x*blockDim.x + threadIdx.x < c_gsystem.N){
int s_i = threadIdx.x;
int i = 2 * blockIdx.x*blockDim.x + threadIdx.x;
s_data[s_i] = d_data[i] + d_data[i + blockDim.x];
__syncthreads();
int s;
for(s = blockDim.x/2; s > 32; s>>=1){
if(s_i < s){
s_data[s_i] += d_data[s_i + s];
}
__syncthreads();
}
if(s_i < 32){
s_data[s_i] += d_data[s_i + 32];
s_data[s_i] += d_data[s_i + 16];
s_data[s_i] += d_data[s_i + 8];
s_data[s_i] += d_data[s_i + 4];
s_data[s_i] += d_data[s_i + 2];
s_data[s_i] += d_data[s_i + 1];
}
if(s_i == 0){
d_output[blockIdx.x] = s_data[0];
}
}
}
float reduce(float* d_data, int N){
int blockNum = N/REDUCE_BLOCKSIZE + 1;
float result = 0;
int i;
if(d_sums == NULL){
allocateGPU((void**)&d_sums, blockNum*sizeof(float));
allocateCPU((void**)&h_sums, blockNum*sizeof(float));
for(i = 0; i < blockNum; i++){
h_sums[i] = 0.0f;
}
}
hipLaunchKernelGGL(( reduce_kernel), dim3(blockNum), dim3(REDUCE_BLOCKSIZE), 0, 0, d_data, d_sums);
if(blockNum > REDUCE_BLOCKSIZE){
result = reduce(d_sums, blockNum);
} else {
hipMemcpy(h_sums, d_sums, blockNum*sizeof(float), hipMemcpyDeviceToHost);
for(i = 0; i < blockNum; i++){
result += h_sums[i];
}
}
return result;
}
| 658867880e62ce9f15afd0530c2646b4663a5740.cu | /*
* cudautils.cu
*
* Created on: Apr 14, 2011
* Author: zhmurov
*/
#include "cudautils.cuh"
#include "../Core/global.h"
__global__ void reduce_kernel(float* d_data, float* d_output){
__shared__ float s_data[REDUCE_BLOCKSIZE];
if(blockIdx.x*blockDim.x + threadIdx.x < c_gsystem.N){
int s_i = threadIdx.x;
int i = 2 * blockIdx.x*blockDim.x + threadIdx.x;
s_data[s_i] = d_data[i] + d_data[i + blockDim.x];
__syncthreads();
int s;
for(s = blockDim.x/2; s > 32; s>>=1){
if(s_i < s){
s_data[s_i] += d_data[s_i + s];
}
__syncthreads();
}
if(s_i < 32){
s_data[s_i] += d_data[s_i + 32];
s_data[s_i] += d_data[s_i + 16];
s_data[s_i] += d_data[s_i + 8];
s_data[s_i] += d_data[s_i + 4];
s_data[s_i] += d_data[s_i + 2];
s_data[s_i] += d_data[s_i + 1];
}
if(s_i == 0){
d_output[blockIdx.x] = s_data[0];
}
}
}
float reduce(float* d_data, int N){
int blockNum = N/REDUCE_BLOCKSIZE + 1;
float result = 0;
int i;
if(d_sums == NULL){
allocateGPU((void**)&d_sums, blockNum*sizeof(float));
allocateCPU((void**)&h_sums, blockNum*sizeof(float));
for(i = 0; i < blockNum; i++){
h_sums[i] = 0.0f;
}
}
reduce_kernel<<<blockNum, REDUCE_BLOCKSIZE>>>(d_data, d_sums);
if(blockNum > REDUCE_BLOCKSIZE){
result = reduce(d_sums, blockNum);
} else {
cudaMemcpy(h_sums, d_sums, blockNum*sizeof(float), cudaMemcpyDeviceToHost);
for(i = 0; i < blockNum; i++){
result += h_sums[i];
}
}
return result;
}
|
1904407ad1d9d4dfc9b29f7f564856fbd75069e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <limits>
#include <typeinfo>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <cub/device/device_segmented_reduce.cuh>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
int g_ptx_version;
int g_sm_count;
double g_device_giga_bandwidth;
bool g_verbose = false;
bool g_verbose_input = false;
int g_timing_iterations = 0;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
CUB_SEGMENTED, // CUB segmented method
CUB_CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
THRUST, // Thrust method
};
// Custom max functor
struct CustomMax
{
/// Boolean max operator, returns <tt>(a > b) ? a : b</tt>
template <typename OutputT>
__host__ __device__ __forceinline__ OutputT operator()(const OutputT &a, const OutputT &b)
{
return CUB_MAX(a, b);
}
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceReduce entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to reduce entrypoint (custom-max)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// Max-identity
OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, reduction_op, identity,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to sum entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
hipcub::Sum reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to min entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
hipcub::Min reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to max entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
hipcub::Max reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to argmin entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
hipcub::ArgMin reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to argmax entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
hipcub::ArgMax reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSegmentedReduce entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to reduce entrypoint (custom-max)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// Max-identity
OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, reduction_op, identity,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to sum entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
hipcub::Sum reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to min entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
hipcub::Min reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to max entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
hipcub::Max reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to argmin entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
hipcub::ArgMin reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to argmax entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
hipcub::ArgMax reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
stream, debug_synchronous);
}
return error;
}
//---------------------------------------------------------------------
// Dispatch to different Thrust entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to reduction entrypoint (min or max specialization)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
hipError_t Dispatch(
Int2Type<THRUST> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
OutputT init;
CubDebugExit(hipMemcpy(&init, d_in + 0, sizeof(OutputT), hipMemcpyDeviceToHost));
thrust::device_ptr<OutputT> d_in_wrapper(d_in);
OutputT retval;
for (int i = 0; i < timing_iterations; ++i)
{
retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items, init, reduction_op);
}
if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE)
CubDebugExit(hipMemcpy(d_out, &retval, sizeof(OutputT), hipMemcpyHostToDevice));
}
return hipSuccess;
}
/**
* Dispatch to reduction entrypoint (sum specialization)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
hipError_t Dispatch(
Int2Type<THRUST> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
Sum reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::device_ptr<OutputT> d_in_wrapper(d_in);
OutputT retval;
for (int i = 0; i < timing_iterations; ++i)
{
retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items);
}
if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE)
CubDebugExit(hipMemcpy(d_out, &retval, sizeof(OutputT), hipMemcpyHostToDevice));
}
return hipSuccess;
}
//---------------------------------------------------------------------
// CUDA nested-parallelism test kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceReduce
*/
template <
typename InputIteratorT,
typename OutputIteratorT,
typename OffsetIteratorT,
typename ReductionOpT>
__global__ void CnpDispatchKernel(
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
bool debug_synchronous)
{
#ifndef CUB_CDP
*d_cdp_error = hipErrorNotSupported;
#else
*d_cdp_error = Dispatch(Int2Type<CUB>(), timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, 0, debug_synchronous);
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
/**
* Dispatch to CUB_CDP kernel
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB_CDP> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to invoke device-side dispatch
hipLaunchKernelGGL(( CnpDispatchKernel), dim3(1),dim3(1), 0, 0, timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, debug_synchronous);
// Copy out temp_storage_bytes
CubDebugExit(hipMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, hipMemcpyDeviceToHost));
// Copy out error
hipError_t retval;
CubDebugExit(hipMemcpy(&retval, d_cdp_error, sizeof(hipError_t) * 1, hipMemcpyDeviceToHost));
return retval;
}
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Initialize problem
template <typename InputT>
void Initialize(
GenMode gen_mode,
InputT *h_in,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
InitValue(gen_mode, h_in[i], i);
}
if (g_verbose_input)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/// Solve problem (max/custom-max functor)
template <typename ReductionOpT, typename InputT, typename _OutputT>
struct Solution
{
typedef _OutputT OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
ReductionOpT reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
aggregate = reduction_op(aggregate, OutputT(h_in[j]));
h_reference[i] = aggregate;
}
}
};
/// Solve problem (min functor)
template <typename InputT, typename _OutputT>
struct Solution<hipcub::Min, InputT, _OutputT>
{
typedef _OutputT OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
hipcub::Min reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate = Traits<InputT>::Max(); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
aggregate = reduction_op(aggregate, OutputT(h_in[j]));
h_reference[i] = aggregate;
}
}
};
/// Solve problem (sum functor)
template <typename InputT, typename _OutputT>
struct Solution<hipcub::Sum, InputT, _OutputT>
{
typedef _OutputT OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
hipcub::Sum reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate;
InitValue(INTEGER_SEED, aggregate, 0);
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
aggregate = reduction_op(aggregate, OutputT(h_in[j]));
h_reference[i] = aggregate;
}
}
};
/// Solve problem (argmin functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<hipcub::ArgMin, InputValueT, OutputValueT>
{
typedef KeyValuePair<int, OutputValueT> OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
hipcub::ArgMin reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate(1, Traits<InputValueT>::Max()); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
{
OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j]));
aggregate = reduction_op(aggregate, item);
}
h_reference[i] = aggregate;
}
}
};
/// Solve problem (argmax functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<hipcub::ArgMax, InputValueT, OutputValueT>
{
typedef KeyValuePair<int, OutputValueT> OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
hipcub::ArgMax reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate(1, Traits<InputValueT>::Lowest()); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
{
OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j]));
aggregate = reduction_op(aggregate, item);
}
h_reference[i] = aggregate;
}
}
};
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Test DeviceReduce for a given problem input
template <
typename BackendT,
typename DeviceInputIteratorT,
typename DeviceOutputIteratorT,
typename HostReferenceIteratorT,
typename OffsetT,
typename OffsetIteratorT,
typename ReductionOpT>
void Test(
BackendT backend,
DeviceInputIteratorT d_in,
DeviceOutputIteratorT d_out,
OffsetT num_items,
OffsetT num_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
HostReferenceIteratorT h_reference)
{
// Input data types
typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputT;
// Allocate CUB_CDP device arrays for temp storage size and error
size_t *d_temp_storage_bytes = NULL;
hipError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1));
// Inquire temp device storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
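// (When d_temp_storage is NULL, the dispatch below only computes the required
// temp_storage_bytes and returns without performing the reduction; the real
// run happens after the allocation that follows.)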
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, true));
// Allocate temp device storage
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, true));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_segments, g_verbose, g_verbose);
printf("\t%s", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
if (g_timing_iterations > 0)
{
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(backend, g_timing_iterations,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, false));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(InputT);
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak",
avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
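// Illustrative arithmetic only: a 4-byte input type reduced at 10 billion
// items/s corresponds to 10 * 4 = 40 logical GB/s read from global memory.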
}
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare);
}
/// Test DeviceReduce
template <
Backend BACKEND,
typename OutputValueT,
typename HostInputIteratorT,
typename DeviceInputIteratorT,
typename OffsetT,
typename OffsetIteratorT,
typename ReductionOpT>
void SolveAndTest(
HostInputIteratorT h_in,
DeviceInputIteratorT d_in,
OffsetT num_items,
OffsetT num_segments,
OffsetIteratorT h_segment_offsets,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op)
{
typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputValueT;
typedef Solution<ReductionOpT, InputValueT, OutputValueT> SolutionT;
typedef typename SolutionT::OutputT OutputT;
printf("\n\n%s hipcub::DeviceReduce<%s> %d items (%s), %d segments\n",
(BACKEND == CUB_CDP) ? "CUB_CDP" : (BACKEND == THRUST) ? "Thrust" : (BACKEND == CUB_SEGMENTED) ? "CUB_SEGMENTED" : "CUB",
typeid(ReductionOpT).name(), num_items, typeid(HostInputIteratorT).name(), num_segments);
fflush(stdout);
// Allocate and solve solution
OutputT *h_reference = new OutputT[num_segments];
SolutionT::Solve(h_in, h_reference, num_segments, h_segment_offsets, reduction_op);
// // Run with discard iterator
// DiscardOutputIterator<OffsetT> discard_itr;
// Test(Int2Type<BACKEND>(), d_in, discard_itr, num_items, num_segments, d_segment_offsets, reduction_op, h_reference);
// Run with output data (cleared for sanity-check)
OutputT *d_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(OutputT) * num_segments));
CubDebugExit(hipMemset(d_out, 0, sizeof(OutputT) * num_segments));
Test(Int2Type<BACKEND>(), d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, h_reference);
// Cleanup
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (h_reference) delete[] h_reference;
}
/// Test specific problem type
template <
Backend BACKEND,
typename InputT,
typename OutputT,
typename OffsetT,
typename ReductionOpT>
void TestProblem(
OffsetT num_items,
OffsetT num_segments,
GenMode gen_mode,
ReductionOpT reduction_op)
{
printf("\n\nInitializing %d %s->%s (gen mode %d)... ", num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout);
fflush(stdout);
// Initialize value data
InputT* h_in = new InputT[num_items];
Initialize(gen_mode, h_in, num_items);
// Initialize segment data
OffsetT *h_segment_offsets = new OffsetT[num_segments + 1];
InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);
// Initialize device data
OffsetT *d_segment_offsets = NULL;
InputT *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (num_segments + 1)));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(InputT) * num_items, hipMemcpyHostToDevice));
CubDebugExit(hipMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), hipMemcpyHostToDevice));
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, reduction_op);
if (h_segment_offsets) delete[] h_segment_offsets;
if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
/// Test different operators
template <
Backend BACKEND,
typename OutputT,
typename HostInputIteratorT,
typename DeviceInputIteratorT,
typename OffsetT,
typename OffsetIteratorT>
void TestByOp(
HostInputIteratorT h_in,
DeviceInputIteratorT d_in,
OffsetT num_items,
OffsetT num_segments,
OffsetIteratorT h_segment_offsets,
OffsetIteratorT d_segment_offsets)
{
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, CustomMax());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Sum());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Min());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMin());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Max());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMax());
}
/// Test different backends
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestByBackend(
OffsetT num_items,
OffsetT max_segments,
GenMode gen_mode)
{
// Initialize host data
printf("\n\nInitializing %d %s -> %s (gen mode %d)... ",
num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout);
InputT *h_in = new InputT[num_items];
OffsetT *h_segment_offsets = new OffsetT[max_segments + 1];
Initialize(gen_mode, h_in, num_items);
// Initialize device data
InputT *d_in = NULL;
OffsetT *d_segment_offsets = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (max_segments + 1)));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(InputT) * num_items, hipMemcpyHostToDevice));
//
// Test single-segment implementations
//
InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input);
// Page-aligned-input tests
TestByOp<CUB, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL); // Host-dispatch
#ifdef CUB_CDP
TestByOp<CUB_CDP, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL); // Device-dispatch
#endif
// Non-page-aligned-input tests
if (num_items > 1)
{
InitializeSegments(num_items - 1, 1, h_segment_offsets, g_verbose_input);
TestByOp<CUB, OutputT>(h_in + 1, d_in + 1, num_items - 1, 1, h_segment_offsets, (OffsetT*) NULL);
}
//
// Test segmented implementation
//
// Right now we assign a single thread block to each segment, so let's keep it to under 128K items per segment
int max_items_per_segment = 128000;
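// Illustrative progression (assuming the default max_items = 27,000,000 and
// max_segments = 34,000): the loop starts at ceil(27,000,000 / 128,000) = 211
// segments, then tests 211 * 32 + 1 = 6753; the next value (216,097) exceeds
// max_segments, so the loop stops there.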
for (int num_segments = (num_items + max_items_per_segment - 1) / max_items_per_segment;
num_segments < max_segments;
num_segments = (num_segments * 32) + 1)
{
// Test with segment pointer
InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);
CubDebugExit(hipMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), hipMemcpyHostToDevice));
TestByOp<CUB_SEGMENTED, OutputT>(
h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets);
// Test with segment iterator
typedef CastOp<OffsetT> IdentityOpT;
IdentityOpT identity_op;
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> h_segment_offsets_itr(
h_segment_offsets,
identity_op);
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> d_segment_offsets_itr(
d_segment_offsets,
identity_op);
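// CastOp<OffsetT> is an identity cast here, so these transform iterators yield
// exactly the same offsets as the raw pointers; the point is to exercise the
// iterator-based (rather than pointer-based) segment-offset interface.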
TestByOp<CUB_SEGMENTED, OutputT>(
h_in, d_in, num_items, num_segments, h_segment_offsets_itr, d_segment_offsets_itr);
}
if (h_in) delete[] h_in;
if (h_segment_offsets) delete[] h_segment_offsets;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
}
/// Test different input-generation modes
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestByGenMode(
OffsetT num_items,
OffsetT max_segments)
{
//
// Test pointer support using different input-generation modes
//
TestByBackend<InputT, OutputT>(num_items, max_segments, UNIFORM);
TestByBackend<InputT, OutputT>(num_items, max_segments, INTEGER_SEED);
TestByBackend<InputT, OutputT>(num_items, max_segments, RANDOM);
//
// Test iterator support using a constant-iterator and SUM
//
InputT val;
InitValue(UNIFORM, val, 0);
ConstantInputIterator<InputT, OffsetT> h_in(val);
OffsetT *h_segment_offsets = new OffsetT[1 + 1];
InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input);
SolveAndTest<CUB, OutputT>(h_in, h_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL, Sum());
#ifdef CUB_CDP
SolveAndTest<CUB_CDP, OutputT>(h_in, h_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL, Sum());
#endif
if (h_segment_offsets) delete[] h_segment_offsets;
}
/// Test different problem sizes
template <
typename InputT,
typename OutputT,
typename OffsetT>
struct TestBySize
{
OffsetT max_items;
OffsetT max_segments;
TestBySize(OffsetT max_items, OffsetT max_segments) :
max_items(max_items),
max_segments(max_segments)
{}
template <typename ActivePolicyT>
hipError_t Invoke()
{
//
// Black-box testing on all backends
//
// Test 0, 1, many
TestByGenMode<InputT, OutputT>(0, max_segments);
TestByGenMode<InputT, OutputT>(1, max_segments);
TestByGenMode<InputT, OutputT>(max_items, max_segments);
// Test random problem sizes from a log-distribution [8, max_items-ish)
int num_iterations = 8;
double max_exp = log(double(max_items)) / log(double(2.0));
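// Example with the default max_items = 27,000,000: max_exp = log2(27e6) ~= 24.7,
// so the random sizes below fall roughly in the range [2^3, 2^24.7).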
for (int i = 0; i < num_iterations; ++i)
{
OffsetT num_items = (OffsetT) pow(2.0, RandomValue(max_exp - 3.0) + 3.0);
TestByGenMode<InputT, OutputT>(num_items, max_segments);
}
//
// White-box testing of single-segment problems around specific sizes
//
// Tile-boundaries: multiple blocks, one tile per block
OffsetT tile_size = ActivePolicyT::ReducePolicy::BLOCK_THREADS * ActivePolicyT::ReducePolicy::ITEMS_PER_THREAD;
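// Illustrative (hypothetical) policy values: with 256 threads x 16 items per
// thread, tile_size = 4096 and the three tests below run on 16384, 16385, and
// 16383 items, straddling an exact multi-tile boundary.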
TestProblem<CUB, InputT, OutputT>(tile_size * 4, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(tile_size * 4 + 1, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(tile_size * 4 - 1, 1, RANDOM, Sum());
// Tile-boundaries: multiple blocks, multiple tiles per block
OffsetT sm_occupancy = 32;
OffsetT occupancy = tile_size * sm_occupancy * g_sm_count;
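// Rough saturation point: enough items to give every SM about 32 full tiles;
// the +1/-1 variants probe one item on either side of that boundary.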
TestProblem<CUB, InputT, OutputT>(occupancy, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(occupancy + 1, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(occupancy - 1, 1, RANDOM, Sum());
return hipSuccess;
}
};
/// Test problem type
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestType(
OffsetT max_items,
OffsetT max_segments)
{
typedef typename DeviceReducePolicy<InputT, OutputT, OffsetT, hipcub::Sum>::MaxPolicy MaxPolicyT;
TestBySize<InputT, OutputT, OffsetT> dispatch(max_items, max_segments);
MaxPolicyT::Invoke(g_ptx_version, dispatch);
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
typedef int OffsetT;
OffsetT max_items = 27000000;
OffsetT max_segments = 34000;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
g_verbose_input = args.CheckCmdLineFlag("v2");
args.GetCmdLineArgument("n", max_items);
args.GetCmdLineArgument("s", max_segments);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items>] "
"[--s=<num segments>] "
"[--i=<timing iterations>] "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>] "
"[--v] "
"[--cdp]"
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
// Get ptx version
CubDebugExit(PtxVersion(g_ptx_version));
// Get SM count
g_sm_count = args.deviceProp.multiProcessorCount;
std::numeric_limits<float>::max();
#ifdef QUICKER_TEST
// Compile/run basic test
TestProblem<CUB, char, int>( max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, short, int>( max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB, int, int>( max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, long long, long long>( max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB, float, float>( max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, double, double>( max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB_SEGMENTED, int, int>(max_items, max_segments, RANDOM_BIT, Sum());
#elif defined(QUICK_TEST)
// Compile/run quick comparison tests
TestProblem<CUB, char, char>( max_items * 4, 1, UNIFORM, Sum());
TestProblem<THRUST, char, char>( max_items * 4, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, short, short>( max_items * 2, 1, UNIFORM, Sum());
TestProblem<THRUST, short, short>( max_items * 2, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, int, int>( max_items, 1, UNIFORM, Sum());
TestProblem<THRUST, int, int>( max_items, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, long long, long long>( max_items / 2, 1, UNIFORM, Sum());
TestProblem<THRUST, long long, long long>( max_items / 2, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max());
TestProblem<THRUST, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max());
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// Test different input types
TestType<char, char>(max_items, max_segments);
TestType<unsigned char, unsigned char>(max_items, max_segments);
TestType<char, int>(max_items, max_segments);
TestType<short, short>(max_items, max_segments);
TestType<int, int>(max_items, max_segments);
TestType<long, long>(max_items, max_segments);
TestType<long long, long long>(max_items, max_segments);
TestType<uchar2, uchar2>(max_items, max_segments);
TestType<uint2, uint2>(max_items, max_segments);
TestType<ulonglong2, ulonglong2>(max_items, max_segments);
TestType<ulonglong4, ulonglong4>(max_items, max_segments);
TestType<TestFoo, TestFoo>(max_items, max_segments);
TestType<TestBar, TestBar>(max_items, max_segments);
}
#endif
printf("\n");
return 0;
}
| 1904407ad1d9d4dfc9b29f7f564856fbd75069e4.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <limits>
#include <typeinfo>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <cub/util_allocator.cuh>
#include <cub/device/device_reduce.cuh>
#include <cub/device/device_segmented_reduce.cuh>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
int g_ptx_version;
int g_sm_count;
double g_device_giga_bandwidth;
bool g_verbose = false;
bool g_verbose_input = false;
int g_timing_iterations = 0;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
CUB_SEGMENTED, // CUB segmented method
CUB_CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
THRUST, // Thrust method
};
// Custom max functor
struct CustomMax
{
/// Boolean max operator, returns <tt>(a > b) ? a : b</tt>
template <typename OutputT>
__host__ __device__ __forceinline__ OutputT operator()(const OutputT &a, const OutputT &b)
{
return CUB_MAX(a, b);
}
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceReduce entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to reduce entrypoint (custom-max)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// Max-identity
OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, reduction_op, identity,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to sum entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
cub::Sum reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to min entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
cub::Min reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to max entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
cub::Max reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to argmin entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
cub::ArgMin reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to argmax entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
cub::ArgMax reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous);
}
return error;
}
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSegmentedReduce entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to reduce entrypoint (custom-max)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// Max-identity
OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, reduction_op, identity,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to sum entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
cub::Sum reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to min entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
cub::Min reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to max entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
cub::Max reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to argmin entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
cub::ArgMin reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to argmax entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
cub::ArgMax reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1,
stream, debug_synchronous);
}
return error;
}
//---------------------------------------------------------------------
// Dispatch to different Thrust entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to reduction entrypoint (min or max specialization)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
cudaError_t Dispatch(
Int2Type<THRUST> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
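// Thrust manages its own temporaries internally, so the size query just
// reports a 1-byte dummy allocation to keep the harness's
// query-allocate-run flow uniform across backends.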
}
else
{
OutputT init;
CubDebugExit(cudaMemcpy(&init, d_in + 0, sizeof(OutputT), cudaMemcpyDeviceToHost));
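// Seed the reduction with the first input element so the min/max result
// comes from actual input data rather than an artificial identity value.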
thrust::device_ptr<OutputT> d_in_wrapper(d_in);
OutputT retval;
for (int i = 0; i < timing_iterations; ++i)
{
retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items, init, reduction_op);
}
if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE)
CubDebugExit(cudaMemcpy(d_out, &retval, sizeof(OutputT), cudaMemcpyHostToDevice));
}
return cudaSuccess;
}
/**
* Dispatch to reduction entrypoint (sum specialization)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT>
cudaError_t Dispatch(
Int2Type<THRUST> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
Sum reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::device_ptr<OutputT> d_in_wrapper(d_in);
OutputT retval;
for (int i = 0; i < timing_iterations; ++i)
{
retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items);
}
if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE)
CubDebugExit(cudaMemcpy(d_out, &retval, sizeof(OutputT), cudaMemcpyHostToDevice));
}
return cudaSuccess;
}
//---------------------------------------------------------------------
// CUDA nested-parallelism test kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceReduce
*/
template <
typename InputIteratorT,
typename OutputIteratorT,
typename OffsetIteratorT,
typename ReductionOpT>
__global__ void CnpDispatchKernel(
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
bool debug_synchronous)
{
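// A single device thread re-runs the host-style dispatch via CUDA dynamic
// parallelism; the resulting temp-storage size and error code are written to
// device memory so the launching host code can copy them back afterwards.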
#ifndef CUB_CDP
*d_cdp_error = cudaErrorNotSupported;
#else
*d_cdp_error = Dispatch(Int2Type<CUB>(), timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, 0, debug_synchronous);
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
/**
* Dispatch to CUB_CDP kernel
*/
template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB_CDP> dispatch_to,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to invoke device-side dispatch
CnpDispatchKernel<<<1,1>>>(timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, debug_synchronous);
// Copy out temp_storage_bytes
CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost));
// Copy out error
cudaError_t retval;
CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost));
return retval;
}
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Initialize problem
template <typename InputT>
void Initialize(
GenMode gen_mode,
InputT *h_in,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
InitValue(gen_mode, h_in[i], i);
}
if (g_verbose_input)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/// Solve problem (max/custom-max functor)
template <typename ReductionOpT, typename InputT, typename _OutputT>
struct Solution
{
typedef _OutputT OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
ReductionOpT reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
aggregate = reduction_op(aggregate, OutputT(h_in[j]));
h_reference[i] = aggregate;
}
}
};
/// Solve problem (min functor)
template <typename InputT, typename _OutputT>
struct Solution<cub::Min, InputT, _OutputT>
{
typedef _OutputT OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
cub::Min reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate = Traits<InputT>::Max(); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
aggregate = reduction_op(aggregate, OutputT(h_in[j]));
h_reference[i] = aggregate;
}
}
};
/// Solve problem (sum functor)
template <typename InputT, typename _OutputT>
struct Solution<cub::Sum, InputT, _OutputT>
{
typedef _OutputT OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
cub::Sum reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate;
InitValue(INTEGER_SEED, aggregate, 0);
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
aggregate = reduction_op(aggregate, OutputT(h_in[j]));
h_reference[i] = aggregate;
}
}
};
/// Solve problem (argmin functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<cub::ArgMin, InputValueT, OutputValueT>
{
typedef KeyValuePair<int, OutputValueT> OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
cub::ArgMin reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate(1, Traits<InputValueT>::Max()); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
{
OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j]));
aggregate = reduction_op(aggregate, item);
}
h_reference[i] = aggregate;
}
}
};
/// Solve problem (argmax functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<cub::ArgMax, InputValueT, OutputValueT>
{
typedef KeyValuePair<int, OutputValueT> OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets,
cub::ArgMax reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate(1, Traits<InputValueT>::Lowest()); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j)
{
OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j]));
aggregate = reduction_op(aggregate, item);
}
h_reference[i] = aggregate;
}
}
};
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Test DeviceReduce for a given problem input
template <
typename BackendT,
typename DeviceInputIteratorT,
typename DeviceOutputIteratorT,
typename HostReferenceIteratorT,
typename OffsetT,
typename OffsetIteratorT,
typename ReductionOpT>
void Test(
BackendT backend,
DeviceInputIteratorT d_in,
DeviceOutputIteratorT d_out,
OffsetT num_items,
OffsetT num_segments,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op,
HostReferenceIteratorT h_reference)
{
// Input data types
typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputT;
// Allocate CUB_CDP device arrays for temp storage size and error
size_t *d_temp_storage_bytes = NULL;
cudaError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1));
// Inquire temp device storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, true));
// Allocate temp device storage
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, true));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_segments, g_verbose, g_verbose);
printf("\t%s", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
if (g_timing_iterations > 0)
{
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(backend, g_timing_iterations,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_offsets,
reduction_op, 0, false));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(InputT);
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak",
avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare);
}
/// Test DeviceReduce
template <
Backend BACKEND,
typename OutputValueT,
typename HostInputIteratorT,
typename DeviceInputIteratorT,
typename OffsetT,
typename OffsetIteratorT,
typename ReductionOpT>
void SolveAndTest(
HostInputIteratorT h_in,
DeviceInputIteratorT d_in,
OffsetT num_items,
OffsetT num_segments,
OffsetIteratorT h_segment_offsets,
OffsetIteratorT d_segment_offsets,
ReductionOpT reduction_op)
{
typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputValueT;
typedef Solution<ReductionOpT, InputValueT, OutputValueT> SolutionT;
typedef typename SolutionT::OutputT OutputT;
printf("\n\n%s cub::DeviceReduce<%s> %d items (%s), %d segments\n",
(BACKEND == CUB_CDP) ? "CUB_CDP" : (BACKEND == THRUST) ? "Thrust" : (BACKEND == CUB_SEGMENTED) ? "CUB_SEGMENTED" : "CUB",
typeid(ReductionOpT).name(), num_items, typeid(HostInputIteratorT).name(), num_segments);
fflush(stdout);
// Allocate and solve solution
OutputT *h_reference = new OutputT[num_segments];
SolutionT::Solve(h_in, h_reference, num_segments, h_segment_offsets, reduction_op);
// // Run with discard iterator
// DiscardOutputIterator<OffsetT> discard_itr;
// Test(Int2Type<BACKEND>(), d_in, discard_itr, num_items, num_segments, d_segment_offsets, reduction_op, h_reference);
// Run with output data (cleared for sanity-check)
OutputT *d_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(OutputT) * num_segments));
CubDebugExit(cudaMemset(d_out, 0, sizeof(OutputT) * num_segments));
Test(Int2Type<BACKEND>(), d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, h_reference);
// Cleanup
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (h_reference) delete[] h_reference;
}
/// Test specific problem type
template <
Backend BACKEND,
typename InputT,
typename OutputT,
typename OffsetT,
typename ReductionOpT>
void TestProblem(
OffsetT num_items,
OffsetT num_segments,
GenMode gen_mode,
ReductionOpT reduction_op)
{
printf("\n\nInitializing %d %s->%s (gen mode %d)... ", num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout);
fflush(stdout);
// Initialize value data
InputT* h_in = new InputT[num_items];
Initialize(gen_mode, h_in, num_items);
// Initialize segment data
OffsetT *h_segment_offsets = new OffsetT[num_segments + 1];
InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);
// Initialize device data
OffsetT *d_segment_offsets = NULL;
InputT *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (num_segments + 1)));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(InputT) * num_items, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), cudaMemcpyHostToDevice));
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, reduction_op);
if (h_segment_offsets) delete[] h_segment_offsets;
if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
/// Test different operators
template <
Backend BACKEND,
typename OutputT,
typename HostInputIteratorT,
typename DeviceInputIteratorT,
typename OffsetT,
typename OffsetIteratorT>
void TestByOp(
HostInputIteratorT h_in,
DeviceInputIteratorT d_in,
OffsetT num_items,
OffsetT num_segments,
OffsetIteratorT h_segment_offsets,
OffsetIteratorT d_segment_offsets)
{
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, CustomMax());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Sum());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Min());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMin());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Max());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMax());
}
/// Test different backends
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestByBackend(
OffsetT num_items,
OffsetT max_segments,
GenMode gen_mode)
{
// Initialize host data
printf("\n\nInitializing %d %s -> %s (gen mode %d)... ",
num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout);
InputT *h_in = new InputT[num_items];
OffsetT *h_segment_offsets = new OffsetT[max_segments + 1];
Initialize(gen_mode, h_in, num_items);
// Initialize device data
InputT *d_in = NULL;
OffsetT *d_segment_offsets = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (max_segments + 1)));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(InputT) * num_items, cudaMemcpyHostToDevice));
//
// Test single-segment implementations
//
InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input);
// Page-aligned-input tests
TestByOp<CUB, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL); // Host-dispatch
#ifdef CUB_CDP
TestByOp<CUB_CDP, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL); // Device-dispatch
#endif
// Non-page-aligned-input tests
if (num_items > 1)
{
InitializeSegments(num_items - 1, 1, h_segment_offsets, g_verbose_input);
TestByOp<CUB, OutputT>(h_in + 1, d_in + 1, num_items - 1, 1, h_segment_offsets, (OffsetT*) NULL);
}
//
// Test segmented implementation
//
// Right now we assign a single thread block to each segment, so let's keep it to under 128K items per segment
int max_items_per_segment = 128000;
for (int num_segments = (num_items + max_items_per_segment - 1) / max_items_per_segment;
num_segments < max_segments;
num_segments = (num_segments * 32) + 1)
{
// Test with segment pointer
InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);
CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), cudaMemcpyHostToDevice));
TestByOp<CUB_SEGMENTED, OutputT>(
h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets);
// Test with segment iterator
typedef CastOp<OffsetT> IdentityOpT;
IdentityOpT identity_op;
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> h_segment_offsets_itr(
h_segment_offsets,
identity_op);
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> d_segment_offsets_itr(
d_segment_offsets,
identity_op);
TestByOp<CUB_SEGMENTED, OutputT>(
h_in, d_in, num_items, num_segments, h_segment_offsets_itr, d_segment_offsets_itr);
}
if (h_in) delete[] h_in;
if (h_segment_offsets) delete[] h_segment_offsets;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
}
/// Test different input-generation modes
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestByGenMode(
OffsetT num_items,
OffsetT max_segments)
{
//
// Test pointer support using different input-generation modes
//
TestByBackend<InputT, OutputT>(num_items, max_segments, UNIFORM);
TestByBackend<InputT, OutputT>(num_items, max_segments, INTEGER_SEED);
TestByBackend<InputT, OutputT>(num_items, max_segments, RANDOM);
//
// Test iterator support using a constant-iterator and SUM
//
InputT val;
InitValue(UNIFORM, val, 0);
ConstantInputIterator<InputT, OffsetT> h_in(val);
OffsetT *h_segment_offsets = new OffsetT[1 + 1];
InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input);
SolveAndTest<CUB, OutputT>(h_in, h_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL, Sum());
#ifdef CUB_CDP
SolveAndTest<CUB_CDP, OutputT>(h_in, h_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL, Sum());
#endif
if (h_segment_offsets) delete[] h_segment_offsets;
}
/// Test different problem sizes
template <
typename InputT,
typename OutputT,
typename OffsetT>
struct TestBySize
{
OffsetT max_items;
OffsetT max_segments;
TestBySize(OffsetT max_items, OffsetT max_segments) :
max_items(max_items),
max_segments(max_segments)
{}
template <typename ActivePolicyT>
cudaError_t Invoke()
{
//
// Black-box testing on all backends
//
// Test 0, 1, many
TestByGenMode<InputT, OutputT>(0, max_segments);
TestByGenMode<InputT, OutputT>(1, max_segments);
TestByGenMode<InputT, OutputT>(max_items, max_segments);
// Test random problem sizes from a log-distribution [8, max_items-ish)
int num_iterations = 8;
double max_exp = log(double(max_items)) / log(double(2.0));
for (int i = 0; i < num_iterations; ++i)
{
OffsetT num_items = (OffsetT) pow(2.0, RandomValue(max_exp - 3.0) + 3.0);
TestByGenMode<InputT, OutputT>(num_items, max_segments);
}
//
// White-box testing of single-segment problems around specific sizes
//
// Tile-boundaries: multiple blocks, one tile per block
OffsetT tile_size = ActivePolicyT::ReducePolicy::BLOCK_THREADS * ActivePolicyT::ReducePolicy::ITEMS_PER_THREAD;
TestProblem<CUB, InputT, OutputT>(tile_size * 4, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(tile_size * 4 + 1, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(tile_size * 4 - 1, 1, RANDOM, Sum());
// Tile-boundaries: multiple blocks, multiple tiles per block
OffsetT sm_occupancy = 32;
OffsetT occupancy = tile_size * sm_occupancy * g_sm_count;
TestProblem<CUB, InputT, OutputT>(occupancy, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(occupancy + 1, 1, RANDOM, Sum());
TestProblem<CUB, InputT, OutputT>(occupancy - 1, 1, RANDOM, Sum());
return cudaSuccess;
}
};
/// Test problem type
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestType(
OffsetT max_items,
OffsetT max_segments)
{
typedef typename DeviceReducePolicy<InputT, OutputT, OffsetT, cub::Sum>::MaxPolicy MaxPolicyT;
TestBySize<InputT, OutputT, OffsetT> dispatch(max_items, max_segments);
MaxPolicyT::Invoke(g_ptx_version, dispatch);
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
typedef int OffsetT;
OffsetT max_items = 27000000;
OffsetT max_segments = 34000;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
g_verbose_input = args.CheckCmdLineFlag("v2");
args.GetCmdLineArgument("n", max_items);
args.GetCmdLineArgument("s", max_segments);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items>] "
"[--s=<num segments>] "
"[--i=<timing iterations>] "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>] "
"[--v] "
"[--cdp]"
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
// Get ptx version
CubDebugExit(PtxVersion(g_ptx_version));
// Get SM count
g_sm_count = args.deviceProp.multiProcessorCount;
std::numeric_limits<float>::max();
#ifdef QUICKER_TEST
// Compile/run basic test
TestProblem<CUB, char, int>( max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, short, int>( max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB, int, int>( max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, long long, long long>( max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB, float, float>( max_items, 1, RANDOM_BIT, Sum());
TestProblem<CUB, double, double>( max_items, 1, RANDOM_BIT, Sum());
printf("\n-------------------------------\n");
TestProblem<CUB_SEGMENTED, int, int>(max_items, max_segments, RANDOM_BIT, Sum());
#elif defined(QUICK_TEST)
// Compile/run quick comparison tests
TestProblem<CUB, char, char>( max_items * 4, 1, UNIFORM, Sum());
TestProblem<THRUST, char, char>( max_items * 4, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, short, short>( max_items * 2, 1, UNIFORM, Sum());
TestProblem<THRUST, short, short>( max_items * 2, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, int, int>( max_items, 1, UNIFORM, Sum());
TestProblem<THRUST, int, int>( max_items, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, long long, long long>( max_items / 2, 1, UNIFORM, Sum());
TestProblem<THRUST, long long, long long>( max_items / 2, 1, UNIFORM, Sum());
printf("\n----------------------------\n");
TestProblem<CUB, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max());
TestProblem<THRUST, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max());
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// Test different input types
TestType<char, char>(max_items, max_segments);
TestType<unsigned char, unsigned char>(max_items, max_segments);
TestType<char, int>(max_items, max_segments);
TestType<short, short>(max_items, max_segments);
TestType<int, int>(max_items, max_segments);
TestType<long, long>(max_items, max_segments);
TestType<long long, long long>(max_items, max_segments);
TestType<uchar2, uchar2>(max_items, max_segments);
TestType<uint2, uint2>(max_items, max_segments);
TestType<ulonglong2, ulonglong2>(max_items, max_segments);
TestType<ulonglong4, ulonglong4>(max_items, max_segments);
TestType<TestFoo, TestFoo>(max_items, max_segments);
TestType<TestBar, TestBar>(max_items, max_segments);
}
#endif
printf("\n");
return 0;
}
|
17c06d590d20953366c1996989bde043c2935b4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "FVL/FVLib.h"
#include "FVL/FVXMLWriter.h"
#include "FVL/FVArray.h"
#include "FVio.h"
#include "FVL/FVParameters.h"
using namespace std;
#ifdef NO_CUDA
#include "kernels_cpu.h"
#else
#include <hip/hip_runtime.h>
#include "kernels_hip.cuh"
#endif
#define BLOCK_SIZE_FLUX 512
#define BLOCK_SIZE_UPDATE 512
#define GRID_SIZE(elems, threads) ((int) ::ceil((double)elems/threads))
typedef struct _parameters {
string mesh_file;
string velocity_file;
string initial_file;
string output_file;
double final_time;
double anim_time;
int anim_jump;
double dirichlet;
double CFL;
} Parameters;
#define _USE_MATH_DEFINES
#include <math.h>
#include <limits>
#include <set>
void prepare_mesh_test_data(CFVMesh2D &mesh, CFVArray<double> &polution) {
double min_x = std::numeric_limits<double>::max();
double max_x = std::numeric_limits<double>::min();
/* find min and max x coords of the mesh edges */
for(unsigned int i = 0; i < mesh.num_edges; ++i) {
double current = mesh.edge_centroids.x[i];
if (current < min_x) min_x = current;
if (current > max_x) max_x = current;
}
cout << endl << "Linking mesh ends" << endl;
/* This assumes the mesh is rectangular, and we want to connect the left side with the right side
* that is, for every edge E with x = min_x, and no right cell, make the right cell equal to the left cell of the corresponding edge on the right side, and vice-versa
**/
set<unsigned int> left_cells;
set<unsigned int> right_cells;
for(unsigned int i = 0; i < mesh.num_edges; ++i) {
if (mesh.edge_types[i] == FV_EDGE_DIRICHLET) {
if (mesh.edge_centroids.x[i] == min_x)
left_cells.insert(i);
if (mesh.edge_centroids.x[i] == max_x)
right_cells.insert(i);
}
}
set<unsigned int>::iterator left_it, right_it;
for(left_it = left_cells.begin(), right_it = right_cells.begin();
left_it != left_cells.end();
++left_it, ++right_it) {
unsigned int l = *left_it;
unsigned int r = *right_it;
/* set edges type to regular */
mesh.edge_types[l] = FV_EDGE;
mesh.edge_types[r] = FV_EDGE;
/* link both edges */
mesh.edge_right_cells[l] = mesh.edge_left_cells[r];
mesh.edge_right_cells[r] = mesh.edge_left_cells[l];
cout << "linking edge " << *left_it << " with " << *right_it << endl;
}
cout << "Linked " << left_cells.size() << " pairs of edges " << endl << endl;
}
// TODO: decent interface for xml parameters
Parameters read_parameters (string parameters_filename) {
Parameters data;
FVParameters para(parameters_filename);
data.mesh_file = para.getString("MeshName");
data.velocity_file = para.getString("VelocityFile");
data.initial_file = para.getString("PoluInitFile");
data.output_file = para.getString("OutputFile");
data.final_time = para.getDouble("FinalTime");
data.anim_time = para.getDouble("AnimTimeStep");
data.anim_jump = para.getInteger("NbJump");
data.dirichlet = para.getDouble("DirichletCondition");
data.CFL = para.getDouble("CFL");
return data;
}
// TODO: convert to cuda
double cpu_compute_mesh_parameter(CFVMesh2D &mesh) {
double h;
double S;
h = 1.e20;
for(unsigned int cell = 0; cell < mesh.num_cells; ++cell) {
S = mesh.cell_areas[cell];
for(unsigned int edge = 0; edge < mesh.cell_edges_count[cell]; ++edge) {
double length = mesh.edge_lengths[edge];
if (h * length > S)
h = S / length;
}
}
return h;
}
void cpu_compute_edge_velocities(CFVMesh2D &mesh, CFVPoints2D<double> &velocities, CFVArray<double> &vs, double &v_max) {
for(unsigned int i = 0; i < mesh.num_edges; ++i) {
unsigned int left = mesh.edge_left_cells[i];
unsigned int right = mesh.edge_right_cells[i];
if (right == NO_RIGHT_CELL)
right = left;
double v = ((velocities.x[left] + velocities.x[right]) * 0.5 * mesh.edge_normals.x[i])
+ ((velocities.y[left] + velocities.y[right]) * 0.5 * mesh.edge_normals.y[i]);
vs[i] = v;
if (abs(v) > v_max || i == 0) {
v_max = abs(v);
}
}
}
void cudaSafe(hipError_t error, const string msg) {
if (error != hipSuccess) {
cerr << "Error: " << msg << " : " << error << endl;
exit(-1);
}
}
void cudaCheckError(const string msg) {
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
cerr << "Error: " << msg << " : " << hipGetErrorString(error) << endl;
exit(-1);
}
}
int main(int argc, char **argv) {
#ifdef NO_CUDA
cout << "Running in NO_CUDA mode" << endl;
#endif
// var declaration
int i = 0;
double h, t, dt, v_max = 0;
string name;
// read params
Parameters data;
if (argc != 2) {
cerr << "Arg warning: no xml param filename specified. Defaulting to param.xml" << endl;
data = read_parameters("param.xml");
} else
data = read_parameters(argv[1]);
// read mesh
FVL::CFVMesh2D mesh(data.mesh_file);
FVL::CFVPoints2D<double> velocities(mesh.num_cells);
FVL::CFVArray<double> polution(mesh.num_cells);
FVL::CFVArray<double> flux(mesh.num_edges);
FVL::CFVArray<double> vs(mesh.num_edges);
// read other input files
FVL::FVXMLReader velocity_reader(data.velocity_file);
FVL::FVXMLReader polu_ini_reader(data.initial_file);
polu_ini_reader.getVec(polution, t, name);
velocity_reader.getPoints2D(velocities, t, name);
polu_ini_reader.close();
velocity_reader.close();
/* assign test value for polution */
prepare_mesh_test_data(mesh, polution);
FVL::FVXMLWriter polution_writer(data.output_file);
polution_writer.append(polution, t, "polution");
// compute velocity vector
// TODO: Convert to CUDA
cpu_compute_edge_velocities(mesh, velocities, vs, v_max);
h = cpu_compute_mesh_parameter(mesh);
// TODO replace 1.0 with the CFL parameter (values between 0 and 1, 0.3 for a 2nd order scheme)
dt = data.CFL / v_max * h;
#ifndef NO_CUDA
// saves whole mesh to CUDA memory
mesh.cuda_malloc();
polution.cuda_malloc();
flux.cuda_malloc();
vs.cuda_malloc();
// data copy
hipStream_t stream;
hipStreamCreate(&stream);
mesh.cuda_save(stream);
polution.cuda_save(stream);
vs.cuda_save(stream);
// sizes of each kernel
// TODO: change BLOCK_SIZE_FLUX to MAT_A
dim3 grid_flux(GRID_SIZE(mesh.num_edges, BLOCK_SIZE_FLUX), 1, 1);
dim3 block_flux(BLOCK_SIZE_FLUX, 1, 1);
dim3 grid_update(GRID_SIZE(mesh.num_cells, BLOCK_SIZE_UPDATE), 1, 1);
dim3 block_update(BLOCK_SIZE_UPDATE, 1, 1);
#endif
bool finished = false;
double anim_next_step = data.anim_time;
cout << "dt= " << dt << endl;
while (!finished) {
//while(t < data.final_time) {
cout << "time: " << t << " iteration: " << i << "\r";
if (t + dt > data.final_time) {
cout << endl << "Final iteration, adjusting dt" << endl;
dt = data.final_time - t;
finished = true;
}
// Cpu version
#ifdef NO_CUDA
/* compute flux */
cpu_compute_flux(mesh, vs, polution, flux, data.dirichlet);
/* update */
cpu_update(mesh, polution, flux, dt);
#else
//kernel_compute_flux<<< grid_flux, block_flux >>>(mesh.cuda_get(), polution.cuda_get(), vs.cuda_get(), flux.cuda_get(), data.dirichlet);
_DEBUG {
stringstream ss;
ss << "cuda[compute_flux] i=" << i;
cudaCheckError(ss.str());
}
//kernel_update<<< grid_update, block_update >>>(mesh.cuda_get(), polution.cuda_get(), flux.cuda_get(), data.dirichlet);
_DEBUG {
stringstream ss;
ss << "cuda[update] i=" << i;
cudaCheckError(ss.str());
}
#endif
t += dt;
if (t >= anim_next_step) {
#ifndef NO_CUDA
polution.cuda_get();
#endif
polution_writer.append(polution, t, "polution");
anim_next_step += data.anim_time;
}
++i;
}
polution_writer.save();
polution_writer.close();
#ifndef NO_CUDA
polution.cuda_free();
flux.cuda_free();
#endif
cout << endl << "exiting" << endl;
}
| 17c06d590d20953366c1996989bde043c2935b4c.cu | #include "FVL/FVLib.h"
#include "FVL/FVXMLWriter.h"
#include "FVL/FVArray.h"
#include "FVio.h"
#include "FVL/FVParameters.h"
using namespace std;
#ifdef NO_CUDA
#include "kernels_cpu.h"
#else
#include <cuda.h>
#include "kernels_cuda.cuh"
#endif
#define BLOCK_SIZE_FLUX 512
#define BLOCK_SIZE_UPDATE 512
#define GRID_SIZE(elems, threads) ((int) std::ceil((double)elems/threads))
typedef struct _parameters {
string mesh_file;
string velocity_file;
string initial_file;
string output_file;
double final_time;
double anim_time;
int anim_jump;
double dirichlet;
double CFL;
} Parameters;
#define _USE_MATH_DEFINES
#include <math.h>
#include <limits>
#include <set>
void prepare_mesh_test_data(CFVMesh2D &mesh, CFVArray<double> &polution) {
double min_x = std::numeric_limits<double>::max();
double max_x = std::numeric_limits<double>::min();
/* find min and max x coords of the mesh edges */
for(unsigned int i = 0; i < mesh.num_edges; ++i) {
double current = mesh.edge_centroids.x[i];
if (current < min_x) min_x = current;
if (current > max_x) max_x = current;
}
cout << endl << "Linking mesh ends" << endl;
/* This assumes the mesh is rectangular, and we want to connect the left side with the right side
* that is, for every edge E with x = min_x, and no right cell, make the right cell equal to the left cell of the corresponding edge on the right side, and vice-versa
**/
set<unsigned int> left_cells;
set<unsigned int> right_cells;
for(unsigned int i = 0; i < mesh.num_edges; ++i) {
if (mesh.edge_types[i] == FV_EDGE_DIRICHLET) {
if (mesh.edge_centroids.x[i] == min_x)
left_cells.insert(i);
if (mesh.edge_centroids.x[i] == max_x)
right_cells.insert(i);
}
}
set<unsigned int>::iterator left_it, right_it;
for(left_it = left_cells.begin(), right_it = right_cells.begin();
left_it != left_cells.end();
++left_it, ++right_it) {
unsigned int l = *left_it;
unsigned int r = *right_it;
/* set edges type to regular */
mesh.edge_types[l] = FV_EDGE;
mesh.edge_types[r] = FV_EDGE;
/* link both edges */
mesh.edge_right_cells[l] = mesh.edge_left_cells[r];
mesh.edge_right_cells[r] = mesh.edge_left_cells[l];
cout << "linking edge " << *left_it << " with " << *right_it << endl;
}
cout << "Linked " << left_cells.size() << " pairs of edges " << endl << endl;
}
// TODO: decent interface for xml parameters
Parameters read_parameters (string parameters_filename) {
Parameters data;
FVParameters para(parameters_filename);
data.mesh_file = para.getString("MeshName");
data.velocity_file = para.getString("VelocityFile");
data.initial_file = para.getString("PoluInitFile");
data.output_file = para.getString("OutputFile");
data.final_time = para.getDouble("FinalTime");
data.anim_time = para.getDouble("AnimTimeStep");
data.anim_jump = para.getInteger("NbJump");
data.dirichlet = para.getDouble("DirichletCondition");
data.CFL = para.getDouble("CFL");
return data;
}
// TODO: convert to cuda
double cpu_compute_mesh_parameter(CFVMesh2D &mesh) {
double h;
double S;
h = 1.e20;
for(unsigned int cell = 0; cell < mesh.num_cells; ++cell) {
S = mesh.cell_areas[cell];
for(unsigned int edge = 0; edge < mesh.cell_edges_count[cell]; ++edge) {
double length = mesh.edge_lengths[edge];
if (h * length > S)
h = S / length;
}
}
return h;
}
void cpu_compute_edge_velocities(CFVMesh2D &mesh, CFVPoints2D<double> &velocities, CFVArray<double> &vs, double &v_max) {
for(unsigned int i = 0; i < mesh.num_edges; ++i) {
unsigned int left = mesh.edge_left_cells[i];
unsigned int right = mesh.edge_right_cells[i];
if (right == NO_RIGHT_CELL)
right = left;
double v = ((velocities.x[left] + velocities.x[right]) * 0.5 * mesh.edge_normals.x[i])
+ ((velocities.y[left] + velocities.y[right]) * 0.5 * mesh.edge_normals.y[i]);
vs[i] = v;
if (abs(v) > v_max || i == 0) {
v_max = abs(v);
}
}
}
void cudaSafe(cudaError_t error, const string msg) {
if (error != cudaSuccess) {
cerr << "Error: " << msg << " : " << error << endl;
exit(-1);
}
}
void cudaCheckError(const string msg) {
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
cerr << "Error: " << msg << " : " << cudaGetErrorString(error) << endl;
exit(-1);
}
}
int main(int argc, char **argv) {
#ifdef NO_CUDA
cout << "Running in NO_CUDA mode" << endl;
#endif
// var declaration
int i = 0;
double h, t, dt, v_max = 0;
string name;
// read params
Parameters data;
if (argc != 2) {
cerr << "Arg warning: no xml param filename specified. Defaulting to param.xml" << endl;
data = read_parameters("param.xml");
} else
data = read_parameters(argv[1]);
// read mesh
FVL::CFVMesh2D mesh(data.mesh_file);
FVL::CFVPoints2D<double> velocities(mesh.num_cells);
FVL::CFVArray<double> polution(mesh.num_cells);
FVL::CFVArray<double> flux(mesh.num_edges);
FVL::CFVArray<double> vs(mesh.num_edges);
// read other input files
FVL::FVXMLReader velocity_reader(data.velocity_file);
FVL::FVXMLReader polu_ini_reader(data.initial_file);
polu_ini_reader.getVec(polution, t, name);
velocity_reader.getPoints2D(velocities, t, name);
polu_ini_reader.close();
velocity_reader.close();
/* assign test value for polution */
prepare_mesh_test_data(mesh, polution);
FVL::FVXMLWriter polution_writer(data.output_file);
polution_writer.append(polution, t, "polution");
// compute velocity vector
// TODO: Convert to CUDA
cpu_compute_edge_velocities(mesh, velocities, vs, v_max);
h = cpu_compute_mesh_parameter(mesh);
// TODO replace 1.0 with the CFL parameter (values between 0 and 1, 0.3 for a 2nd order scheme)
dt = data.CFL / v_max * h;
#ifndef NO_CUDA
// saves whole mesh to CUDA memory
mesh.cuda_malloc();
polution.cuda_malloc();
flux.cuda_malloc();
vs.cuda_malloc();
// data copy
cudaStream_t stream;
cudaStreamCreate(&stream);
mesh.cuda_save(stream);
polution.cuda_save(stream);
vs.cuda_save(stream);
// sizes of each kernel
// TODO: change BLOCK_SIZE_FLUX to MAT_A
dim3 grid_flux(GRID_SIZE(mesh.num_edges, BLOCK_SIZE_FLUX), 1, 1);
dim3 block_flux(BLOCK_SIZE_FLUX, 1, 1);
dim3 grid_update(GRID_SIZE(mesh.num_cells, BLOCK_SIZE_UPDATE), 1, 1);
dim3 block_update(BLOCK_SIZE_UPDATE, 1, 1);
#endif
bool finished = false;
double anim_next_step = data.anim_time;
cout << "dt= " << dt << endl;
while (!finished) {
//while(t < data.final_time) {
cout << "time: " << t << " iteration: " << i << "\r";
if (t + dt > data.final_time) {
cout << endl << "Final iteration, adjusting dt" << endl;
dt = data.final_time - t;
finished = true;
}
// Cpu version
#ifdef NO_CUDA
/* compute flux */
cpu_compute_flux(mesh, vs, polution, flux, data.dirichlet);
/* update */
cpu_update(mesh, polution, flux, dt);
#else
//kernel_compute_flux<<< grid_flux, block_flux >>>(mesh.cuda_get(), polution.cuda_get(), vs.cuda_get(), flux.cuda_get(), data.dirichlet);
_DEBUG {
stringstream ss;
ss << "cuda[compute_flux] i=" << i;
cudaCheckError(ss.str());
}
//kernel_update<<< grid_update, block_update >>>(mesh.cuda_get(), polution.cuda_get(), flux.cuda_get(), data.dirichlet);
_DEBUG {
stringstream ss;
ss << "cuda[update] i=" << i;
cudaCheckError(ss.str());
}
#endif
t += dt;
if (t >= anim_next_step) {
#ifndef NO_CUDA
polution.cuda_get();
#endif
polution_writer.append(polution, t, "polution");
anim_next_step += data.anim_time;
}
++i;
}
polution_writer.save();
polution_writer.close();
#ifndef NO_CUDA
polution.cuda_free();
flux.cuda_free();
#endif
cout << endl << "exiting" << endl;
}
|
53a0446ad98ef22c2546717f44417de766419268.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
/* #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" */
__global__
void
AddOneKernel(const float* in, const int N, float* out) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x)
{
out[i] = in[i] + 1;
}
}
void AddOneKernelLauncher(const float* in, const int N, float* out) {
hipLaunchKernelGGL(( AddOneKernel), dim3(32), dim3(256), 0, 0, in, N, out);
}
#endif
| 53a0446ad98ef22c2546717f44417de766419268.cu | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
/* #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" */
__global__
void
AddOneKernel(const float* in, const int N, float* out) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x)
{
out[i] = in[i] + 1;
}
}
void AddOneKernelLauncher(const float* in, const int N, float* out) {
AddOneKernel<<<32, 256>>>(in, N, out);
}
#endif
|
0f52b0187f0e166eefec9b67f564485d5d8dc58f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/BatchNormalization.cu"
#else
#define DeviceTensor3 THCDeviceTensor<real, 3>
#define DeviceTensor1 THCDeviceTensor<real, 1>
template <int Dim>
static THCDeviceTensor<real, Dim> THNN_(devicetensor)(THCState *state, THCTensor *t) {
if (!t) {
return THCDeviceTensor<real, Dim>();
}
int inDim = THCTensor_nDimension(state, t);
if (inDim == Dim) {
return toDeviceTensor<real, Dim>(state, t);
}
// View in which the last dimensions are collapsed or expanded as needed
THAssert(THCTensor_isContiguous(state, t));
int size[Dim];
for (int i = 0; i < Dim || i < inDim; ++i) {
if (i < Dim && i < inDim) {
size[i] = t->size[i];
} else if (i < Dim) {
size[i] = 1;
} else {
size[Dim - 1] *= t->size[i];
}
}
return THCDeviceTensor<real, Dim>(THCTensor_(data)(state, t), size);
}
void THNN_(BatchNormalization_updateOutput)(
THCState *state, THCTensor *input_, THCTensor *output_,
THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_,
THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_,
bool train, double momentum, double eps) {
THCTensor_(resizeAs)(state, output_, input_);
if (train) {
int64_t nInput = THCTensor_(size)(state, input_, 1);
THCTensor_(resize1d)(state, saveMean_, nInput);
THCTensor_(resize1d)(state, saveStd_, nInput);
}
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 output = THNN_(devicetensor)<3>(state, output_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 bias = THNN_(devicetensor)<1>(state, bias_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
hipStream_t s = THCState_getCurrentStream(state);
hipDeviceProp_t *prop = THCState_getCurrentDeviceProperties(state);
if (!train) {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationUpdateOutputInference_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, output, runningMean, runningVar, weight, bias, eps);
} else {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationUpdateOutput_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, output, weight, bias, eps, momentum, runningMean, runningVar,
saveMean, saveStd);
}
THCudaCheck(hipGetLastError());
}
void THNN_(BatchNormalization_backward)(
THCState *state, THCTensor *input_, THCTensor *gradOutput_,
THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_,
THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
THCTensor *saveMean_, THCTensor *saveStd_, bool train, double scale, double eps) {
THCUNN_check_shape(state, input_, gradOutput_);
if (gradInput_) {
THCTensor_(resizeAs)(state, gradInput_, input_);
}
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 gradOutput = THNN_(devicetensor)<3>(state, gradOutput_);
DeviceTensor3 gradInput = THNN_(devicetensor)<3>(state, gradInput_);
DeviceTensor1 gradWeight = THNN_(devicetensor)<1>(state, gradWeight_);
DeviceTensor1 gradBias = THNN_(devicetensor)<1>(state, gradBias_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
hipStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
dim3 threads(getNumThreads(gradOutput.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationBackward_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
saveMean, saveStd, train, scale, eps);
THCudaCheck(hipGetLastError());
}
#undef DeviceTensor3
#undef DeviceTensor1
#endif
| 0f52b0187f0e166eefec9b67f564485d5d8dc58f.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/BatchNormalization.cu"
#else
#define DeviceTensor3 THCDeviceTensor<real, 3>
#define DeviceTensor1 THCDeviceTensor<real, 1>
template <int Dim>
static THCDeviceTensor<real, Dim> THNN_(devicetensor)(THCState *state, THCTensor *t) {
if (!t) {
return THCDeviceTensor<real, Dim>();
}
int inDim = THCTensor_nDimension(state, t);
if (inDim == Dim) {
return toDeviceTensor<real, Dim>(state, t);
}
// View in which the last dimensions are collapsed or expanded as needed
THAssert(THCTensor_isContiguous(state, t));
int size[Dim];
for (int i = 0; i < Dim || i < inDim; ++i) {
if (i < Dim && i < inDim) {
size[i] = t->size[i];
} else if (i < Dim) {
size[i] = 1;
} else {
size[Dim - 1] *= t->size[i];
}
}
return THCDeviceTensor<real, Dim>(THCTensor_(data)(state, t), size);
}
void THNN_(BatchNormalization_updateOutput)(
THCState *state, THCTensor *input_, THCTensor *output_,
THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_,
THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_,
bool train, double momentum, double eps) {
THCTensor_(resizeAs)(state, output_, input_);
if (train) {
int64_t nInput = THCTensor_(size)(state, input_, 1);
THCTensor_(resize1d)(state, saveMean_, nInput);
THCTensor_(resize1d)(state, saveStd_, nInput);
}
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 output = THNN_(devicetensor)<3>(state, output_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 bias = THNN_(devicetensor)<1>(state, bias_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
cudaStream_t s = THCState_getCurrentStream(state);
cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);
if (!train) {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
BatchNormalizationUpdateOutputInference_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, output, runningMean, runningVar, weight, bias, eps);
} else {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
BatchNormalizationUpdateOutput_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, output, weight, bias, eps, momentum, runningMean, runningVar,
saveMean, saveStd);
}
THCudaCheck(cudaGetLastError());
}
void THNN_(BatchNormalization_backward)(
THCState *state, THCTensor *input_, THCTensor *gradOutput_,
THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_,
THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
THCTensor *saveMean_, THCTensor *saveStd_, bool train, double scale, double eps) {
THCUNN_check_shape(state, input_, gradOutput_);
if (gradInput_) {
THCTensor_(resizeAs)(state, gradInput_, input_);
}
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 gradOutput = THNN_(devicetensor)<3>(state, gradOutput_);
DeviceTensor3 gradInput = THNN_(devicetensor)<3>(state, gradInput_);
DeviceTensor1 gradWeight = THNN_(devicetensor)<1>(state, gradWeight_);
DeviceTensor1 gradBias = THNN_(devicetensor)<1>(state, gradBias_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
cudaStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
dim3 threads(getNumThreads(gradOutput.getSize(2)));
BatchNormalizationBackward_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
saveMean, saveStd, train, scale, eps);
THCudaCheck(cudaGetLastError());
}
#undef DeviceTensor3
#undef DeviceTensor1
#endif
|
a5c0bf661224035dae5768fd4f0f26e991f0d995.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel_mass_flux_z;
int xdim0_advec_mom_kernel_mass_flux_z_h = -1;
__constant__ int ydim0_advec_mom_kernel_mass_flux_z;
int ydim0_advec_mom_kernel_mass_flux_z_h = -1;
__constant__ int xdim1_advec_mom_kernel_mass_flux_z;
int xdim1_advec_mom_kernel_mass_flux_z_h = -1;
__constant__ int ydim1_advec_mom_kernel_mass_flux_z;
int ydim1_advec_mom_kernel_mass_flux_z_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel_mass_flux_z * (y) + \
xdim0_advec_mom_kernel_mass_flux_z * ydim0_advec_mom_kernel_mass_flux_z * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel_mass_flux_z * (y) + \
xdim1_advec_mom_kernel_mass_flux_z * ydim1_advec_mom_kernel_mass_flux_z * \
(z))
// user function
__device__
inline void
advec_mom_kernel_mass_flux_z(double *node_flux, const double *mass_flux_z) {
node_flux[OPS_ACC0(0, 0, 0)] =
0.125 *
(mass_flux_z[OPS_ACC1(-1, 0, 0)] + mass_flux_z[OPS_ACC1(0, 0, 0)] +
mass_flux_z[OPS_ACC1(-1, 0, 1)] + mass_flux_z[OPS_ACC1(0, 0, 1)] +
mass_flux_z[OPS_ACC1(-1, -1, 0)] + mass_flux_z[OPS_ACC1(0, -1, 0)] +
mass_flux_z[OPS_ACC1(-1, -1, 1)] + mass_flux_z[OPS_ACC1(0, -1, 1)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_advec_mom_kernel_mass_flux_z(double *__restrict arg0,
const double *__restrict arg1,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_mass_flux_z +
idx_z * 1 * 1 * xdim0_advec_mom_kernel_mass_flux_z *
ydim0_advec_mom_kernel_mass_flux_z;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_mass_flux_z +
idx_z * 1 * 1 * xdim1_advec_mom_kernel_mass_flux_z *
ydim1_advec_mom_kernel_mass_flux_z;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel_mass_flux_z(arg0, arg1);
}
}
// host stub function
void ops_par_loop_advec_mom_kernel_mass_flux_z(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1) {
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 2, range, 33))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(33, "advec_mom_kernel_mass_flux_z");
OPS_kernels[33].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_advec_mom_kernel_mass_flux_z_h ||
ydim0 != ydim0_advec_mom_kernel_mass_flux_z_h ||
xdim1 != xdim1_advec_mom_kernel_mass_flux_z_h ||
ydim1 != ydim1_advec_mom_kernel_mass_flux_z_h) {
hipMemcpyToSymbol(xdim0_advec_mom_kernel_mass_flux_z, &xdim0, sizeof(int));
xdim0_advec_mom_kernel_mass_flux_z_h = xdim0;
hipMemcpyToSymbol(ydim0_advec_mom_kernel_mass_flux_z, &ydim0, sizeof(int));
ydim0_advec_mom_kernel_mass_flux_z_h = ydim0;
hipMemcpyToSymbol(xdim1_advec_mom_kernel_mass_flux_z, &xdim1, sizeof(int));
xdim1_advec_mom_kernel_mass_flux_z_h = xdim1;
hipMemcpyToSymbol(ydim1_advec_mom_kernel_mass_flux_z, &ydim1, sizeof(int));
ydim1_advec_mom_kernel_mass_flux_z_h = ydim1;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[2];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[33].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_advec_mom_kernel_mass_flux_z), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[33].time += t1 - t2;
}
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[33].mpi_time += t2 - t1;
OPS_kernels[33].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[33].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| a5c0bf661224035dae5768fd4f0f26e991f0d995.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel_mass_flux_z;
int xdim0_advec_mom_kernel_mass_flux_z_h = -1;
__constant__ int ydim0_advec_mom_kernel_mass_flux_z;
int ydim0_advec_mom_kernel_mass_flux_z_h = -1;
__constant__ int xdim1_advec_mom_kernel_mass_flux_z;
int xdim1_advec_mom_kernel_mass_flux_z_h = -1;
__constant__ int ydim1_advec_mom_kernel_mass_flux_z;
int ydim1_advec_mom_kernel_mass_flux_z_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel_mass_flux_z * (y) + \
xdim0_advec_mom_kernel_mass_flux_z * ydim0_advec_mom_kernel_mass_flux_z * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel_mass_flux_z * (y) + \
xdim1_advec_mom_kernel_mass_flux_z * ydim1_advec_mom_kernel_mass_flux_z * \
(z))
// user function
__device__
inline void
advec_mom_kernel_mass_flux_z(double *node_flux, const double *mass_flux_z) {
node_flux[OPS_ACC0(0, 0, 0)] =
0.125 *
(mass_flux_z[OPS_ACC1(-1, 0, 0)] + mass_flux_z[OPS_ACC1(0, 0, 0)] +
mass_flux_z[OPS_ACC1(-1, 0, 1)] + mass_flux_z[OPS_ACC1(0, 0, 1)] +
mass_flux_z[OPS_ACC1(-1, -1, 0)] + mass_flux_z[OPS_ACC1(0, -1, 0)] +
mass_flux_z[OPS_ACC1(-1, -1, 1)] + mass_flux_z[OPS_ACC1(0, -1, 1)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_advec_mom_kernel_mass_flux_z(double *__restrict arg0,
const double *__restrict arg1,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_mass_flux_z +
idx_z * 1 * 1 * xdim0_advec_mom_kernel_mass_flux_z *
ydim0_advec_mom_kernel_mass_flux_z;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_mass_flux_z +
idx_z * 1 * 1 * xdim1_advec_mom_kernel_mass_flux_z *
ydim1_advec_mom_kernel_mass_flux_z;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel_mass_flux_z(arg0, arg1);
}
}
// host stub function
void ops_par_loop_advec_mom_kernel_mass_flux_z(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1) {
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 2, range, 33))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(33, "advec_mom_kernel_mass_flux_z");
OPS_kernels[33].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_advec_mom_kernel_mass_flux_z_h ||
ydim0 != ydim0_advec_mom_kernel_mass_flux_z_h ||
xdim1 != xdim1_advec_mom_kernel_mass_flux_z_h ||
ydim1 != ydim1_advec_mom_kernel_mass_flux_z_h) {
cudaMemcpyToSymbol(xdim0_advec_mom_kernel_mass_flux_z, &xdim0, sizeof(int));
xdim0_advec_mom_kernel_mass_flux_z_h = xdim0;
cudaMemcpyToSymbol(ydim0_advec_mom_kernel_mass_flux_z, &ydim0, sizeof(int));
ydim0_advec_mom_kernel_mass_flux_z_h = ydim0;
cudaMemcpyToSymbol(xdim1_advec_mom_kernel_mass_flux_z, &xdim1, sizeof(int));
xdim1_advec_mom_kernel_mass_flux_z_h = xdim1;
cudaMemcpyToSymbol(ydim1_advec_mom_kernel_mass_flux_z, &ydim1, sizeof(int));
ydim1_advec_mom_kernel_mass_flux_z_h = ydim1;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[2];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[33].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_advec_mom_kernel_mass_flux_z<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[33].time += t1 - t2;
}
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[33].mpi_time += t2 - t1;
OPS_kernels[33].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[33].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|